-rw-r--r--  CREDITS | 5
-rw-r--r--  Documentation/Changes | 11
-rw-r--r--  Documentation/DocBook/device-drivers.tmpl | 2
-rw-r--r--  Documentation/DocBook/media/v4l/vidioc-expbuf.xml | 8
-rw-r--r--  Documentation/assoc_array.txt | 574
-rw-r--r--  Documentation/block/null_blk.txt | 72
-rw-r--r--  Documentation/cgroups/net_cls.txt | 5
-rw-r--r--  Documentation/device-mapper/cache.txt | 10
-rw-r--r--  Documentation/devicetree/bindings/arm/omap/mpu.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/arm/pmu.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/exynos4-clock.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/exynos5250-clock.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/exynos5420-clock.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/exynos5440-clock.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/dma/atmel-dma.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/gpio/8xxx_gpio.txt | 66
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-omap.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/i2c/trivial-devices.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/mmc/ti-omap.txt | 54
-rw-r--r--  Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt | 25
-rw-r--r--  Documentation/devicetree/bindings/net/davinci_emac.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/fsl-fec.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/phy.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/net/smsc-lan91c111.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/powerpc/fsl/dma.txt | 138
-rw-r--r--  Documentation/devicetree/bindings/rng/qcom,prng.txt | 17
-rw-r--r--  Documentation/devicetree/bindings/spi/nvidia,tegra20-spi.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/vendor-prefixes.txt | 2
-rw-r--r--  Documentation/dmatest.txt | 72
-rw-r--r--  Documentation/filesystems/btrfs.txt | 34
-rw-r--r--  Documentation/gpio/00-INDEX | 14
-rw-r--r--  Documentation/gpio/board.txt | 115
-rw-r--r--  Documentation/gpio/consumer.txt | 197
-rw-r--r--  Documentation/gpio/driver.txt | 75
-rw-r--r--  Documentation/gpio/gpio-legacy.txt (renamed from Documentation/gpio.txt) | 0
-rw-r--r--  Documentation/gpio/gpio.txt | 119
-rw-r--r--  Documentation/gpio/sysfs.txt | 155
-rw-r--r--  Documentation/kernel-parameters.txt | 13
-rw-r--r--  Documentation/mic/mpssd/mpssd.c | 18
-rw-r--r--  Documentation/module-signing.txt | 240
-rw-r--r--  Documentation/networking/bonding.txt | 8
-rw-r--r--  Documentation/networking/can.txt | 94
-rw-r--r--  Documentation/networking/filter.txt | 608
-rw-r--r--  Documentation/networking/i40evf.txt | 47
-rw-r--r--  Documentation/networking/ip-sysctl.txt | 33
-rw-r--r--  Documentation/networking/ipsec.txt | 38
-rw-r--r--  Documentation/networking/packet_mmap.txt | 40
-rw-r--r--  Documentation/networking/phy.txt | 3
-rw-r--r--  Documentation/networking/timestamping.txt | 9
-rw-r--r--  Documentation/networking/timestamping/.gitignore | 1
-rw-r--r--  Documentation/networking/timestamping/Makefile | 5
-rw-r--r--  Documentation/networking/timestamping/hwtstamp_config.c | 134
-rw-r--r--  Documentation/power/runtime_pm.txt | 14
-rw-r--r--  Documentation/security/00-INDEX | 2
-rw-r--r--  Documentation/security/IMA-templates.txt | 87
-rw-r--r--  Documentation/security/keys.txt | 20
-rwxr-xr-x  Documentation/target/tcm_mod_builder.py | 18
-rw-r--r--  Documentation/unaligned-memory-access.txt | 28
-rw-r--r--  Documentation/vm/split_page_table_lock | 6
-rw-r--r--  MAINTAINERS | 123
-rw-r--r--  Makefile | 26
-rw-r--r--  arch/alpha/Kconfig | 76
-rw-r--r--  arch/alpha/include/asm/Kbuild | 1
-rw-r--r--  arch/alpha/include/asm/machvec.h | 22
-rw-r--r--  arch/alpha/include/asm/pal.h | 71
-rw-r--r--  arch/alpha/include/asm/rtc.h | 11
-rw-r--r--  arch/alpha/include/asm/string.h | 24
-rw-r--r--  arch/alpha/include/uapi/asm/pal.h | 1
-rw-r--r--  arch/alpha/kernel/Makefile | 1
-rw-r--r--  arch/alpha/kernel/alpha_ksyms.c | 1
-rw-r--r--  arch/alpha/kernel/irq_alpha.c | 16
-rw-r--r--  arch/alpha/kernel/machvec_impl.h | 5
-rw-r--r--  arch/alpha/kernel/perf_event.c | 15
-rw-r--r--  arch/alpha/kernel/process.c | 17
-rw-r--r--  arch/alpha/kernel/proto.h | 6
-rw-r--r--  arch/alpha/kernel/rtc.c | 323
-rw-r--r--  arch/alpha/kernel/setup.c | 23
-rw-r--r--  arch/alpha/kernel/smp.c | 33
-rw-r--r--  arch/alpha/kernel/sys_jensen.c | 2
-rw-r--r--  arch/alpha/kernel/sys_marvel.c | 55
-rw-r--r--  arch/alpha/kernel/time.c | 405
-rw-r--r--  arch/alpha/kernel/traps.c | 15
-rw-r--r--  arch/alpha/lib/csum_partial_copy.c | 10
-rw-r--r--  arch/alpha/lib/ev6-memset.S | 12
-rw-r--r--  arch/alpha/lib/memset.S | 11
-rw-r--r--  arch/arc/Kconfig | 1
-rw-r--r--  arch/arc/include/asm/Kbuild | 1
-rw-r--r--  arch/arc/include/uapi/asm/unistd.h | 11
-rw-r--r--  arch/arc/kernel/perf_event.c | 4
-rw-r--r--  arch/arm/Kconfig | 11
-rw-r--r--  arch/arm/boot/dts/am335x-base0033.dts | 79
-rw-r--r--  arch/arm/boot/dts/am335x-igep0033.dtsi | 29
-rw-r--r--  arch/arm/boot/dts/am3517-evm.dts | 6
-rw-r--r--  arch/arm/boot/dts/am3517.dtsi | 63
-rw-r--r--  arch/arm/boot/dts/armada-370-db.dts | 28
-rw-r--r--  arch/arm/boot/dts/armada-370-xp.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/armada-xp-mv78230.dtsi | 24
-rw-r--r--  arch/arm/boot/dts/armada-xp-mv78260.dtsi | 109
-rw-r--r--  arch/arm/boot/dts/at91sam9x5_usart3.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/bcm2835.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/cros5250-common.dtsi | 12
-rw-r--r--  arch/arm/boot/dts/imx6qdl.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/omap-zoom-common.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/omap2.dtsi | 96
-rw-r--r--  arch/arm/boot/dts/omap2420.dtsi | 23
-rw-r--r--  arch/arm/boot/dts/omap2430.dtsi | 49
-rw-r--r--  arch/arm/boot/dts/omap3-beagle-xm.dts | 7
-rw-r--r--  arch/arm/boot/dts/omap3-beagle.dts | 21
-rw-r--r--  arch/arm/boot/dts/omap3-igep.dtsi | 85
-rw-r--r--  arch/arm/boot/dts/omap3-igep0020.dts | 50
-rw-r--r--  arch/arm/boot/dts/omap3-igep0030.dts | 4
-rw-r--r--  arch/arm/boot/dts/omap3-n900.dts | 25
-rw-r--r--  arch/arm/boot/dts/omap3-n950-n9.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/omap3.dtsi | 42
-rw-r--r--  arch/arm/boot/dts/omap34xx-hs.dtsi | 16
-rw-r--r--  arch/arm/boot/dts/omap36xx-hs.dtsi | 16
-rw-r--r--  arch/arm/boot/dts/omap4-panda-common.dtsi | 20
-rw-r--r--  arch/arm/boot/dts/omap4-sdp.dts | 12
-rw-r--r--  arch/arm/boot/dts/r8a7790.dtsi | 28
-rw-r--r--  arch/arm/boot/dts/socfpga.dtsi | 7
-rw-r--r--  arch/arm/boot/dts/sun6i-a31.dtsi | 27
-rw-r--r--  arch/arm/boot/dts/sun7i-a20.dtsi | 42
-rw-r--r--  arch/arm/common/edma.c | 4
-rw-r--r--  arch/arm/configs/multi_v7_defconfig | 3
-rw-r--r--  arch/arm/configs/omap2plus_defconfig | 1
-rw-r--r--  arch/arm/configs/sunxi_defconfig | 7
-rw-r--r--  arch/arm/configs/u8500_defconfig | 3
-rw-r--r--  arch/arm/include/asm/Kbuild | 1
-rw-r--r--  arch/arm/include/asm/hardware/iop3xx-adma.h | 30
-rw-r--r--  arch/arm/include/asm/hardware/iop_adma.h | 4
-rw-r--r--  arch/arm/include/asm/memory.h | 40
-rw-r--r--  arch/arm/include/asm/pgtable.h | 2
-rw-r--r--  arch/arm/kernel/head-nommu.S | 4
-rw-r--r--  arch/arm/kernel/head.S | 9
-rw-r--r--  arch/arm/kernel/machine_kexec.c | 17
-rw-r--r--  arch/arm/kernel/process.c | 7
-rw-r--r--  arch/arm/kernel/relocate_kernel.S | 8
-rw-r--r--  arch/arm/kernel/setup.c | 3
-rw-r--r--  arch/arm/kernel/sigreturn_codes.S | 40
-rw-r--r--  arch/arm/kernel/stacktrace.c | 2
-rw-r--r--  arch/arm/kernel/traps.c | 5
-rw-r--r--  arch/arm/kvm/mmu.c | 34
-rw-r--r--  arch/arm/lib/bitops.h | 2
-rw-r--r--  arch/arm/lib/delay-loop.S | 1
-rw-r--r--  arch/arm/mach-at91/at91rm9200_time.c | 7
-rw-r--r--  arch/arm/mach-at91/pm.h | 4
-rw-r--r--  arch/arm/mach-at91/sama5d3.c | 6
-rw-r--r--  arch/arm/mach-davinci/devices-da8xx.c | 4
-rw-r--r--  arch/arm/mach-davinci/dm355.c | 3
-rw-r--r--  arch/arm/mach-davinci/dm365.c | 3
-rw-r--r--  arch/arm/mach-davinci/dm644x.c | 3
-rw-r--r--  arch/arm/mach-davinci/dm646x.c | 6
-rw-r--r--  arch/arm/mach-footbridge/common.c | 3
-rw-r--r--  arch/arm/mach-footbridge/dc21285.c | 2
-rw-r--r--  arch/arm/mach-footbridge/ebsa285.c | 22
-rw-r--r--  arch/arm/mach-highbank/highbank.c | 23
-rw-r--r--  arch/arm/mach-iop13xx/include/mach/adma.h | 26
-rw-r--r--  arch/arm/mach-omap2/Makefile | 6
-rw-r--r--  arch/arm/mach-omap2/board-generic.c | 18
-rw-r--r--  arch/arm/mach-omap2/board-ldp.c | 7
-rw-r--r--  arch/arm/mach-omap2/common.h | 1
-rw-r--r--  arch/arm/mach-omap2/display.c | 40
-rw-r--r--  arch/arm/mach-omap2/dss-common.c | 2
-rw-r--r--  arch/arm/mach-omap2/gpmc.c | 58
-rw-r--r--  arch/arm/mach-omap2/omap-secure.h | 7
-rw-r--r--  arch/arm/mach-omap2/omap4-common.c | 57
-rw-r--r--  arch/arm/mach-omap2/omap_device.c | 24
-rw-r--r--  arch/arm/mach-omap2/omap_device.h | 1
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod.c | 143
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c | 4
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 19
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_44xx_data.c | 12
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_54xx_data.c | 13
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_7xx_data.c | 2
-rw-r--r--  arch/arm/mach-omap2/pdata-quirks.c | 1
-rw-r--r--  arch/arm/mach-omap2/pm34xx.c | 2
-rw-r--r--  arch/arm/mach-omap2/powerdomain.c | 3
-rw-r--r--  arch/arm/mach-omap2/prm44xx_54xx.h | 2
-rw-r--r--  arch/arm/mach-pxa/include/mach/lubbock.h | 2
-rw-r--r--  arch/arm/mach-pxa/reset.c | 8
-rw-r--r--  arch/arm/mach-pxa/tosa.c | 102
-rw-r--r--  arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c | 11
-rw-r--r--  arch/arm/mach-shmobile/board-armadillo800eva.c | 7
-rw-r--r--  arch/arm/mach-shmobile/board-bockw.c | 2
-rw-r--r--  arch/arm/mach-shmobile/board-lager.c | 4
-rw-r--r--  arch/arm/mach-socfpga/Kconfig | 1
-rw-r--r--  arch/arm/mach-tegra/fuse.c | 12
-rw-r--r--  arch/arm/mach-ux500/cpu-db8500.c | 4
-rw-r--r--  arch/arm/mach-vexpress/spc.c | 40
-rw-r--r--  arch/arm/mach-vexpress/spc.h | 1
-rw-r--r--  arch/arm/mach-vexpress/tc2_pm.c | 66
-rw-r--r--  arch/arm/mm/dma-mapping.c | 88
-rw-r--r--  arch/arm/mm/init.c | 2
-rw-r--r--  arch/arm/mm/mmap.c | 2
-rw-r--r--  arch/arm/mm/mmu.c | 4
-rw-r--r--  arch/arm/mm/nommu.c | 1
-rw-r--r--  arch/arm/mm/pgd.c | 3
-rw-r--r--  arch/arm/mm/proc-v7.S | 17
-rw-r--r--  arch/arm/plat-omap/include/plat/dmtimer.h | 5
-rw-r--r--  arch/arm/xen/enlighten.c | 6
-rw-r--r--  arch/arm/xen/p2m.c | 5
-rw-r--r--  arch/arm64/Kconfig | 3
-rw-r--r--  arch/arm64/boot/dts/foundation-v8.dts | 2
-rw-r--r--  arch/arm64/include/asm/Kbuild | 1
-rw-r--r--  arch/arm64/include/asm/io.h | 2
-rw-r--r--  arch/arm64/include/asm/irqflags.h | 3
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h | 2
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 33
-rw-r--r--  arch/arm64/include/asm/xen/page-coherent.h | 4
-rw-r--r--  arch/arm64/kernel/debug-monitors.c | 20
-rw-r--r--  arch/arm64/kernel/entry.S | 29
-rw-r--r--  arch/arm64/kernel/head.S | 3
-rw-r--r--  arch/arm64/kernel/ptrace.c | 78
-rw-r--r--  arch/arm64/kernel/setup.c | 5
-rw-r--r--  arch/arm64/kernel/smp.c | 1
-rw-r--r--  arch/arm64/mm/proc.S | 2
-rw-r--r--  arch/avr32/boards/favr-32/setup.c | 4
-rw-r--r--  arch/avr32/boot/u-boot/head.S | 35
-rw-r--r--  arch/avr32/configs/atngw100_defconfig | 1
-rw-r--r--  arch/avr32/configs/atngw100_evklcd100_defconfig | 1
-rw-r--r--  arch/avr32/configs/atngw100_evklcd101_defconfig | 1
-rw-r--r--  arch/avr32/configs/atngw100_mrmt_defconfig | 1
-rw-r--r--  arch/avr32/configs/atngw100mkii_defconfig | 1
-rw-r--r--  arch/avr32/configs/atngw100mkii_evklcd100_defconfig | 1
-rw-r--r--  arch/avr32/configs/atngw100mkii_evklcd101_defconfig | 1
-rw-r--r--  arch/avr32/configs/atstk1002_defconfig | 1
-rw-r--r--  arch/avr32/configs/atstk1003_defconfig | 1
-rw-r--r--  arch/avr32/configs/atstk1004_defconfig | 1
-rw-r--r--  arch/avr32/configs/atstk1006_defconfig | 1
-rw-r--r--  arch/avr32/configs/favr-32_defconfig | 1
-rw-r--r--  arch/avr32/configs/hammerhead_defconfig | 1
-rw-r--r--  arch/avr32/configs/merisc_defconfig | 1
-rw-r--r--  arch/avr32/configs/mimc200_defconfig | 1
-rw-r--r--  arch/avr32/include/asm/Kbuild | 1
-rw-r--r--  arch/avr32/include/asm/kprobes.h | 14
-rw-r--r--  arch/avr32/include/uapi/asm/Kbuild | 24
-rw-r--r--  arch/avr32/include/uapi/asm/auxvec.h | 6
-rw-r--r--  arch/avr32/include/uapi/asm/bitsperlong.h | 1
-rw-r--r--  arch/avr32/include/uapi/asm/byteorder.h | 6
-rw-r--r--  arch/avr32/include/uapi/asm/cachectl.h | 6
-rw-r--r--  arch/avr32/include/uapi/asm/errno.h | 6
-rw-r--r--  arch/avr32/include/uapi/asm/fcntl.h | 6
-rw-r--r--  arch/avr32/include/uapi/asm/ioctl.h | 6
-rw-r--r--  arch/avr32/include/uapi/asm/ioctls.h | 6
-rw-r--r--  arch/avr32/include/uapi/asm/ipcbuf.h | 1
-rw-r--r--  arch/avr32/include/uapi/asm/kvm_para.h | 1
-rw-r--r--  arch/avr32/include/uapi/asm/mman.h | 1
-rw-r--r--  arch/avr32/include/uapi/asm/msgbuf.h | 6
-rw-r--r--  arch/avr32/include/uapi/asm/poll.h | 1
-rw-r--r--  arch/avr32/include/uapi/asm/posix_types.h | 6
-rw-r--r--  arch/avr32/include/uapi/asm/resource.h | 6
-rw-r--r--  arch/avr32/include/uapi/asm/sembuf.h | 6
-rw-r--r--  arch/avr32/include/uapi/asm/setup.h | 1
-rw-r--r--  arch/avr32/include/uapi/asm/shmbuf.h | 6
-rw-r--r--  arch/avr32/include/uapi/asm/sigcontext.h | 6
-rw-r--r--  arch/avr32/include/uapi/asm/siginfo.h | 6
-rw-r--r--  arch/avr32/include/uapi/asm/signal.h | 1
-rw-r--r--  arch/avr32/include/uapi/asm/socket.h | 6
-rw-r--r--  arch/avr32/include/uapi/asm/sockios.h | 6
-rw-r--r--  arch/avr32/include/uapi/asm/stat.h | 6
-rw-r--r--  arch/avr32/include/uapi/asm/statfs.h | 6
-rw-r--r--  arch/avr32/include/uapi/asm/swab.h | 6
-rw-r--r--  arch/avr32/include/uapi/asm/termbits.h | 6
-rw-r--r--  arch/avr32/include/uapi/asm/termios.h | 1
-rw-r--r--  arch/avr32/include/uapi/asm/types.h | 5
-rw-r--r--  arch/avr32/include/uapi/asm/unistd.h | 1
-rw-r--r--  arch/avr32/kernel/entry-avr32b.S | 3
-rw-r--r--  arch/avr32/kernel/head.S | 20
-rw-r--r--  arch/avr32/kernel/time.c | 2
-rw-r--r--  arch/avr32/mach-at32ap/pm.c | 2
-rw-r--r--  arch/blackfin/include/asm/Kbuild | 1
-rw-r--r--  arch/c6x/include/asm/Kbuild | 1
-rw-r--r--  arch/cris/include/asm/Kbuild | 1
-rw-r--r--  arch/frv/include/asm/Kbuild | 1
-rw-r--r--  arch/hexagon/include/asm/Kbuild | 1
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c | 2
-rw-r--r--  arch/ia64/include/asm/Kbuild | 3
-rw-r--r--  arch/ia64/include/asm/pci.h | 2
-rw-r--r--  arch/ia64/kernel/perfmon.c | 8
-rw-r--r--  arch/ia64/pci/pci.c | 6
-rw-r--r--  arch/ia64/sn/kernel/io_acpi_init.c | 4
-rw-r--r--  arch/m32r/include/asm/Kbuild | 1
-rw-r--r--  arch/m68k/include/asm/Kbuild | 1
-rw-r--r--  arch/metag/include/asm/Kbuild | 1
-rw-r--r--  arch/microblaze/include/asm/Kbuild | 1
-rw-r--r--  arch/mips/bcm47xx/setup.c | 10
-rw-r--r--  arch/mips/include/asm/Kbuild | 1
-rw-r--r--  arch/mn10300/include/asm/Kbuild | 1
-rw-r--r--  arch/openrisc/include/asm/Kbuild | 1
-rw-r--r--  arch/parisc/configs/c3000_defconfig | 2
-rw-r--r--  arch/parisc/configs/c8000_defconfig | 36
-rw-r--r--  arch/parisc/configs/generic-64bit_defconfig | 39
-rw-r--r--  arch/parisc/include/asm/Kbuild | 1
-rw-r--r--  arch/parisc/include/asm/serial.h | 2
-rw-r--r--  arch/parisc/include/asm/socket.h | 11
-rw-r--r--  arch/parisc/include/asm/uaccess.h | 46
-rw-r--r--  arch/parisc/include/uapi/asm/socket.h | 11
-rw-r--r--  arch/parisc/kernel/hardware.c | 7
-rw-r--r--  arch/parisc/kernel/head.S | 6
-rw-r--r--  arch/parisc/kernel/sys_parisc.c | 25
-rw-r--r--  arch/parisc/kernel/unwind.c | 9
-rw-r--r--  arch/parisc/kernel/vmlinux.lds.S | 138
-rw-r--r--  arch/parisc/lib/memcpy.c | 6
-rw-r--r--  arch/parisc/mm/fault.c | 22
-rw-r--r--  arch/parisc/mm/init.c | 19
-rw-r--r--  arch/powerpc/Makefile | 8
-rw-r--r--  arch/powerpc/boot/dts/fsl/b4si-post.dtsi | 4
-rw-r--r--  arch/powerpc/boot/dts/fsl/elo3-dma-0.dtsi | 82
-rw-r--r--  arch/powerpc/boot/dts/fsl/elo3-dma-1.dtsi | 82
-rw-r--r--  arch/powerpc/boot/dts/fsl/t4240si-post.dtsi | 4
-rw-r--r--  arch/powerpc/boot/dts/mpc5121.dtsi | 1
-rw-r--r--  arch/powerpc/boot/dts/mpc5125twr.dts | 6
-rw-r--r--  arch/powerpc/boot/dts/xcalibur1501.dts | 4
-rw-r--r--  arch/powerpc/boot/dts/xpedite5301.dts | 4
-rw-r--r--  arch/powerpc/boot/dts/xpedite5330.dts | 4
-rw-r--r--  arch/powerpc/boot/dts/xpedite5370.dts | 4
-rw-r--r--  arch/powerpc/boot/util.S | 14
-rw-r--r--  arch/powerpc/configs/52xx/cm5200_defconfig | 3
-rw-r--r--  arch/powerpc/configs/52xx/lite5200b_defconfig | 3
-rw-r--r--  arch/powerpc/configs/52xx/motionpro_defconfig | 3
-rw-r--r--  arch/powerpc/configs/52xx/pcm030_defconfig | 3
-rw-r--r--  arch/powerpc/configs/52xx/tqm5200_defconfig | 3
-rw-r--r--  arch/powerpc/configs/mpc5200_defconfig | 3
-rw-r--r--  arch/powerpc/configs/pasemi_defconfig | 7
-rw-r--r--  arch/powerpc/configs/pseries_le_defconfig | 352
-rw-r--r--  arch/powerpc/include/asm/Kbuild | 3
-rw-r--r--  arch/powerpc/include/asm/elf.h | 4
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h | 2
-rw-r--r--  arch/powerpc/include/asm/hvcall.h | 2
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h | 4
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h | 2
-rw-r--r--  arch/powerpc/include/asm/opal.h | 4
-rw-r--r--  arch/powerpc/include/asm/pgalloc-32.h | 6
-rw-r--r--  arch/powerpc/include/asm/pgalloc-64.h | 7
-rw-r--r--  arch/powerpc/include/asm/plpar_wrappers.h | 26
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h | 2
-rw-r--r--  arch/powerpc/include/asm/reg.h | 7
-rw-r--r--  arch/powerpc/include/asm/smp.h | 2
-rw-r--r--  arch/powerpc/include/asm/switch_to.h | 2
-rw-r--r--  arch/powerpc/include/asm/thread_info.h | 9
-rw-r--r--  arch/powerpc/include/asm/timex.h | 8
-rw-r--r--  arch/powerpc/include/asm/unaligned.h | 7
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 1
-rw-r--r--  arch/powerpc/kernel/crash_dump.c | 6
-rw-r--r--  arch/powerpc/kernel/eeh.c | 9
-rw-r--r--  arch/powerpc/kernel/eeh_event.c | 9
-rw-r--r--  arch/powerpc/kernel/head_64.S | 2
-rw-r--r--  arch/powerpc/kernel/machine_kexec.c | 14
-rw-r--r--  arch/powerpc/kernel/misc_64.S | 5
-rw-r--r--  arch/powerpc/kernel/nvram_64.c | 2
-rw-r--r--  arch/powerpc/kernel/process.c | 103
-rw-r--r--  arch/powerpc/kernel/prom.c | 20
-rw-r--r--  arch/powerpc/kernel/ptrace.c | 4
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 4
-rw-r--r--  arch/powerpc/kernel/signal_32.c | 6
-rw-r--r--  arch/powerpc/kernel/signal_64.c | 31
-rw-r--r--  arch/powerpc/kernel/smp.c | 20
-rw-r--r--  arch/powerpc/kernel/time.c | 4
-rw-r--r--  arch/powerpc/kernel/vdso32/gettimeofday.S | 6
-rw-r--r--  arch/powerpc/kernel/vdso64/sigtramp.S | 16
-rw-r--r--  arch/powerpc/kernel/vio.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c | 18
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 24
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c | 9
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 23
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S | 19
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c | 22
-rw-r--r--  arch/powerpc/kvm/book3s_rmhandlers.S | 6
-rw-r--r--  arch/powerpc/kvm/booke.c | 12
-rw-r--r--  arch/powerpc/lib/copyuser_64.S | 53
-rw-r--r--  arch/powerpc/mm/gup.c | 5
-rw-r--r--  arch/powerpc/mm/hugetlbpage-book3e.c | 3
-rw-r--r--  arch/powerpc/mm/slice.c | 2
-rw-r--r--  arch/powerpc/mm/tlb_nohash.c | 2
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 25
-rw-r--r--  arch/powerpc/platforms/powernv/eeh-ioda.c | 20
-rw-r--r--  arch/powerpc/platforms/powernv/opal-lpc.c | 12
-rw-r--r--  arch/powerpc/platforms/powernv/opal-xscom.c | 4
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 1
-rw-r--r--  arch/powerpc/platforms/powernv/pci.h | 4
-rw-r--r--  arch/powerpc/platforms/powernv/rng.c | 1
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pseries.c | 21
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 17
-rw-r--r--  arch/powerpc/platforms/pseries/lparcfg.c | 12
-rw-r--r--  arch/powerpc/platforms/pseries/msi.c | 28
-rw-r--r--  arch/powerpc/platforms/pseries/nvram.c | 46
-rw-r--r--  arch/powerpc/platforms/pseries/pci.c | 8
-rw-r--r--  arch/powerpc/platforms/pseries/rng.c | 1
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 42
-rw-r--r--  arch/powerpc/platforms/wsp/chroma.c | 1
-rw-r--r--  arch/powerpc/platforms/wsp/h8.c | 1
-rw-r--r--  arch/powerpc/platforms/wsp/ics.c | 2
-rw-r--r--  arch/powerpc/platforms/wsp/opb_pic.c | 2
-rw-r--r--  arch/powerpc/platforms/wsp/psr2.c | 1
-rw-r--r--  arch/powerpc/platforms/wsp/scom_wsp.c | 1
-rw-r--r--  arch/powerpc/platforms/wsp/wsp.c | 1
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_ocm.c | 2
-rw-r--r--  arch/s390/Kconfig | 8
-rw-r--r--  arch/s390/crypto/aes_s390.c | 50
-rw-r--r--  arch/s390/include/asm/Kbuild | 1
-rw-r--r--  arch/s390/include/asm/page.h | 38
-rw-r--r--  arch/s390/include/asm/sclp.h | 3
-rw-r--r--  arch/s390/include/asm/vdso.h | 5
-rw-r--r--  arch/s390/kernel/asm-offsets.c | 4
-rw-r--r--  arch/s390/kernel/compat_signal.c | 2
-rw-r--r--  arch/s390/kernel/pgm_check.S | 2
-rw-r--r--  arch/s390/kernel/signal.c | 2
-rw-r--r--  arch/s390/kernel/time.c | 46
-rw-r--r--  arch/s390/kernel/vdso.c | 2
-rw-r--r--  arch/s390/kernel/vdso32/clock_gettime.S | 31
-rw-r--r--  arch/s390/kernel/vdso32/gettimeofday.S | 9
-rw-r--r--  arch/s390/kernel/vdso64/clock_getres.S | 4
-rw-r--r--  arch/s390/kernel/vdso64/clock_gettime.S | 24
-rw-r--r--  arch/s390/kernel/vdso64/gettimeofday.S | 9
-rw-r--r--  arch/s390/lib/uaccess_pt.c | 3
-rw-r--r--  arch/score/include/asm/Kbuild | 2
-rw-r--r--  arch/sh/include/asm/Kbuild | 1
-rw-r--r--  arch/sh/lib/Makefile | 2
-rw-r--r--  arch/sparc/include/asm/Kbuild | 1
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h | 4
-rw-r--r--  arch/tile/include/asm/Kbuild | 1
-rw-r--r--  arch/um/Makefile | 9
-rw-r--r--  arch/um/include/asm/Kbuild | 1
-rw-r--r--  arch/um/kernel/sysrq.c | 4
-rw-r--r--  arch/unicore32/include/asm/Kbuild | 1
-rw-r--r--  arch/x86/Kconfig | 1
-rw-r--r--  arch/x86/Makefile | 8
-rw-r--r--  arch/x86/boot/Makefile | 6
-rw-r--r--  arch/x86/boot/compressed/Makefile | 1
-rw-r--r--  arch/x86/crypto/Makefile | 3
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c | 2
-rw-r--r--  arch/x86/crypto/camellia_aesni_avx2_glue.c | 2
-rw-r--r--  arch/x86/crypto/camellia_aesni_avx_glue.c | 2
-rw-r--r--  arch/x86/crypto/cast5_avx_glue.c | 2
-rw-r--r--  arch/x86/crypto/cast6_avx_glue.c | 2
-rw-r--r--  arch/x86/crypto/serpent_avx2_glue.c | 2
-rw-r--r--  arch/x86/crypto/serpent_avx_glue.c | 2
-rw-r--r--  arch/x86/crypto/serpent_sse2_glue.c | 2
-rw-r--r--  arch/x86/crypto/sha256_ssse3_glue.c | 4
-rw-r--r--  arch/x86/crypto/twofish_avx_glue.c | 2
-rw-r--r--  arch/x86/include/asm/atomic.h | 4
-rw-r--r--  arch/x86/include/asm/atomic64_64.h | 4
-rw-r--r--  arch/x86/include/asm/bitops.h | 6
-rw-r--r--  arch/x86/include/asm/hash.h | 7
-rw-r--r--  arch/x86/include/asm/local.h | 4
-rw-r--r--  arch/x86/include/asm/pci.h | 2
-rw-r--r--  arch/x86/include/asm/pgtable.h | 11
-rw-r--r--  arch/x86/include/asm/preempt.h | 11
-rw-r--r--  arch/x86/include/asm/rmwcc.h | 8
-rw-r--r--  arch/x86/include/asm/simd.h | 11
-rw-r--r--  arch/x86/include/asm/trace/irq_vectors.h | 11
-rw-r--r--  arch/x86/include/uapi/asm/msr-index.h | 2
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 3
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h | 15
-rw-r--r--  arch/x86/kernel/early-quirks.c | 4
-rw-r--r--  arch/x86/kernel/reboot.c | 11
-rw-r--r--  arch/x86/kvm/lapic.c | 35
-rw-r--r--  arch/x86/kvm/lapic.h | 4
-rw-r--r--  arch/x86/kvm/mmu_audit.c | 2
-rw-r--r--  arch/x86/kvm/x86.c | 40
-rw-r--r--  arch/x86/lib/Makefile | 2
-rw-r--r--  arch/x86/lib/hash.c | 88
-rw-r--r--  arch/x86/mm/gup.c | 13
-rw-r--r--  arch/x86/mm/pgtable.c | 6
-rw-r--r--  arch/x86/pci/acpi.c | 4
-rw-r--r--  arch/x86/platform/efi/early_printk.c | 2
-rw-r--r--  arch/x86/platform/efi/efi.c | 7
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c | 5
-rw-r--r--  arch/x86/realmode/rm/Makefile | 3
-rw-r--r--  arch/xtensa/include/asm/Kbuild | 1
-rw-r--r--  block/blk-cgroup.h | 8
-rw-r--r--  block/blk-flush.c | 19
-rw-r--r--  block/blk-mq-sysfs.c | 13
-rw-r--r--  block/blk-mq.c | 30
-rw-r--r--  block/partitions/efi.c | 5
-rw-r--r--  crypto/Kconfig | 26
-rw-r--r--  crypto/Makefile | 9
-rw-r--r--  crypto/ablk_helper.c (renamed from arch/x86/crypto/ablk_helper.c) | 13
-rw-r--r--  crypto/ablkcipher.c | 21
-rw-r--r--  crypto/algif_hash.c | 3
-rw-r--r--  crypto/algif_skcipher.c | 3
-rw-r--r--  crypto/ansi_cprng.c | 4
-rw-r--r--  crypto/asymmetric_keys/Kconfig | 4
-rw-r--r--  crypto/asymmetric_keys/asymmetric_type.c | 1
-rw-r--r--  crypto/asymmetric_keys/public_key.c | 66
-rw-r--r--  crypto/asymmetric_keys/public_key.h | 6
-rw-r--r--  crypto/asymmetric_keys/rsa.c | 19
-rw-r--r--  crypto/asymmetric_keys/x509_cert_parser.c | 35
-rw-r--r--  crypto/asymmetric_keys/x509_parser.h | 18
-rw-r--r--  crypto/asymmetric_keys/x509_public_key.c | 157
-rw-r--r--  crypto/async_tx/async_memcpy.c | 37
-rw-r--r--  crypto/async_tx/async_pq.c | 174
-rw-r--r--  crypto/async_tx/async_raid6_recov.c | 61
-rw-r--r--  crypto/async_tx/async_tx.c | 4
-rw-r--r--  crypto/async_tx/async_xor.c | 123
-rw-r--r--  crypto/async_tx/raid6test.c | 10
-rw-r--r--  crypto/authenc.c | 61
-rw-r--r--  crypto/authencesn.c | 34
-rw-r--r--  crypto/ccm.c | 7
-rw-r--r--  crypto/gcm.c | 2
-rw-r--r--  crypto/hash_info.c | 56
-rw-r--r--  crypto/memneq.c | 138
-rw-r--r--  crypto/tcrypt.c | 4
-rw-r--r--  crypto/testmgr.c | 26
-rw-r--r--  drivers/acpi/Kconfig | 12
-rw-r--r--  drivers/acpi/ac.c | 15
-rw-r--r--  drivers/acpi/acpi_lpss.c | 10
-rw-r--r--  drivers/acpi/acpi_platform.c | 2
-rw-r--r--  drivers/acpi/acpica/acresrc.h | 6
-rw-r--r--  drivers/acpi/acpica/nsalloc.c | 18
-rw-r--r--  drivers/acpi/acpica/nsutils.c | 18
-rw-r--r--  drivers/acpi/acpica/rscalc.c | 9
-rw-r--r--  drivers/acpi/acpica/rscreate.c | 36
-rw-r--r--  drivers/acpi/acpica/rsutils.c | 2
-rw-r--r--  drivers/acpi/acpica/utdebug.c | 31
-rw-r--r--  drivers/acpi/apei/Kconfig | 1
-rw-r--r--  drivers/acpi/apei/erst.c | 1
-rw-r--r--  drivers/acpi/blacklist.c | 35
-rw-r--r--  drivers/acpi/device_pm.c | 14
-rw-r--r--  drivers/acpi/ec.c | 3
-rw-r--r--  drivers/acpi/glue.c | 53
-rw-r--r--  drivers/acpi/nvs.c | 1
-rw-r--r--  drivers/acpi/pci_root.c | 4
-rw-r--r--  drivers/acpi/scan.c | 16
-rw-r--r--  drivers/acpi/sleep.c | 2
-rw-r--r--  drivers/acpi/sysfs.c | 54
-rw-r--r--  drivers/acpi/video.c | 87
-rw-r--r--  drivers/ata/ahci.c | 20
-rw-r--r--  drivers/ata/ahci_imx.c | 3
-rw-r--r--  drivers/ata/ahci_platform.c | 1
-rw-r--r--  drivers/ata/libata-acpi.c | 4
-rw-r--r--  drivers/ata/libata-core.c | 22
-rw-r--r--  drivers/ata/libata-scsi.c | 22
-rw-r--r--  drivers/ata/libata-zpodd.c | 4
-rw-r--r--  drivers/ata/pata_arasan_cf.c | 4
-rw-r--r--  drivers/atm/he.c | 1
-rw-r--r--  drivers/atm/nicstar.c | 4
-rw-r--r--  drivers/atm/solos-pci.c | 2
-rw-r--r--  drivers/base/platform.c | 4
-rw-r--r--  drivers/base/power/main.c | 3
-rw-r--r--  drivers/base/regmap/regmap-mmio.c | 11
-rw-r--r--  drivers/base/regmap/regmap.c | 8
-rw-r--r--  drivers/bcma/bcma_private.h | 2
-rw-r--r--  drivers/bcma/main.c | 13
-rw-r--r--  drivers/block/null_blk.c | 118
-rw-r--r--  drivers/block/skd_main.c | 4
-rw-r--r--  drivers/block/virtio_blk.c | 5
-rw-r--r--  drivers/block/xen-blkfront.c | 7
-rw-r--r--  drivers/char/hw_random/Kconfig | 25
-rw-r--r--  drivers/char/hw_random/Makefile | 2
-rw-r--r--  drivers/char/hw_random/msm-rng.c | 197
-rw-r--r--  drivers/char/hw_random/omap3-rom-rng.c | 141
-rw-r--r--  drivers/char/hw_random/pseries-rng.c | 5
-rw-r--r--  drivers/char/hw_random/via-rng.c | 2
-rw-r--r--  drivers/char/i8k.c | 7
-rw-r--r--  drivers/char/tpm/Kconfig | 37
-rw-r--r--  drivers/char/tpm/Makefile | 11
-rw-r--r--  drivers/char/tpm/tpm-interface.c (renamed from drivers/char/tpm/tpm.c) | 138
-rw-r--r--  drivers/char/tpm/tpm.h | 3
-rw-r--r--  drivers/char/tpm/tpm_atmel.c | 2
-rw-r--r--  drivers/char/tpm/tpm_eventlog.c | 3
-rw-r--r--  drivers/char/tpm/tpm_i2c_atmel.c | 284
-rw-r--r--  drivers/char/tpm/tpm_i2c_infineon.c | 4
-rw-r--r--  drivers/char/tpm/tpm_i2c_nuvoton.c | 710
-rw-r--r--  drivers/char/tpm/tpm_i2c_stm_st33.c | 12
-rw-r--r--  drivers/char/tpm/tpm_ibmvtpm.c | 6
-rw-r--r--  drivers/char/tpm/tpm_ppi.c | 4
-rw-r--r--  drivers/char/tpm/tpm_tis.c | 2
-rw-r--r--  drivers/char/tpm/xen-tpmfront.c | 2
-rw-r--r--  drivers/clk/clk-s2mps11.c | 6
-rw-r--r--  drivers/clocksource/Kconfig | 2
-rw-r--r--  drivers/clocksource/clksrc-of.c | 1
-rw-r--r--  drivers/clocksource/dw_apb_timer_of.c | 7
-rw-r--r--  drivers/clocksource/sh_mtu2.c | 16
-rw-r--r--  drivers/clocksource/sh_tmu.c | 20
-rw-r--r--  drivers/clocksource/sun4i_timer.c | 3
-rw-r--r--  drivers/clocksource/time-armada-370-xp.c | 10
-rw-r--r--  drivers/cpufreq/at32ap-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq.c | 69
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 3
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 4
-rw-r--r--  drivers/cpufreq/exynos4210-cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/exynos4x12-cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/exynos5250-cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/omap-cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/tegra-cpufreq.c | 4
-rw-r--r--  drivers/cpuidle/cpuidle.c | 2
-rw-r--r--  drivers/crypto/caam/Kconfig | 25
-rw-r--r--  drivers/crypto/caam/Makefile | 4
-rw-r--r--  drivers/crypto/caam/caamalg.c | 134
-rw-r--r--  drivers/crypto/caam/caamhash.c | 88
-rw-r--r--  drivers/crypto/caam/caamrng.c | 29
-rw-r--r--  drivers/crypto/caam/ctrl.c | 418
-rw-r--r--  drivers/crypto/caam/desc.h | 17
-rw-r--r--  drivers/crypto/caam/intern.h | 20
-rw-r--r--  drivers/crypto/caam/jr.c | 340
-rw-r--r--  drivers/crypto/caam/jr.h | 5
-rw-r--r--  drivers/crypto/caam/regs.h | 14
-rw-r--r--  drivers/crypto/caam/sg_sw_sec4.h | 34
-rw-r--r--  drivers/crypto/dcp.c | 49
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 26
-rw-r--r--  drivers/crypto/mv_cesa.c | 14
-rw-r--r--  drivers/crypto/omap-aes.c | 6
-rw-r--r--  drivers/crypto/omap-sham.c | 1
-rw-r--r--  drivers/crypto/picoxcell_crypto.c | 32
-rw-r--r--  drivers/crypto/sahara.c | 2
-rw-r--r--  drivers/crypto/talitos.c | 103
-rw-r--r--  drivers/crypto/tegra-aes.c | 26
-rw-r--r--  drivers/dma/Kconfig | 16
-rw-r--r--  drivers/dma/amba-pl08x.c | 39
-rw-r--r--  drivers/dma/at_hdmac.c | 28
-rw-r--r--  drivers/dma/at_hdmac_regs.h | 4
-rw-r--r--  drivers/dma/coh901318.c | 4
-rw-r--r--  drivers/dma/cppi41.c | 178
-rw-r--r--  drivers/dma/dma-jz4740.c | 2
-rw-r--r--  drivers/dma/dmaengine.c | 264
-rw-r--r--  drivers/dma/dmatest.c | 917
-rw-r--r--  drivers/dma/dw/core.c | 29
-rw-r--r--  drivers/dma/edma.c | 369
-rw-r--r--  drivers/dma/ep93xx_dma.c | 30
-rw-r--r--  drivers/dma/fsldma.c | 57
-rw-r--r--  drivers/dma/fsldma.h | 2
-rw-r--r--  drivers/dma/imx-dma.c | 42
-rw-r--r--  drivers/dma/imx-sdma.c | 10
-rw-r--r--  drivers/dma/intel_mid_dma.c | 4
-rw-r--r--  drivers/dma/ioat/dma.c | 53
-rw-r--r--  drivers/dma/ioat/dma.h | 14
-rw-r--r--  drivers/dma/ioat/dma_v2.c | 2
-rw-r--r--  drivers/dma/ioat/dma_v2.h | 1
-rw-r--r--  drivers/dma/ioat/dma_v3.c | 323
-rw-r--r--  drivers/dma/ioat/pci.c | 20
-rw-r--r--  drivers/dma/iop-adma.c | 113
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c | 6
-rw-r--r--  drivers/dma/k3dma.c | 4
-rw-r--r--  drivers/dma/mmp_pdma.c | 8
-rw-r--r--  drivers/dma/mmp_tdma.c | 40
-rw-r--r--  drivers/dma/mv_xor.c | 159
-rw-r--r--  drivers/dma/mv_xor.h | 25
-rw-r--r--  drivers/dma/mxs-dma.c | 178
-rw-r--r--  drivers/dma/omap-dma.c | 2
-rw-r--r--  drivers/dma/pl330.c | 37
-rw-r--r--  drivers/dma/ppc4xx/adma.c | 299
-rw-r--r--  drivers/dma/s3c24xx-dma.c | 33
-rw-r--r--  drivers/dma/sa11x0-dma.c | 2
-rw-r--r--  drivers/dma/sh/rcar-hpbdma.c | 11
-rw-r--r--  drivers/dma/sh/shdma-base.c | 2
-rw-r--r--  drivers/dma/sh/shdmac.c | 4
-rw-r--r--  drivers/dma/ste_dma40.c | 7
-rw-r--r--  drivers/dma/tegra20-apb-dma.c | 6
-rw-r--r--  drivers/dma/timb_dma.c | 37
-rw-r--r--  drivers/dma/txx9dmac.c | 30
-rw-r--r--  drivers/edac/sb_edac.c | 2
-rw-r--r--  drivers/extcon/extcon-arizona.c | 4
-rw-r--r--  drivers/extcon/extcon-class.c | 3
-rw-r--r--  drivers/firmware/Makefile | 1
-rw-r--r--  drivers/firmware/efi/Kconfig | 6
-rw-r--r--  drivers/firmware/efi/Makefile | 2
-rw-r--r--  drivers/firmware/efi/efi-pstore.c | 164
-rw-r--r--  drivers/firmware/efi/efivars.c | 12
-rw-r--r--  drivers/firmware/efi/vars.c | 12
-rw-r--r--  drivers/gpio/gpio-bcm-kona.c | 2
-rw-r--r--  drivers/gpio/gpio-davinci.c | 4
-rw-r--r--  drivers/gpio/gpio-mpc8xxx.c | 8
-rw-r--r--  drivers/gpio/gpio-msm-v2.c | 6
-rw-r--r--  drivers/gpio/gpio-mvebu.c | 2
-rw-r--r--  drivers/gpio/gpio-pl061.c | 10
-rw-r--r--  drivers/gpio/gpio-rcar.c | 5
-rw-r--r--  drivers/gpio/gpio-tb10x.c | 1
-rw-r--r--  drivers/gpio/gpio-twl4030.c | 26
-rw-r--r--  drivers/gpio/gpio-ucb1400.c | 1
-rw-r--r--  drivers/gpio/gpiolib.c | 79
-rw-r--r--  drivers/gpu/drm/armada/armada_drm.h | 1
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 7
-rw-r--r--  drivers/gpu/drm/armada/armada_fbdev.c | 20
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.c | 7
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 12
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 6
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 40
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 35
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 20
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 41
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 88
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_acpi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 61
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 62
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv50.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nv50.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/clock.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c | 445
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mxm/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/overlay.c | 42
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hwmon.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/atombios_i2c.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 57
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/cypress_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/dce6_afmt.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_dma.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/ni_dpm.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/r600_dma.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 46
-rw-r--r--  drivers/gpu/drm/radeon/radeon_acpi.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 47
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 28
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 57
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 46
-rw-r--r--  drivers/gpu/drm/radeon/radeon_semaphore.c | 129
-rw-r--r--  drivers/gpu/drm/radeon/radeon_trace.h | 69
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/cayman | 4
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/evergreen | 4
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dma.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/si_dma.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v1_0.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v3_1.c | 4
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 34
-rw-r--r--  drivers/gpu/drm/tegra/drm.h | 2
-rw-r--r--  drivers/gpu/drm/tegra/fb.c | 2
-rw-r--r--  drivers/gpu/drm/tegra/rgb.c | 11
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 35
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 32
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c | 32
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c | 254
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 15
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_prime.c | 137
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 165
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 30
-rw-r--r--  drivers/gpu/host1x/bus.c | 5
-rw-r--r--  drivers/gpu/host1x/hw/cdma_hw.c | 4
-rw-r--r--  drivers/gpu/host1x/hw/debug_hw.c | 4
-rw-r--r--  drivers/hid/Kconfig | 1
-rw-r--r--  drivers/hid/hid-appleir.c | 3
-rw-r--r--  drivers/hid/hid-core.c | 2
-rw-r--r--  drivers/hid/hid-ids.h | 5
-rw-r--r--  drivers/hid/hid-kye.c | 14
-rw-r--r--  drivers/hid/hid-multitouch.c | 6
-rw-r--r--  drivers/hid/hid-sensor-hub.c | 22
-rw-r--r--  drivers/hid/hid-sony.c | 53
-rw-r--r--  drivers/hid/hid-wiimote-core.c | 5
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid.c | 2
-rw-r--r--  drivers/hid/uhid.c | 2
-rw-r--r--  drivers/hwmon/Kconfig | 1
-rw-r--r--  drivers/hwmon/acpi_power_meter.c | 5
-rw-r--r--  drivers/hwmon/asus_atk0110.c | 1
-rw-r--r--  drivers/hwmon/hih6130.c | 16
-rw-r--r--  drivers/hwmon/lm75.c | 3
-rw-r--r--  drivers/hwmon/lm78.c | 2
-rw-r--r--  drivers/hwmon/lm90.c | 4
-rw-r--r--  drivers/hwmon/nct6775.c | 91
-rw-r--r--  drivers/hwmon/sis5595.c | 2
-rw-r--r--  drivers/hwmon/vt8231.c | 2
-rw-r--r--  drivers/hwmon/w83l786ng.c | 13
-rw-r--r--  drivers/i2c/busses/i2c-bcm-kona.c | 3
-rw-r--r--  drivers/i2c/busses/i2c-bcm2835.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-davinci.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-diolan-u2c.c | 16
-rw-r--r--  drivers/i2c/busses/i2c-imx.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-omap.c | 30
-rw-r--r--  drivers/i2c/i2c-core.c | 25
-rw-r--r--  drivers/i2c/i2c-mux.c | 2
-rw-r--r--  drivers/ide/ide-acpi.c | 5
-rw-r--r--  drivers/idle/intel_idle.c | 27
-rw-r--r--  drivers/iio/accel/hid-sensor-accel-3d.c | 5
-rw-r--r--  drivers/iio/accel/kxsd9.c | 7
-rw-r--r--  drivers/iio/adc/ad7887.c | 16
-rw-r--r--  drivers/iio/adc/at91_adc.c | 1
-rw-r--r--  drivers/iio/adc/mcp3422.c | 8
-rw-r--r--  drivers/iio/adc/ti_am335x_adc.c | 7
-rw-r--r--  drivers/iio/common/hid-sensors/Kconfig | 9
-rw-r--r--  drivers/iio/common/hid-sensors/hid-sensor-trigger.c | 29
-rw-r--r--  drivers/iio/common/hid-sensors/hid-sensor-trigger.h | 2
-rw-r--r--  drivers/iio/gyro/hid-sensor-gyro-3d.c | 5
-rw-r--r--  drivers/iio/imu/adis16400_core.c | 7
-rw-r--r--  drivers/iio/light/Kconfig | 3
-rw-r--r--  drivers/iio/light/cm36651.c | 2
-rw-r--r--  drivers/iio/light/hid-sensor-als.c | 5
-rw-r--r--  drivers/iio/magnetometer/Kconfig | 2
-rw-r--r--  drivers/iio/magnetometer/hid-sensor-magn-3d.c | 5
-rw-r--r--  drivers/iio/magnetometer/mag3110.c | 7
-rw-r--r--  drivers/infiniband/core/iwcm.c | 11
-rw-r--r--  drivers/infiniband/core/uverbs.h | 10
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 17
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 27
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 78
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c | 3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_netlink.c | 3
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 121
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h | 6
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c | 10
-rw-r--r--  drivers/input/keyboard/adp5588-keys.c | 3
-rw-r--r--  drivers/input/keyboard/adp5589-keys.c | 3
-rw-r--r--  drivers/input/keyboard/bf54x-keys.c | 3
-rw-r--r--  drivers/input/misc/adxl34x.c | 2
-rw-r--r--  drivers/input/misc/hp_sdc_rtc.c | 5
-rw-r--r--  drivers/input/misc/pcf8574_keypad.c | 7
-rw-r--r--  drivers/input/mouse/alps.c | 206
-rw-r--r--  drivers/input/mouse/alps.h | 1
-rw-r--r--  drivers/input/mouse/elantech.c | 1
-rw-r--r--  drivers/input/serio/serio.c | 24
-rw-r--r--  drivers/input/touchscreen/Kconfig | 11
-rw-r--r--  drivers/input/touchscreen/Makefile | 1
-rw-r--r--  drivers/input/touchscreen/atmel-wm97xx.c | 2
-rw-r--r--  drivers/input/touchscreen/cyttsp4_core.c | 3
-rw-r--r--  drivers/input/touchscreen/sur40.c | 466
-rw-r--r--  drivers/input/touchscreen/usbtouchscreen.c | 22
-rw-r--r--  drivers/iommu/arm-smmu.c | 66
-rw-r--r--  drivers/irqchip/irq-gic.c | 9
-rw-r--r--  drivers/irqchip/irq-renesas-intc-irqpin.c | 8
-rw-r--r--  drivers/isdn/hisax/hfc_pci.c | 4
-rw-r--r--  drivers/isdn/hisax/telespci.c | 4
-rw-r--r--  drivers/isdn/i4l/isdn_net.c | 4
-rw-r--r--  drivers/isdn/sc/event.c | 2
-rw-r--r--  drivers/leds/leds-pwm.c | 53
-rw-r--r--  drivers/macintosh/Makefile | 1
-rw-r--r--  drivers/md/bcache/alloc.c | 2
-rw-r--r--  drivers/md/bcache/bcache.h | 12
-rw-r--r--  drivers/md/bcache/btree.c | 27
-rw-r--r--  drivers/md/bcache/movinggc.c | 21
-rw-r--r--  drivers/md/bcache/super.c | 2
-rw-r--r--  drivers/md/bcache/sysfs.c | 50
-rw-r--r--  drivers/md/bcache/util.c | 8
-rw-r--r--  drivers/md/bcache/util.h | 2
-rw-r--r--  drivers/md/bcache/writeback.c | 53
-rw-r--r--  drivers/md/dm-bufio.c | 5
-rw-r--r--  drivers/md/dm-cache-policy-mq.c | 13
-rw-r--r--  drivers/md/dm-cache-target.c | 2
-rw-r--r--  drivers/md/dm-delay.c | 23
-rw-r--r--  drivers/md/dm-snap.c | 71
-rw-r--r--  drivers/md/dm-stats.c | 1
-rw-r--r--  drivers/md/dm-table.c | 5
-rw-r--r--  drivers/md/dm-thin-metadata.c | 8
-rw-r--r--  drivers/md/dm-thin-metadata.h | 1
-rw-r--r--  drivers/md/dm-thin.c | 66
-rw-r--r--  drivers/md/persistent-data/dm-array.c | 10
-rw-r--r--  drivers/md/persistent-data/dm-block-manager.c | 6
-rw-r--r--  drivers/md/persistent-data/dm-block-manager.h | 7
-rw-r--r--  drivers/md/persistent-data/dm-space-map-common.c | 32
-rw-r--r--  drivers/md/persistent-data/dm-space-map-metadata.c | 8
-rw-r--r--  drivers/md/raid1.c | 162
-rw-r--r--  drivers/md/raid1.h | 15
-rw-r--r--  drivers/md/raid10.c | 6
-rw-r--r--  drivers/md/raid5.c | 425
-rw-r--r--  drivers/md/raid5.h | 16
-rw-r--r--  drivers/media/common/siano/smscoreapi.h | 4
-rw-r--r--  drivers/media/common/siano/smsdvb.h | 2
-rw-r--r--  drivers/media/dvb-core/dvb_demux.c | 9
-rw-r--r--  drivers/media/dvb-core/dvb_net.c | 10
-rw-r--r--  drivers/media/dvb-frontends/af9033.c | 12
-rw-r--r--  drivers/media/dvb-frontends/cxd2820r_c.c | 2
-rw-r--r--  drivers/media/dvb-frontends/dib8000.c | 4
-rw-r--r--  drivers/media/dvb-frontends/drxk_hard.c | 18
-rw-r--r--  drivers/media/dvb-frontends/rtl2830.c | 1
-rw-r--r--  drivers/media/i2c/adv7183_regs.h | 6
-rw-r--r--  drivers/media/i2c/adv7604.c | 2
-rw-r--r--  drivers/media/i2c/adv7842.c | 2
-rw-r--r--  drivers/media/i2c/ir-kbd-i2c.c | 2
-rw-r--r--  drivers/media/i2c/m5mols/m5mols_controls.c | 2
-rw-r--r--  drivers/media/i2c/mt9p031.c | 1
-rw-r--r--  drivers/media/i2c/s5c73m3/s5c73m3-core.c | 2
-rw-r--r--  drivers/media/i2c/s5c73m3/s5c73m3.h | 2
-rw-r--r--  drivers/media/i2c/saa7115.c | 2
-rw-r--r--  drivers/media/i2c/soc_camera/ov5642.c | 2
-rw-r--r--  drivers/media/i2c/ths7303.c | 3
-rw-r--r--  drivers/media/i2c/wm8775.c | 4
-rw-r--r--  drivers/media/pci/bt8xx/bttv-driver.c | 3
-rw-r--r--  drivers/media/pci/cx18/cx18-driver.h | 2
-rw-r--r--  drivers/media/pci/cx23885/cx23885-417.c | 2
-rw-r--r--  drivers/media/pci/pluto2/pluto2.c | 2
-rw-r--r--  drivers/media/pci/saa7164/saa7164-core.c | 4
-rw-r--r--  drivers/media/platform/coda.c | 2
-rw-r--r--  drivers/media/platform/exynos4-is/fimc-core.c | 2
-rw-r--r--  drivers/media/platform/exynos4-is/media-dev.c | 2
-rw-r--r--  drivers/media/platform/m2m-deinterlace.c | 3
-rw-r--r--  drivers/media/platform/marvell-ccic/mmp-driver.c | 46
-rw-r--r--  drivers/media/platform/omap3isp/isp.c | 2
-rw-r--r--  drivers/media/platform/omap3isp/ispvideo.c | 7
-rw-r--r--  drivers/media/platform/s5p-mfc/regs-mfc.h | 2
-rw-r--r--  drivers/media/platform/s5p-mfc/s5p_mfc.c | 12
-rw-r--r--  drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c | 2
-rw-r--r--  drivers/media/platform/s5p-tv/mixer.h | 2
-rw-r--r--  drivers/media/platform/s5p-tv/mixer_video.c | 4
-rw-r--r--  drivers/media/platform/soc_camera/omap1_camera.c | 2
-rw-r--r--  drivers/media/platform/timblogiw.c | 2
-rw-r--r--  drivers/media/platform/vivi.c | 4
-rw-r--r--  drivers/media/platform/vsp1/vsp1_drv.c | 2
-rw-r--r--  drivers/media/platform/vsp1/vsp1_video.c | 4
-rw-r--r--  drivers/media/radio/radio-shark.c | 4
-rw-r--r--  drivers/media/radio/radio-shark2.c | 4
-rw-r--r--  drivers/media/radio/radio-si476x.c | 4
-rw-r--r--  drivers/media/radio/radio-tea5764.c | 2
-rw-r--r--  drivers/media/radio/tef6862.c | 2
-rw-r--r--  drivers/media/rc/imon.c | 2
-rw-r--r--  drivers/media/rc/redrat3.c | 2
-rw-r--r--  drivers/media/tuners/mt2063.c | 4
-rw-r--r--  drivers/media/tuners/tuner-xc2028-types.h | 2
-rw-r--r--  drivers/media/usb/cx231xx/cx231xx-cards.c | 2
-rw-r--r--  drivers/media/usb/dvb-usb-v2/af9035.c | 17
-rw-r--r--  drivers/media/usb/dvb-usb-v2/mxl111sf.c | 4
-rw-r--r--  drivers/media/usb/dvb-usb/technisat-usb2.c | 2
-rw-r--r--  drivers/media/usb/em28xx/em28xx-video.c | 2
-rw-r--r--  drivers/media/usb/gspca/gl860/gl860.c | 2
-rw-r--r--  drivers/media/usb/gspca/pac207.c | 2
-rw-r--r--  drivers/media/usb/gspca/pac7302.c | 2
-rw-r--r--  drivers/media/usb/gspca/stk1135.c | 3
-rw-r--r--  drivers/media/usb/gspca/stv0680.c | 2
-rw-r--r--  drivers/media/usb/gspca/sunplus.c | 1
-rw-r--r--  drivers/media/usb/gspca/zc3xx.c | 2
-rw-r--r--  drivers/media/usb/pwc/pwc-if.c | 2
-rw-r--r--  drivers/media/usb/usbtv/usbtv.c | 174
-rw-r--r--  drivers/media/usb/uvc/uvc_video.c | 2
-rw-r--r--  drivers/media/v4l2-core/v4l2-ctrls.c | 2
-rw-r--r--  drivers/media/v4l2-core/videobuf2-core.c | 29
-rw-r--r--  drivers/media/v4l2-core/videobuf2-dma-contig.c | 4
-rw-r--r--  drivers/media/v4l2-core/videobuf2-dma-sg.c | 3
-rw-r--r--  drivers/mfd/Kconfig | 2
-rw-r--r--  drivers/mfd/lpc_ich.c | 2
-rw-r--r--  drivers/mfd/sec-core.c | 30
-rw-r--r--  drivers/mfd/sec-irq.c | 6
-rw-r--r--  drivers/mfd/ti-ssp.c | 2
-rw-r--r--  drivers/misc/carma/carma-fpga.c | 3
-rw-r--r--  drivers/misc/enclosure.c | 7
-rw-r--r--  drivers/misc/mei/hw-me-regs.h | 5
-rw-r--r--  drivers/misc/mei/pci-me.c | 4
-rw-r--r--  drivers/misc/mic/card/mic_virtio.c | 33
-rw-r--r--  drivers/misc/mic/card/mic_virtio.h | 7
-rw-r--r--  drivers/misc/mic/host/mic_boot.c | 2
-rw-r--r--  drivers/misc/mic/host/mic_virtio.c | 30
-rw-r--r--  drivers/misc/mic/host/mic_x100.c | 4
-rw-r--r--  drivers/mmc/core/sdio_bus.c | 3
-rw-r--r--  drivers/mmc/host/omap.c | 45
-rw-r--r--  drivers/mtd/nand/atmel_nand.c | 3
-rw-r--r--  drivers/mtd/nand/fsmc_nand.c | 2
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c | 6
-rw-r--r--  drivers/net/Space.c | 29
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 219
-rw-r--r--  drivers/net/bonding/bond_3ad.h | 2
-rw-r--r--  drivers/net/bonding/bond_alb.c | 49
-rw-r--r--  drivers/net/bonding/bond_alb.h | 3
-rw-r--r--  drivers/net/bonding/bond_main.c | 273
-rw-r--r--  drivers/net/bonding/bond_netlink.c | 378
-rw-r--r--  drivers/net/bonding/bond_options.c | 624
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 690
-rw-r--r--  drivers/net/bonding/bonding.h | 49
-rw-r--r--  drivers/net/can/Kconfig | 2
-rw-r--r--  drivers/net/can/c_can/c_can.c | 44
-rw-r--r--  drivers/net/can/dev.c | 3
-rw-r--r--  drivers/net/can/flexcan.c | 2
-rw-r--r--  drivers/net/can/mcp251x.c | 121
-rw-r--r--  drivers/net/can/mscan/mpc5xxx_can.c | 3
-rw-r--r--  drivers/net/can/mscan/mscan.c | 3
-rw-r--r--  drivers/net/can/mscan/mscan.h | 3
-rw-r--r--  drivers/net/can/pch_can.c | 3
-rw-r--r--  drivers/net/can/sja1000/ems_pci.c | 3
-rw-r--r--  drivers/net/can/sja1000/kvaser_pci.c | 3
-rw-r--r--  drivers/net/can/sja1000/plx_pci.c | 3
-rw-r--r--  drivers/net/can/sja1000/sja1000.c | 17
-rw-r--r--  drivers/net/can/sja1000/sja1000_isa.c | 3
-rw-r--r--  drivers/net/can/sja1000/sja1000_of_platform.c | 3
-rw-r--r--  drivers/net/can/sja1000/sja1000_platform.c | 3
-rw-r--r--  drivers/net/can/slcan.c | 4
-rw-r--r--  drivers/net/can/softing/softing_cs.c | 3
-rw-r--r--  drivers/net/can/softing/softing_fw.c | 3
-rw-r--r--  drivers/net/can/softing/softing_main.c | 3
-rw-r--r--  drivers/net/can/usb/ems_usb.c | 3
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_pro.c | 3
-rw-r--r--  drivers/net/ethernet/3com/3c509.c | 3
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 6
-rw-r--r--  drivers/net/ethernet/8390/8390.h | 7
-rw-r--r--  drivers/net/ethernet/8390/apne.c | 62
-rw-r--r--  drivers/net/ethernet/8390/ax88796.c | 22
-rw-r--r--  drivers/net/ethernet/8390/axnet_cs.c | 119
-rw-r--r--  drivers/net/ethernet/8390/etherh.c | 53
-rw-r--r--  drivers/net/ethernet/8390/hydra.c | 11
-rw-r--r--  drivers/net/ethernet/8390/lib8390.c | 77
-rw-r--r--  drivers/net/ethernet/8390/mac8390.c | 19
-rw-r--r--  drivers/net/ethernet/8390/mcf8390.c | 8
-rw-r--r--  drivers/net/ethernet/8390/ne.c | 96
-rw-r--r--  drivers/net/ethernet/8390/ne2k-pci.c | 54
-rw-r--r--  drivers/net/ethernet/8390/pcnet_cs.c | 62
-rw-r--r--  drivers/net/ethernet/8390/smc-ultra.c | 48
-rw-r--r--  drivers/net/ethernet/8390/stnic.c | 28
-rw-r--r--  drivers/net/ethernet/8390/wd.c | 42
-rw-r--r--  drivers/net/ethernet/8390/zorro8390.c | 22
-rw-r--r--  drivers/net/ethernet/adi/bfin_mac.c | 22
-rw-r--r--  drivers/net/ethernet/aeroflex/greth.c | 2
-rw-r--r--  drivers/net/ethernet/allwinner/sun4i-emac.c | 5
-rw-r--r--  drivers/net/ethernet/amd/7990.c | 836
-rw-r--r--  drivers/net/ethernet/amd/7990.h | 268
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.c | 4
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.h | 4
-rw-r--r--  drivers/net/ethernet/amd/au1000_eth.c | 3
-rw-r--r--  drivers/net/ethernet/amd/au1000_eth.h | 3
-rw-r--r--  drivers/net/ethernet/amd/hplance.c | 96
-rw-r--r--  drivers/net/ethernet/amd/mvme147.c | 36
-rw-r--r--  drivers/net/ethernet/amd/pcnet32.c | 2
-rw-r--r--  drivers/net/ethernet/arc/emac.h | 2
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 24
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 8
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl1.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 250
-rw-r--r--  drivers/net/ethernet/broadcom/b44.h | 15
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 362
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.h | 94
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 59
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.h | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 13
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 34
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 94
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 122
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 25
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 310
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 50
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/cnic_if.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/sb1250-mac.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 285
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h | 11
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc.c | 650
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc.h | 8
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c | 40
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfi.h | 33
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfi_enet.h | 3
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna.h | 24
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_enet.c | 58
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_hw_defs.h | 4
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_tx_rx.c | 251
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_types.h | 57
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 559
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.h | 26
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/brocade/bna/cna.h | 4
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 126
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/common.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/cphy.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/elmer0.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/espi.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/espi.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/gmac.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/mv88x201x.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/pm3393.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/regs.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/sge.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/sge.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/subr.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/l2t.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 111
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 337
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 9
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.c | 35
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 20
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 471
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 87
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 7
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 15
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 25
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 8
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 11
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_pp.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/uli526x.c | 6
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 4
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 40
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_hw.h | 3
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 92
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 3
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 25
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 16
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 22
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 5
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 3
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.h | 3
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 40
-rw-r--r--  drivers/net/ethernet/intel/Makefile | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000.h | 7
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 60
-rw-r--r--  drivers/net/ethernet/intel/e1000e/80003es2lan.c | 7
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 18
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/Makefile | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 86
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 209
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.h | 21
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 132
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_alloc.h | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 500
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 274
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_diag.c | 23
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_diag.h | 15
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 386
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_hmc.c | 9
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_hmc.h | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h | 11
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 983
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 74
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_osdep.h | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 33
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_register.h | 170
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_status.h | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 134
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 56
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 101
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 11
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 846
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 11
-rw-r--r--  drivers/net/ethernet/intel/i40evf/Makefile | 33
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 927
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.h | 106
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 2153
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_alloc.h | 55
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_common.c | 254
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_hmc.h | 238
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h | 165
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_osdep.h | 72
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 84
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_register.h | 4667
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_status.h | 97
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 1573
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 296
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h | 1152
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 364
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf.h | 321
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 390
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c | 2353
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 772
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 89
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h | 16
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_hw.h | 3
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c | 5
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 28
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 17
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_hwmon.c | 108
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 303
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 9
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c | 84
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 283
-rw-r--r--  drivers/net/ethernet/lantiq_etop.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 28
-rw-r--r--  drivers/net/ethernet/marvell/mvmdio.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 23
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/alloc.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_clock.c | 198
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_cq.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_main.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 142
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_resources.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_selftest.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 62
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 93
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 111
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c | 48
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/pd.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/srq.c | 4
-rw-r--r--  drivers/net/ethernet/micrel/ks8842.c | 6
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 9
-rw-r--r--  drivers/net/ethernet/natsemi/ns83820.c | 5
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c | 32
-rw-r--r--  drivers/net/ethernet/netx-eth.c | 3
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 13
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h | 3
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c | 3
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h | 3
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 3
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c | 3
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c | 3
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h | 3
-rw-r--r--  drivers/net/ethernet/packetengines/yellowfin.c | 17
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.c | 3
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.h | 3
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/Makefile | 4
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic.h | 4
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h | 4
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h | 4
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 50
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 138
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 13
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 79
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 49
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c | 31
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h | 14
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 24
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 50
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 38
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 70
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 248
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 197
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 118
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_dbg.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 8
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 5
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 5
-rw-r--r--  drivers/net/ethernet/renesas/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 127
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 53
-rw-r--r--  drivers/net/ethernet/seeq/sgiseeq.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 518
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 209
-rw-r--r--drivers/net/ethernet/sfc/efx.h13
-rw-r--r--drivers/net/ethernet/sfc/enum.h1
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c12
-rw-r--r--drivers/net/ethernet/sfc/falcon.c38
-rw-r--r--drivers/net/ethernet/sfc/farch.c48
-rw-r--r--drivers/net/ethernet/sfc/filter.h17
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c449
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h23
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mon.c154
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h733
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c89
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h78
-rw-r--r--drivers/net/ethernet/sfc/nic.c12
-rw-r--r--drivers/net/ethernet/sfc/nic.h36
-rw-r--r--drivers/net/ethernet/sfc/ptp.c914
-rw-r--r--drivers/net/ethernet/sfc/rx.c30
-rw-r--r--drivers/net/ethernet/sfc/selftest.c2
-rw-r--r--drivers/net/ethernet/sfc/selftest.h1
-rw-r--r--drivers/net/ethernet/sfc/siena.c119
-rw-r--r--drivers/net/ethernet/sis/sis900.c2
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c3
-rw-r--r--drivers/net/ethernet/smsc/smc911x.h3
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c48
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h25
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c3
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.h3
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c5
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c4
-rw-r--r--drivers/net/ethernet/sun/cassini.c4
-rw-r--r--drivers/net/ethernet/sun/cassini.h4
-rw-r--r--drivers/net/ethernet/sun/niu.c10
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c1
-rw-r--r--drivers/net/ethernet/ti/cpsw.c55
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c7
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c26
-rw-r--r--drivers/net/ethernet/tile/Kconfig12
-rw-r--r--drivers/net/ethernet/tile/tilegx.c38
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c18
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c15
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.h4
-rw-r--r--drivers/net/ethernet/via/via-velocity.c11
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c51
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c3
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c36
-rw-r--r--drivers/net/fddi/defxx.c20
-rw-r--r--drivers/net/fddi/skfp/fplustm.c27
-rw-r--r--drivers/net/fddi/skfp/h/supern_2.h96
-rw-r--r--drivers/net/fddi/skfp/skfddi.c1
-rw-r--r--drivers/net/fddi/skfp/smt.c2
-rw-r--r--drivers/net/fddi/skfp/srf.c24
-rw-r--r--drivers/net/hamradio/6pack.c3
-rw-r--r--drivers/net/hamradio/bpqether.c2
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/hamradio/mkiss.c3
-rw-r--r--drivers/net/hamradio/yam.c1
-rw-r--r--drivers/net/hippi/rrunner.c5
-rw-r--r--drivers/net/hyperv/hyperv_net.h3
-rw-r--r--drivers/net/hyperv/netvsc.c3
-rw-r--r--drivers/net/hyperv/netvsc_drv.c24
-rw-r--r--drivers/net/hyperv/rndis_filter.c3
-rw-r--r--drivers/net/ieee802154/at86rf230.c2
-rw-r--r--drivers/net/ieee802154/mrf24j40.c1
-rw-r--r--drivers/net/irda/Kconfig4
-rw-r--r--drivers/net/irda/au1k_ir.c3
-rw-r--r--drivers/net/irda/esi-sir.c4
-rw-r--r--drivers/net/irda/litelink-sir.c4
-rw-r--r--drivers/net/irda/ma600-sir.c4
-rw-r--r--drivers/net/irda/old_belkin-sir.c4
-rw-r--r--drivers/net/irda/sh_irda.c2
-rw-r--r--drivers/net/irda/sh_sir.c2
-rw-r--r--drivers/net/irda/smsc-ircc2.c4
-rw-r--r--drivers/net/irda/smsc-ircc2.h4
-rw-r--r--drivers/net/irda/via-ircc.c5
-rw-r--r--drivers/net/irda/via-ircc.h3
-rw-r--r--drivers/net/irda/vlsi_ir.c7
-rw-r--r--drivers/net/irda/vlsi_ir.h4
-rw-r--r--drivers/net/macvlan.c54
-rw-r--r--drivers/net/macvtap.c101
-rw-r--r--drivers/net/mdio.c28
-rw-r--r--drivers/net/phy/cicada.c4
-rw-r--r--drivers/net/phy/davicom.c2
-rw-r--r--drivers/net/phy/dp83640.c4
-rw-r--r--drivers/net/phy/icplus.c2
-rw-r--r--drivers/net/phy/lxt.c4
-rw-r--r--drivers/net/phy/marvell.c22
-rw-r--r--drivers/net/phy/mdio_bus.c35
-rw-r--r--drivers/net/phy/micrel.c19
-rw-r--r--drivers/net/phy/phy.c434
-rw-r--r--drivers/net/phy/phy_device.c305
-rw-r--r--drivers/net/phy/spi_ks8995.c6
-rw-r--r--drivers/net/phy/vitesse.c15
-rw-r--r--drivers/net/plip/plip.c4
-rw-r--r--drivers/net/ppp/ppp_mppe.c3
-rw-r--r--drivers/net/ppp/pppoe.c4
-rw-r--r--drivers/net/team/team.c4
-rw-r--r--drivers/net/tun.c78
-rw-r--r--drivers/net/usb/Kconfig6
-rw-r--r--drivers/net/usb/asix.h3
-rw-r--r--drivers/net/usb/asix_common.c3
-rw-r--r--drivers/net/usb/asix_devices.c3
-rw-r--r--drivers/net/usb/ax88172a.c3
-rw-r--r--drivers/net/usb/ax88179_178a.c3
-rw-r--r--drivers/net/usb/catc.c3
-rw-r--r--drivers/net/usb/cdc_eem.c3
-rw-r--r--drivers/net/usb/cdc_ether.c13
-rw-r--r--drivers/net/usb/cdc_subset.c3
-rw-r--r--drivers/net/usb/cx82310_eth.c3
-rw-r--r--drivers/net/usb/dm9601.c44
-rw-r--r--drivers/net/usb/gl620a.c3
-rw-r--r--drivers/net/usb/hso.c13
-rw-r--r--drivers/net/usb/int51x1.c3
-rw-r--r--drivers/net/usb/kaweth.c3
-rw-r--r--drivers/net/usb/lg-vl600.c3
-rw-r--r--drivers/net/usb/mcs7830.c22
-rw-r--r--drivers/net/usb/net1080.c3
-rw-r--r--drivers/net/usb/plusb.c3
-rw-r--r--drivers/net/usb/r8152.c861
-rw-r--r--drivers/net/usb/r815x.c2
-rw-r--r--drivers/net/usb/rndis_host.c3
-rw-r--r--drivers/net/usb/sierra_net.c3
-rw-r--r--drivers/net/usb/smsc75xx.c3
-rw-r--r--drivers/net/usb/smsc75xx.h3
-rw-r--r--drivers/net/usb/smsc95xx.c3
-rw-r--r--drivers/net/usb/smsc95xx.h3
-rw-r--r--drivers/net/usb/usbnet.c3
-rw-r--r--drivers/net/usb/zaurus.c3
-rw-r--r--drivers/net/virtio_net.c205
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c5
-rw-r--r--drivers/net/vxlan.c105
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2
-rw-r--r--drivers/net/wan/pc300too.c1
-rw-r--r--drivers/net/wan/pci200syn.c1
-rw-r--r--drivers/net/wan/sbni.c1
-rw-r--r--drivers/net/wan/wanxl.c1
-rw-r--r--drivers/net/wireless/adm8211.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.c7
-rw-r--r--drivers/net/wireless/cw1200/sta.c5
-rw-r--r--drivers/net/wireless/cw1200/txrx.c3
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_rx.c8
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_tx.c4
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c28
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c4
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c8
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c34
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_rx.c10
-rw-r--r--drivers/net/wireless/mwifiex/11n.c2
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c3
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c2
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c3
-rw-r--r--drivers/net/wireless/rtlwifi/cam.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c2
-rw-r--r--drivers/net/wireless/wl3501_cs.c4
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c5
-rw-r--r--drivers/net/xen-netback/common.h47
-rw-r--r--drivers/net/xen-netback/interface.c78
-rw-r--r--drivers/net/xen-netback/netback.c534
-rw-r--r--drivers/net/xen-netback/xenbus.c3
-rw-r--r--drivers/ntb/ntb_hw.c121
-rw-r--r--drivers/ntb/ntb_hw.h7
-rw-r--r--drivers/ntb/ntb_regs.h16
-rw-r--r--drivers/ntb/ntb_transport.c163
-rw-r--r--drivers/of/Kconfig2
-rw-r--r--drivers/of/address.c8
-rw-r--r--drivers/of/fdt.c12
-rw-r--r--drivers/of/irq.c5
-rw-r--r--drivers/of/of_mdio.c135
-rw-r--r--drivers/pci/ats.c2
-rw-r--r--drivers/pci/host/pci-mvebu.c5
-rw-r--r--drivers/pci/host/pci-tegra.c12
-rw-r--r--drivers/pci/host/pcie-designware.c2
-rw-r--r--drivers/pci/hotplug/Kconfig4
-rw-r--r--drivers/pci/hotplug/Makefile2
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c2
-rw-r--r--drivers/pci/hotplug/acpiphp.h1
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c12
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c2
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c14
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c2
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_pci.c2
-rw-r--r--drivers/pci/hotplug/cpcihp_generic.c20
-rw-r--r--drivers/pci/hotplug/cpcihp_zt5550.c22
-rw-r--r--drivers/pci/hotplug/cpcihp_zt5550.h18
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c4
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c10
-rw-r--r--drivers/pci/hotplug/cpqphp_pci.c5
-rw-r--r--drivers/pci/hotplug/ibmphp.h12
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c109
-rw-r--r--drivers/pci/hotplug/ibmphp_ebda.c103
-rw-r--r--drivers/pci/hotplug/ibmphp_hpc.c24
-rw-r--r--drivers/pci/hotplug/ibmphp_pci.c71
-rw-r--r--drivers/pci/hotplug/ibmphp_res.c76
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c8
-rw-r--r--drivers/pci/hotplug/pciehp.h2
-rw-r--r--drivers/pci/hotplug/pciehp_acpi.c6
-rw-r--r--drivers/pci/hotplug/pciehp_core.c4
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c6
-rw-r--r--drivers/pci/hotplug/pcihp_skeleton.c8
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c2
-rw-r--r--drivers/pci/hotplug/rpaphp.h6
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c8
-rw-r--r--drivers/pci/hotplug/rpaphp_pci.c3
-rw-r--r--drivers/pci/hotplug/rpaphp_slot.c19
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c8
-rw-r--r--drivers/pci/hotplug/shpchp.h8
-rw-r--r--drivers/pci/hotplug/shpchp_core.c10
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c2
-rw-r--r--drivers/pci/ioapic.c2
-rw-r--r--drivers/pci/iov.c2
-rw-r--r--drivers/pci/irq.c2
-rw-r--r--drivers/pci/msi.c2
-rw-r--r--drivers/pci/pci-acpi.c8
-rw-r--r--drivers/pci/pci-driver.c54
-rw-r--r--drivers/pci/pci-label.c6
-rw-r--r--drivers/pci/pci-stub.c4
-rw-r--r--drivers/pci/pci-sysfs.c28
-rw-r--r--drivers/pci/pci.c54
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c2
-rw-r--r--drivers/pci/pcie/aspm.c2
-rw-r--r--drivers/pci/pcie/pme.c4
-rw-r--r--drivers/pci/pcie/portdrv.h2
-rw-r--r--drivers/pci/pcie/portdrv_bus.c4
-rw-r--r--drivers/pci/pcie/portdrv_core.c2
-rw-r--r--drivers/pci/pcie/portdrv_pci.c7
-rw-r--r--drivers/pci/probe.c10
-rw-r--r--drivers/pci/proc.c2
-rw-r--r--drivers/pci/quirks.c108
-rw-r--r--drivers/pci/remove.c6
-rw-r--r--drivers/pci/search.c12
-rw-r--r--drivers/pci/setup-bus.c18
-rw-r--r--drivers/pci/setup-res.c2
-rw-r--r--drivers/pci/slot.c2
-rw-r--r--drivers/pci/syscall.c2
-rw-r--r--drivers/phy/Kconfig4
-rw-r--r--drivers/phy/phy-core.c26
-rw-r--r--drivers/pinctrl/pinctrl-abx500.c6
-rw-r--r--drivers/pinctrl/pinctrl-abx500.h2
-rw-r--r--drivers/pinctrl/pinctrl-baytrail.c1
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c5
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7740.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7372.c2
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h2
-rw-r--r--drivers/platform/Kconfig1
-rw-r--r--drivers/platform/Makefile1
-rw-r--r--drivers/platform/chrome/Kconfig28
-rw-r--r--drivers/platform/chrome/Makefile2
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c (renamed from drivers/platform/x86/chromeos_laptop.c)0
-rw-r--r--drivers/platform/x86/Kconfig11
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/apple-gmux.c2
-rw-r--r--drivers/platform/x86/asus-laptop.c5
-rw-r--r--drivers/platform/x86/dell-laptop.c288
-rw-r--r--drivers/platform/x86/dell-wmi.c7
-rw-r--r--drivers/platform/x86/eeepc-laptop.c4
-rw-r--r--drivers/platform/x86/hp-wmi.c14
-rw-r--r--drivers/platform/x86/ideapad-laptop.c4
-rw-r--r--drivers/platform/x86/intel_mid_powerbtn.c4
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c117
-rw-r--r--drivers/platform/x86/panasonic-laptop.c5
-rw-r--r--drivers/platform/x86/sony-laptop.c47
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c8
-rw-r--r--drivers/platform/x86/topstar-laptop.c4
-rw-r--r--drivers/platform/x86/toshiba_acpi.c4
-rw-r--r--drivers/platform/x86/wmi.c6
-rw-r--r--drivers/pnp/driver.c12
-rw-r--r--drivers/pnp/pnpacpi/core.c10
-rw-r--r--drivers/powercap/intel_rapl.c13
-rw-r--r--drivers/powercap/powercap_sys.c7
-rw-r--r--drivers/regulator/arizona-micsupp.c54
-rw-r--r--drivers/regulator/as3722-regulator.c2
-rw-r--r--drivers/regulator/core.c14
-rw-r--r--drivers/regulator/gpio-regulator.c7
-rw-r--r--drivers/regulator/pfuze100-regulator.c14
-rw-r--r--drivers/regulator/s2mps11.c2
-rw-r--r--drivers/regulator/s5m8767.c2
-rw-r--r--drivers/rtc/Kconfig10
-rw-r--r--drivers/rtc/rtc-at91rm9200.c11
-rw-r--r--drivers/rtc/rtc-s5m.c118
-rw-r--r--drivers/s390/block/dasd_eckd.c2
-rw-r--r--drivers/s390/block/dasd_genhd.c1
-rw-r--r--drivers/s390/char/sclp_early.c5
-rw-r--r--drivers/s390/net/netiucv.c8
-rw-r--r--drivers/s390/net/qeth_core.h8
-rw-r--r--drivers/s390/net/qeth_core_main.c198
-rw-r--r--drivers/s390/net/qeth_core_mpc.h28
-rw-r--r--drivers/scsi/3w-9xxx.c3
-rw-r--r--drivers/scsi/3w-sas.c3
-rw-r--r--drivers/scsi/3w-xxxx.c3
-rw-r--r--drivers/scsi/aacraid/linit.c1
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c1
-rw-r--r--drivers/scsi/bfa/bfa_fcs.h1
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c14
-rw-r--r--drivers/scsi/bfa/bfad_attr.c7
-rw-r--r--drivers/scsi/gdth.c1
-rw-r--r--drivers/scsi/hosts.c1
-rw-r--r--drivers/scsi/hpsa.c5
-rw-r--r--drivers/scsi/ipr.c3
-rw-r--r--drivers/scsi/ips.c1
-rw-r--r--drivers/scsi/libsas/sas_ata.c2
-rw-r--r--drivers/scsi/megaraid.c1
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c1
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c1
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.h4
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c91
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h9
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c2
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h2
-rw-r--r--drivers/scsi/pmcraid.c21
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c10
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c56
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h2
-rw-r--r--drivers/scsi/sd.c6
-rw-r--r--drivers/scsi/storvsc_drv.c1
-rw-r--r--drivers/spi/spi-bcm2835.c2
-rw-r--r--drivers/spi/spi-bcm63xx.c2
-rw-r--r--drivers/spi/spi-dw-mid.c4
-rw-r--r--drivers/spi/spi-mpc512x-psc.c2
-rw-r--r--drivers/spi/spi-mxs.c2
-rw-r--r--drivers/spi/spi-pxa2xx.c5
-rw-r--r--drivers/spi/spi-rspi.c3
-rw-r--r--drivers/spi/spi-ti-qspi.c23
-rw-r--r--drivers/spi/spi-txx9.c2
-rw-r--r--drivers/spi/spi.c21
-rw-r--r--drivers/staging/btmtk_usb/btmtk_usb.c3
-rw-r--r--drivers/staging/comedi/drivers.c2
-rw-r--r--drivers/staging/comedi/drivers/8255_pci.c15
-rw-r--r--drivers/staging/comedi/drivers/pcl730.c6
-rw-r--r--drivers/staging/comedi/drivers/s626.c2
-rw-r--r--drivers/staging/comedi/drivers/vmk80xx.c2
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_download.c3
-rw-r--r--drivers/staging/iio/magnetometer/Kconfig2
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843.c7
-rw-r--r--drivers/staging/imx-drm/Makefile4
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c40
-rw-r--r--drivers/staging/imx-drm/imx-tve.c9
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-common.c32
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pinger.c4
-rw-r--r--drivers/staging/media/go7007/go7007-usb.c28
-rw-r--r--drivers/staging/nvec/nvec.c3
-rw-r--r--drivers/staging/ozwpan/ozcdev.c2
-rw-r--r--drivers/staging/ozwpan/ozproto.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c3
-rw-r--r--drivers/staging/tidspbridge/Kconfig2
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv_interface.c13
-rw-r--r--drivers/staging/vt6655/hostap.c3
-rw-r--r--drivers/staging/vt6656/baseband.c11
-rw-r--r--drivers/staging/vt6656/hostap.c3
-rw-r--r--drivers/staging/vt6656/rndis.h2
-rw-r--r--drivers/staging/zram/zram_drv.c19
-rw-r--r--drivers/staging/zsmalloc/zsmalloc-main.c17
-rw-r--r--drivers/target/iscsi/iscsi_target.c117
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c7
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c89
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h34
-rw-r--r--drivers/target/iscsi/iscsi_target_device.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c12
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c23
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c10
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.c5
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.c22
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c42
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c20
-rw-r--r--drivers/target/loopback/tcm_loop.c242
-rw-r--r--drivers/target/loopback/tcm_loop.h6
-rw-r--r--drivers/target/sbp/sbp_target.c18
-rw-r--r--drivers/target/target_core_alua.c150
-rw-r--r--drivers/target/target_core_alua.h33
-rw-r--r--drivers/target/target_core_configfs.c123
-rw-r--r--drivers/target/target_core_device.c40
-rw-r--r--drivers/target/target_core_fabric_configfs.c38
-rw-r--r--drivers/target/target_core_file.c10
-rw-r--r--drivers/target/target_core_file.h5
-rw-r--r--drivers/target/target_core_iblock.c43
-rw-r--r--drivers/target/target_core_internal.h4
-rw-r--r--drivers/target/target_core_pr.c24
-rw-r--r--drivers/target/target_core_rd.c1
-rw-r--r--drivers/target/target_core_sbc.c12
-rw-r--r--drivers/target/target_core_spc.c17
-rw-r--r--drivers/target/target_core_stat.c16
-rw-r--r--drivers/target/target_core_tmr.c4
-rw-r--r--drivers/target/target_core_tpg.c35
-rw-r--r--drivers/target/target_core_transport.c244
-rw-r--r--drivers/target/target_core_ua.h2
-rw-r--r--drivers/target/target_core_xcopy.c19
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h1
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c18
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c18
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c3
-rw-r--r--drivers/tty/amiserial.c3
-rw-r--r--drivers/tty/n_tty.c27
-rw-r--r--drivers/tty/serial/8250/8250_dw.c8
-rw-r--r--drivers/tty/serial/8250/Kconfig2
-rw-r--r--drivers/tty/serial/pmac_zilog.c3
-rw-r--r--drivers/tty/serial/sh-sci.c2
-rw-r--r--drivers/tty/serial/xilinx_uartps.c2
-rw-r--r--drivers/tty/tty_io.c1
-rw-r--r--drivers/tty/tty_ldsem.c16
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/usb/chipidea/core.c4
-rw-r--r--drivers/usb/chipidea/host.c3
-rw-r--r--drivers/usb/chipidea/udc.c3
-rw-r--r--drivers/usb/class/cdc-acm.c2
-rw-r--r--drivers/usb/class/cdc-wdm.c8
-rw-r--r--drivers/usb/core/hub.c7
-rw-r--r--drivers/usb/core/usb-acpi.c4
-rw-r--r--drivers/usb/dwc3/core.c8
-rw-r--r--drivers/usb/dwc3/ep0.c2
-rw-r--r--drivers/usb/dwc3/gadget.c5
-rw-r--r--drivers/usb/gadget/Kconfig1
-rw-r--r--drivers/usb/gadget/composite.c1
-rw-r--r--drivers/usb/gadget/f_fs.c2
-rw-r--r--drivers/usb/gadget/f_mass_storage.c27
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c1
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c7
-rw-r--r--drivers/usb/gadget/storage_common.h4
-rw-r--r--drivers/usb/gadget/tcm_usb_gadget.c20
-rw-r--r--drivers/usb/gadget/zero.c6
-rw-r--r--drivers/usb/host/ohci-at91.c26
-rw-r--r--drivers/usb/host/ohci-pxa27x.c1
-rw-r--r--drivers/usb/host/xhci-pci.c7
-rw-r--r--drivers/usb/host/xhci-ring.c54
-rw-r--r--drivers/usb/musb/musb_core.c9
-rw-r--r--drivers/usb/musb/musb_cppi41.c164
-rw-r--r--drivers/usb/musb/musb_gadget.c4
-rw-r--r--drivers/usb/phy/Kconfig4
-rw-r--r--drivers/usb/phy/phy-am335x.c5
-rw-r--r--drivers/usb/phy/phy-generic.c68
-rw-r--r--drivers/usb/phy/phy-generic.h4
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c2
-rw-r--r--drivers/usb/phy/phy-rcar-gen2-usb.c4
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c2
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c3
-rw-r--r--drivers/usb/serial/ftdi_sio.c37
-rw-r--r--drivers/usb/serial/generic.c12
-rw-r--r--drivers/usb/serial/mos7840.c32
-rw-r--r--drivers/usb/serial/option.c29
-rw-r--r--drivers/usb/serial/pl2303.c30
-rw-r--r--drivers/usb/serial/spcp8x5.c30
-rw-r--r--drivers/usb/serial/zte_ev.c3
-rw-r--r--drivers/usb/wusbcore/devconnect.c72
-rw-r--r--drivers/usb/wusbcore/security.c98
-rw-r--r--drivers/usb/wusbcore/wusbhc.h6
-rw-r--r--drivers/vhost/net.c9
-rw-r--r--drivers/vhost/scsi.c25
-rw-r--r--drivers/vhost/test.c8
-rw-r--r--drivers/vhost/vhost.c4
-rw-r--r--drivers/vhost/vhost.h2
-rw-r--r--drivers/video/atmel_lcdfb.c1
-rw-r--r--drivers/video/kyro/fbdev.c6
-rw-r--r--drivers/video/offb.c29
-rw-r--r--drivers/video/omap2/displays-new/panel-sony-acx565akm.c5
-rw-r--r--drivers/video/sh_mobile_meram.c2
-rw-r--r--drivers/video/vt8500lcdfb.c25
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/watchdog/bcm2835_wdt.c1
-rw-r--r--drivers/watchdog/ep93xx_wdt.c1
-rw-r--r--drivers/watchdog/ie6xx_wdt.c1
-rw-r--r--drivers/watchdog/jz4740_wdt.c1
-rw-r--r--drivers/watchdog/kempld_wdt.c1
-rw-r--r--drivers/watchdog/max63xx_wdt.c1
-rw-r--r--drivers/watchdog/orion_wdt.c1
-rw-r--r--drivers/watchdog/pnx4008_wdt.c1
-rw-r--r--drivers/watchdog/rt2880_wdt.c1
-rw-r--r--drivers/watchdog/sc1200wdt.c3
-rw-r--r--drivers/watchdog/shwdt.c1
-rw-r--r--drivers/watchdog/softdog.c1
-rw-r--r--drivers/watchdog/stmp3xxx_rtc_wdt.c1
-rw-r--r--drivers/watchdog/txx9wdt.c1
-rw-r--r--drivers/watchdog/ux500_wdt.c1
-rw-r--r--drivers/xen/balloon.c63
-rw-r--r--drivers/xen/grant-table.c9
-rw-r--r--drivers/xen/pci.c6
-rw-r--r--drivers/xen/privcmd.c9
-rw-r--r--drivers/xen/swiotlb-xen.c5
-rw-r--r--fs/9p/vfs_dentry.c19
-rw-r--r--fs/affs/Changes2
-rw-r--r--fs/aio.c245
-rw-r--r--fs/bio.c2
-rw-r--r--fs/btrfs/Kconfig15
-rw-r--r--fs/btrfs/async-thread.c1
-rw-r--r--fs/btrfs/check-integrity.c57
-rw-r--r--fs/btrfs/check-integrity.h2
-rw-r--r--fs/btrfs/ctree.h6
-rw-r--r--fs/btrfs/dev-replace.c2
-rw-r--r--fs/btrfs/disk-io.c21
-rw-r--r--fs/btrfs/extent-tree.c22
-rw-r--r--fs/btrfs/extent_io.c23
-rw-r--r--fs/btrfs/inode.c6
-rw-r--r--fs/btrfs/ioctl.c3
-rw-r--r--fs/btrfs/ordered-data.c3
-rw-r--r--fs/btrfs/relocation.c81
-rw-r--r--fs/btrfs/scrub.c39
-rw-r--r--fs/btrfs/send.c4
-rw-r--r--fs/btrfs/super.c5
-rw-r--r--fs/btrfs/transaction.c4
-rw-r--r--fs/btrfs/tree-log.c5
-rw-r--r--fs/btrfs/volumes.c2
-rw-r--r--fs/ceph/addr.c10
-rw-r--r--fs/ceph/cache.c3
-rw-r--r--fs/ceph/caps.c27
-rw-r--r--fs/ceph/dir.c11
-rw-r--r--fs/ceph/inode.c183
-rw-r--r--fs/ceph/mds_client.c61
-rw-r--r--fs/ceph/mds_client.h1
-rw-r--r--fs/ceph/super.h8
-rw-r--r--fs/cifs/cifsglob.h1
-rw-r--r--fs/cifs/ioctl.c6
-rw-r--r--fs/cifs/smb2ops.c99
-rw-r--r--fs/cifs/smb2pdu.c92
-rw-r--r--fs/cifs/smb2pdu.h12
-rw-r--r--fs/cifs/smb2proto.h1
-rw-r--r--fs/cifs/smbfsctl.h2
-rw-r--r--fs/configfs/dir.c28
-rw-r--r--fs/coredump.c6
-rw-r--r--fs/dcache.c86
-rw-r--r--fs/ecryptfs/file.c8
-rw-r--r--fs/efivarfs/super.c11
-rw-r--r--fs/eventpoll.c3
-rw-r--r--fs/exec.c5
-rw-r--r--fs/ext2/super.c1
-rw-r--r--fs/ext4/ext4.h10
-rw-r--r--fs/ext4/ext4_jbd2.c9
-rw-r--r--fs/ext4/extents.c45
-rw-r--r--fs/ext4/inode.c12
-rw-r--r--fs/ext4/mballoc.c17
-rw-r--r--fs/ext4/super.c21
-rw-r--r--fs/gfs2/glock.c3
-rw-r--r--fs/gfs2/inode.c5
-rw-r--r--fs/gfs2/lock_dlm.c8
-rw-r--r--fs/gfs2/quota.c23
-rw-r--r--fs/gfs2/rgrp.c4
-rw-r--r--fs/hfsplus/wrapper.c17
-rw-r--r--fs/hostfs/hostfs_kern.c11
-rw-r--r--fs/jbd2/journal.c18
-rw-r--r--fs/jbd2/recovery.c2
-rw-r--r--fs/jbd2/transaction.c16
-rw-r--r--fs/libfs.c12
-rw-r--r--fs/logfs/dev_bdev.c13
-rw-r--r--fs/namei.c11
-rw-r--r--fs/nfs/blocklayout/blocklayout.h1
-rw-r--r--fs/nfs/blocklayout/extents.c2
-rw-r--r--fs/nfs/dns_resolve.c2
-rw-r--r--fs/nfs/inode.c2
-rw-r--r--fs/nfs/internal.h15
-rw-r--r--fs/nfs/nfs4_fs.h8
-rw-r--r--fs/nfs/nfs4proc.c30
-rw-r--r--fs/nfsd/nfs4xdr.c3
-rw-r--r--fs/nfsd/nfscache.c9
-rw-r--r--fs/nfsd/vfs.c173
-rw-r--r--fs/pipe.c39
-rw-r--r--fs/proc/base.c14
-rw-r--r--fs/proc/generic.c18
-rw-r--r--fs/proc/inode.c14
-rw-r--r--fs/proc/namespaces.c8
-rw-r--r--fs/pstore/platform.c7
-rw-r--r--fs/squashfs/Kconfig72
-rw-r--r--fs/squashfs/Makefile5
-rw-r--r--fs/squashfs/block.c36
-rw-r--r--fs/squashfs/cache.c28
-rw-r--r--fs/squashfs/decompressor.c59
-rw-r--r--fs/squashfs/decompressor.h24
-rw-r--r--fs/squashfs/decompressor_multi.c198
-rw-r--r--fs/squashfs/decompressor_multi_percpu.c97
-rw-r--r--fs/squashfs/decompressor_single.c85
-rw-r--r--fs/squashfs/file.c142
-rw-r--r--fs/squashfs/file_cache.c38
-rw-r--r--fs/squashfs/file_direct.c176
-rw-r--r--fs/squashfs/lzo_wrapper.c47
-rw-r--r--fs/squashfs/page_actor.c100
-rw-r--r--fs/squashfs/page_actor.h81
-rw-r--r--fs/squashfs/squashfs.h20
-rw-r--r--fs/squashfs/squashfs_fs_sb.h4
-rw-r--r--fs/squashfs/super.c10
-rw-r--r--fs/squashfs/xz_wrapper.c105
-rw-r--r--fs/squashfs/zlib_wrapper.c64
-rw-r--r--fs/sysfs/file.c18
-rw-r--r--fs/xfs/xfs_bmap.c70
-rw-r--r--fs/xfs/xfs_bmap_util.c14
-rw-r--r--fs/xfs/xfs_buf.c37
-rw-r--r--fs/xfs/xfs_buf.h11
-rw-r--r--fs/xfs/xfs_buf_item.c21
-rw-r--r--fs/xfs/xfs_dir2_node.c26
-rw-r--r--fs/xfs/xfs_discard.c5
-rw-r--r--fs/xfs/xfs_fsops.c6
-rw-r--r--fs/xfs/xfs_ioctl.c3
-rw-r--r--fs/xfs/xfs_ioctl32.c3
-rw-r--r--fs/xfs/xfs_iops.c3
-rw-r--r--fs/xfs/xfs_log_recover.c13
-rw-r--r--fs/xfs/xfs_mount.c15
-rw-r--r--fs/xfs/xfs_mount.h2
-rw-r--r--fs/xfs/xfs_qm.c80
-rw-r--r--fs/xfs/xfs_trans_buf.c13
-rw-r--r--fs/xfs/xfs_trans_inode.c8
-rw-r--r--fs/xfs/xfs_trans_resv.c3
-rw-r--r--include/acpi/acconfig.h2
-rw-r--r--include/acpi/acpi_bus.h3
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/asm-generic/hash.h9
-rw-r--r--include/asm-generic/pgtable.h7
-rw-r--r--include/asm-generic/preempt.h35
-rw-r--r--include/asm-generic/simd.h14
-rw-r--r--include/asm-generic/word-at-a-time.h8
-rw-r--r--include/crypto/ablk_helper.h (renamed from arch/x86/include/asm/crypto/ablk_helper.h)0
-rw-r--r--include/crypto/algapi.h18
-rw-r--r--include/crypto/authenc.h12
-rw-r--r--include/crypto/hash_info.h40
-rw-r--r--include/crypto/public_key.h25
-rw-r--r--include/crypto/scatterwalk.h3
-rw-r--r--include/drm/ttm/ttm_bo_api.h4
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h3
-rw-r--r--include/drm/ttm/ttm_object.h61
-rw-r--r--include/keys/big_key-type.h25
-rw-r--r--include/keys/keyring-type.h17
-rw-r--r--include/keys/system_keyring.h23
-rw-r--r--include/linux/acpi.h23
-rw-r--r--include/linux/assoc_array.h92
-rw-r--r--include/linux/assoc_array_priv.h182
-rw-r--r--include/linux/audit.h15
-rw-r--r--include/linux/auxvec.h2
-rw-r--r--include/linux/bcma/bcma.h9
-rw-r--r--include/linux/blkdev.h3
-rw-r--r--include/linux/cgroup_subsys.h4
-rw-r--r--include/linux/compiler-intel.h2
-rw-r--r--include/linux/dcache.h2
-rw-r--r--include/linux/device.h12
-rw-r--r--include/linux/dmaengine.h76
-rw-r--r--include/linux/efi.h4
-rw-r--r--include/linux/etherdevice.h94
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/ftrace_event.h16
-rw-r--r--include/linux/gpio/driver.h14
-rw-r--r--include/linux/hash.h36
-rw-r--r--include/linux/hid-sensor-hub.h5
-rw-r--r--include/linux/hid-sensor-ids.h12
-rw-r--r--include/linux/hugetlb.h15
-rw-r--r--include/linux/if_macvlan.h37
-rw-r--r--include/linux/if_tunnel.h9
-rw-r--r--include/linux/if_vlan.h38
-rw-r--r--include/linux/inet_lro.h23
-rw-r--r--include/linux/inetdevice.h14
-rw-r--r--include/linux/ipv6.h5
-rw-r--r--include/linux/irqreturn.h2
-rw-r--r--include/linux/kernel.h3
-rw-r--r--include/linux/kexec.h3
-rw-r--r--include/linux/key-type.h6
-rw-r--r--include/linux/key.h52
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/lockref.h2
-rw-r--r--include/linux/math64.h30
-rw-r--r--include/linux/mdio.h3
-rw-r--r--include/linux/mfd/samsung/core.h3
-rw-r--r--include/linux/micrel_phy.h2
-rw-r--r--include/linux/migrate.h12
-rw-r--r--include/linux/mlx4/cmd.h1
-rw-r--r--include/linux/mlx4/cq.h5
-rw-r--r--include/linux/mlx4/device.h39
-rw-r--r--include/linux/mlx4/qp.h6
-rw-r--r--include/linux/mm.h15
-rw-r--r--include/linux/mm_types.h82
-rw-r--r--include/linux/msi.h10
-rw-r--r--include/linux/net.h2
-rw-r--r--include/linux/netdevice.h131
-rw-r--r--include/linux/netfilter/ipset/ip_set.h1
-rw-r--r--include/linux/netlink.h2
-rw-r--r--include/linux/nfs4.h10
-rw-r--r--include/linux/nfs_fs.h18
-rw-r--r--include/linux/padata.h3
-rw-r--r--include/linux/pci-acpi.h4
-rw-r--r--include/linux/pci.h86
-rw-r--r--include/linux/pci_hotplug.h5
-rw-r--r--include/linux/pcieport_if.h2
-rw-r--r--include/linux/percpu-defs.h1
-rw-r--r--include/linux/phy.h74
-rw-r--r--include/linux/platform_data/edma.h8
-rw-r--r--include/linux/platform_data/eth-netx.h6
-rw-r--r--include/linux/printk.h7
-rw-r--r--include/linux/pstore.h3
-rw-r--r--include/linux/reboot.h1
-rw-r--r--include/linux/rtnetlink.h5
-rw-r--r--include/linux/sched.h7
-rw-r--r--include/linux/sctp.h5
-rw-r--r--include/linux/security.h26
-rw-r--r--include/linux/seqlock.h29
-rw-r--r--include/linux/sh_eth.h1
-rw-r--r--include/linux/shmem_fs.h2
-rw-r--r--include/linux/skbuff.h270
-rw-r--r--include/linux/slab.h111
-rw-r--r--include/linux/slab_def.h4
-rw-r--r--include/linux/slub_def.h2
-rw-r--r--include/linux/socket.h2
-rw-r--r--include/linux/tcp.h5
-rw-r--r--include/linux/tegra-powergate.h27
-rw-r--r--include/linux/tracepoint.h4
-rw-r--r--include/linux/usb.h2
-rw-r--r--include/linux/usb/wusb.h2
-rw-r--r--include/linux/user_namespace.h6
-rw-r--r--include/linux/wait.h25
-rw-r--r--include/media/videobuf2-core.h2
-rw-r--r--include/net/Space.h31
-rw-r--r--include/net/act_api.h46
-rw-r--r--include/net/addrconf.h11
-rw-r--r--include/net/arp.h1
-rw-r--r--include/net/cipso_ipv4.h5
-rw-r--r--include/net/cls_cgroup.h40
-rw-r--r--include/net/dcbevent.h3
-rw-r--r--include/net/dcbnl.h3
-rw-r--r--include/net/dn_dev.h2
-rw-r--r--include/net/dst.h5
-rw-r--r--include/net/flow.h3
-rw-r--r--include/net/genetlink.h4
-rw-r--r--include/net/gre.h3
-rw-r--r--include/net/if_inet6.h2
-rw-r--r--include/net/inetpeer.h1
-rw-r--r--include/net/ip.h8
-rw-r--r--include/net/ip6_fib.h2
-rw-r--r--include/net/ip6_route.h34
-rw-r--r--include/net/ip6_tunnel.h2
-rw-r--r--include/net/ip_tunnels.h11
-rw-r--r--include/net/ipv6.h25
-rw-r--r--include/net/irda/discovery.h4
-rw-r--r--include/net/irda/ircomm_core.h4
-rw-r--r--include/net/irda/ircomm_event.h4
-rw-r--r--include/net/irda/ircomm_lmp.h4
-rw-r--r--include/net/irda/ircomm_param.h4
-rw-r--r--include/net/irda/ircomm_ttp.h4
-rw-r--r--include/net/irda/ircomm_tty.h4
-rw-r--r--include/net/irda/ircomm_tty_attach.h4
-rw-r--r--include/net/irda/irda_device.h4
-rw-r--r--include/net/irda/irlap_event.h4
-rw-r--r--include/net/irda/irlap_frame.h4
-rw-r--r--include/net/irda/parameters.h4
-rw-r--r--include/net/irda/qos.h4
-rw-r--r--include/net/llc.h1
-rw-r--r--include/net/llc_pdu.h2
-rw-r--r--include/net/mip6.h3
-rw-r--r--include/net/neighbour.h78
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_ipv4.h2
-rw-r--r--include/net/netfilter/ipv4/nf_reject.h128
-rw-r--r--include/net/netfilter/ipv6/nf_reject.h171
-rw-r--r--include/net/netfilter/nf_conntrack_l3proto.h1
-rw-r--r--include/net/netfilter/nf_queue.h62
-rw-r--r--include/net/netfilter/nf_tables.h47
-rw-r--r--include/net/netfilter/nf_tables_ipv4.h5
-rw-r--r--include/net/netfilter/nf_tables_ipv6.h3
-rw-r--r--include/net/netlabel.h3
-rw-r--r--include/net/netns/conntrack.h33
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/netns/ipv6.h1
-rw-r--r--include/net/netns/nftables.h1
-rw-r--r--include/net/netns/xfrm.h6
-rw-r--r--include/net/netprio_cgroup.h18
-rw-r--r--include/net/nfc/hci.h4
-rw-r--r--include/net/nfc/llc.h4
-rw-r--r--include/net/nfc/nci.h3
-rw-r--r--include/net/nfc/nci_core.h3
-rw-r--r--include/net/nfc/nfc.h4
-rw-r--r--include/net/ping.h11
-rw-r--r--include/net/pkt_cls.h37
-rw-r--r--include/net/pkt_sched.h1
-rw-r--r--include/net/route.h8
-rw-r--r--include/net/rtnetlink.h3
-rw-r--r--include/net/sch_generic.h2
-rw-r--r--include/net/sctp/auth.h5
-rw-r--r--include/net/sctp/checksum.h5
-rw-r--r--include/net/sctp/command.h5
-rw-r--r--include/net/sctp/constants.h5
-rw-r--r--include/net/sctp/sctp.h5
-rw-r--r--include/net/sctp/sm.h5
-rw-r--r--include/net/sctp/structs.h74
-rw-r--r--include/net/sctp/tsnmap.h5
-rw-r--r--include/net/sctp/ulpevent.h5
-rw-r--r--include/net/sctp/ulpqueue.h5
-rw-r--r--include/net/sock.h32
-rw-r--r--include/net/tc_act/tc_skbedit.h3
-rw-r--r--include/net/tcp.h11
-rw-r--r--include/net/xfrm.h14
-rw-r--r--include/rdma/ib_verbs.h2
-rw-r--r--include/scsi/scsi_host.h6
-rw-r--r--include/sound/memalloc.h2
-rw-r--r--include/sound/soc-dapm.h3
-rw-r--r--include/target/target_core_backend.h5
-rw-r--r--include/target/target_core_base.h89
-rw-r--r--include/target/target_core_configfs.h1
-rw-r--r--include/target/target_core_fabric.h2
-rw-r--r--include/trace/events/btrfs.h4
-rw-r--r--include/trace/ftrace.h12
-rw-r--r--include/uapi/drm/radeon_drm.h2
-rw-r--r--include/uapi/drm/vmwgfx_drm.h1
-rw-r--r--include/uapi/linux/audit.h26
-rw-r--r--include/uapi/linux/eventpoll.h13
-rw-r--r--include/uapi/linux/genetlink.h1
-rw-r--r--include/uapi/linux/hash_info.h37
-rw-r--r--include/uapi/linux/if_addr.h5
-rw-r--r--include/uapi/linux/if_link.h36
-rw-r--r--include/uapi/linux/if_packet.h28
-rw-r--r--include/uapi/linux/in6.h4
-rw-r--r--include/uapi/linux/input.h3
-rw-r--r--include/uapi/linux/keyctl.h1
-rw-r--r--include/uapi/linux/mic_common.h40
-rw-r--r--include/uapi/linux/net_tstamp.h16
-rw-r--r--include/uapi/linux/netconf.h1
-rw-r--r--include/uapi/linux/netfilter.h1
-rw-r--r--include/uapi/linux/netfilter/Kbuild2
-rw-r--r--include/uapi/linux/netfilter/nf_nat.h12
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h30
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_queue.h5
-rw-r--r--include/uapi/linux/netfilter/xt_cgroup.h11
-rw-r--r--include/uapi/linux/netfilter/xt_ipcomp.h16
-rw-r--r--include/uapi/linux/netfilter/xt_osf.h3
-rw-r--r--include/uapi/linux/netlink_diag.h1
-rw-r--r--include/uapi/linux/openvswitch.h14
-rw-r--r--include/uapi/linux/packet_diag.h1
-rw-r--r--include/uapi/linux/pci_regs.h77
-rw-r--r--include/uapi/linux/perf_event.h1
-rw-r--r--include/uapi/linux/pkt_sched.h53
-rw-r--r--include/uapi/linux/raid/md_p.h1
-rw-r--r--include/uapi/linux/sctp.h5
-rw-r--r--include/uapi/linux/snmp.h1
-rw-r--r--include/uapi/linux/sockios.h3
-rw-r--r--include/uapi/linux/unix_diag.h1
-rw-r--r--include/uapi/sound/compress_offload.h6
-rw-r--r--include/xen/interface/io/blkif.h10
-rw-r--r--init/Kconfig33
-rw-r--r--init/main.c2
-rw-r--r--ipc/shm.c37
-rw-r--r--kernel/.gitignore1
-rw-r--r--kernel/Makefile51
-rw-r--r--kernel/audit.c153
-rw-r--r--kernel/audit.h3
-rw-r--r--kernel/auditfilter.c3
-rw-r--r--kernel/auditsc.c133
-rw-r--r--kernel/bounds.c2
-rw-r--r--kernel/cgroup.c92
-rw-r--r--kernel/cpuset.c8
-rw-r--r--kernel/events/core.c29
-rw-r--r--kernel/extable.c4
-rw-r--r--kernel/fork.c1
-rw-r--r--kernel/freezer.c6
-rw-r--r--kernel/futex.c7
-rw-r--r--kernel/irq/pm.c2
-rw-r--r--kernel/kexec.c5
-rw-r--r--kernel/modsign_certificate.S12
-rw-r--r--kernel/modsign_pubkey.c104
-rw-r--r--kernel/module-internal.h2
-rw-r--r--kernel/module_signing.c11
-rw-r--r--kernel/padata.c9
-rw-r--r--kernel/power/console.c1
-rw-r--r--kernel/power/snapshot.c3
-rw-r--r--kernel/power/user.c1
-rw-r--r--kernel/rcu/tree_plugin.h4
-rw-r--r--kernel/reboot.c2
-rw-r--r--kernel/sched/core.c10
-rw-r--r--kernel/sched/fair.c178
-rw-r--r--kernel/sched/rt.c14
-rw-r--r--kernel/system_certificates.S20
-rw-r--r--kernel/system_keyring.c105
-rw-r--r--kernel/time/tick-common.c15
-rw-r--r--kernel/time/tick-sched.c25
-rw-r--r--kernel/time/timekeeping.c2
-rw-r--r--kernel/timer.c5
-rw-r--r--kernel/trace/ftrace.c66
-rw-r--r--kernel/trace/trace_event_perf.c8
-rw-r--r--kernel/trace/trace_events.c3
-rw-r--r--kernel/trace/trace_syscalls.c10
-rw-r--r--kernel/user.c4
-rw-r--r--kernel/user_namespace.c6
-rw-r--r--kernel/workqueue.c82
-rw-r--r--lib/Kconfig14
-rw-r--r--lib/Makefile5
-rw-r--r--lib/assoc_array.c1746
-rw-r--r--lib/hash.c39
-rw-r--r--lib/lockref.c9
-rw-r--r--lib/mpi/mpiutil.c3
-rw-r--r--lib/percpu_ida.c5
-rw-r--r--mm/Kconfig2
-rw-r--r--mm/compaction.c4
-rw-r--r--mm/huge_memory.c57
-rw-r--r--mm/hugetlb.c51
-rw-r--r--mm/memcontrol.c41
-rw-r--r--mm/memory-failure.c14
-rw-r--r--mm/memory.c9
-rw-r--r--mm/mempolicy.c18
-rw-r--r--mm/migrate.c130
-rw-r--r--mm/mprotect.c13
-rw-r--r--mm/page_alloc.c19
-rw-r--r--mm/pgtable-generic.c8
-rw-r--r--mm/rmap.c4
-rw-r--r--mm/shmem.c36
-rw-r--r--mm/slab.c571
-rw-r--r--mm/slub.c45
-rw-r--r--mm/swap.c143
-rw-r--r--net/802/hippi.c4
-rw-r--r--net/8021q/vlan_dev.c19
-rw-r--r--net/Kconfig15
-rw-r--r--net/batman-adv/bat_iv_ogm.c36
-rw-r--r--net/batman-adv/distributed-arp-table.c6
-rw-r--r--net/batman-adv/fragmentation.c8
-rw-r--r--net/batman-adv/icmp_socket.c6
-rw-r--r--net/batman-adv/main.c16
-rw-r--r--net/batman-adv/main.h2
-rw-r--r--net/batman-adv/network-coding.c22
-rw-r--r--net/batman-adv/originator.c2
-rw-r--r--net/batman-adv/packet.h124
-rw-r--r--net/batman-adv/routing.c30
-rw-r--r--net/batman-adv/send.c10
-rw-r--r--net/batman-adv/soft-interface.c18
-rw-r--r--net/batman-adv/translation-table.c8
-rw-r--r--net/bluetooth/bnep/bnep.h3
-rw-r--r--net/bridge/br_device.c10
-rw-r--r--net/bridge/br_fdb.c3
-rw-r--r--net/bridge/br_forward.c6
-rw-r--r--net/bridge/br_if.c2
-rw-r--r--net/bridge/br_input.c2
-rw-r--r--net/bridge/br_ioctl.c2
-rw-r--r--net/bridge/br_multicast.c4
-rw-r--r--net/bridge/br_netfilter.c2
-rw-r--r--net/bridge/br_netlink.c12
-rw-r--r--net/bridge/br_private.h22
-rw-r--r--net/bridge/br_stp_bpdu.c2
-rw-r--r--net/bridge/br_stp_timer.c2
-rw-r--r--net/bridge/br_sysfs_br.c249
-rw-r--r--net/bridge/br_sysfs_if.c20
-rw-r--r--net/bridge/br_vlan.c2
-rw-r--r--net/bridge/netfilter/ebt_log.c2
-rw-r--r--net/bridge/netfilter/ebt_snat.c2
-rw-r--r--net/bridge/netfilter/ebt_vlan.c3
-rw-r--r--net/bridge/netfilter/ebtable_broute.c6
-rw-r--r--net/bridge/netfilter/ebtable_filter.c9
-rw-r--r--net/bridge/netfilter/ebtable_nat.c9
-rw-r--r--net/bridge/netfilter/ebtables.c17
-rw-r--r--net/bridge/netfilter/nf_tables_bridge.c44
-rw-r--r--net/can/gw.c6
-rw-r--r--net/compat.c2
-rw-r--r--net/core/Makefile3
-rw-r--r--net/core/dev.c301
-rw-r--r--net/core/dev_addr_lists.c97
-rw-r--r--net/core/dev_ioctl.c2
-rw-r--r--net/core/drop_monitor.c1
-rw-r--r--net/core/flow_dissector.c6
-rw-r--r--net/core/neighbour.c469
-rw-r--r--net/core/net-sysfs.c6
-rw-r--r--net/core/net-sysfs.h2
-rw-r--r--net/core/netclassid_cgroup.c120
-rw-r--r--net/core/netpoll.c11
-rw-r--r--net/core/netprio_cgroup.c2
-rw-r--r--net/core/pktgen.c7
-rw-r--r--net/core/rtnetlink.c22
-rw-r--r--net/core/skbuff.c94
-rw-r--r--net/core/sock.c37
-rw-r--r--net/core/sysctl_net_core.c3
-rw-r--r--net/dcb/dcbevent.c3
-rw-r--r--net/dcb/dcbnl.c3
-rw-r--r--net/dccp/ccids/lib/tfrc.c2
-rw-r--r--net/dccp/ccids/lib/tfrc.h1
-rw-r--r--net/dccp/dccp.h1
-rw-r--r--net/dccp/ipv4.c2
-rw-r--r--net/dccp/ipv6.c12
-rw-r--r--net/dccp/options.c32
-rw-r--r--net/dccp/probe.c19
-rw-r--r--net/decnet/dn_dev.c13
-rw-r--r--net/decnet/dn_neigh.c28
-rw-r--r--net/decnet/dn_route.c2
-rw-r--r--net/dns_resolver/dns_key.c3
-rw-r--r--net/dns_resolver/dns_query.c3
-rw-r--r--net/dns_resolver/internal.h3
-rw-r--r--net/hsr/hsr_framereg.c16
-rw-r--r--net/hsr/hsr_netlink.c28
-rw-r--r--net/ieee802154/wpan-class.c2
-rw-r--r--net/ipv4/Makefile4
-rw-r--r--net/ipv4/af_inet.c42
-rw-r--r--net/ipv4/arp.c53
-rw-r--r--net/ipv4/cipso_ipv4.c12
-rw-r--r--net/ipv4/datagram.c2
-rw-r--r--net/ipv4/devinet.c64
-rw-r--r--net/ipv4/fib_lookup.h2
-rw-r--r--net/ipv4/fib_rules.c5
-rw-r--r--net/ipv4/fib_semantics.c5
-rw-r--r--net/ipv4/gre_demux.c9
-rw-r--r--net/ipv4/gre_offload.c178
-rw-r--r--net/ipv4/icmp.c4
-rw-r--r--net/ipv4/igmp.c70
-rw-r--r--net/ipv4/inet_diag.c16
-rw-r--r--net/ipv4/inet_lro.c173
-rw-r--r--net/ipv4/inetpeer.c11
-rw-r--r--net/ipv4/ip_fragment.c2
-rw-r--r--net/ipv4/ip_gre.c1
-rw-r--r--net/ipv4/ip_options.c42
-rw-r--r--net/ipv4/ip_output.c5
-rw-r--r--net/ipv4/ip_sockglue.c5
-rw-r--r--net/ipv4/ip_tunnel.c142
-rw-r--r--net/ipv4/ip_tunnel_core.c5
-rw-r--r--net/ipv4/ip_vti.c2
-rw-r--r--net/ipv4/ipmr.c2
-rw-r--r--net/ipv4/netfilter/Kconfig18
-rw-r--r--net/ipv4/netfilter/Makefile1
-rw-r--r--net/ipv4/netfilter/ipt_REJECT.c140
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c1
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c6
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.c15
-rw-r--r--net/ipv4/netfilter/nf_tables_arp.c44
-rw-r--r--net/ipv4/netfilter/nf_tables_ipv4.c60
-rw-r--r--net/ipv4/netfilter/nft_chain_nat_ipv4.c10
-rw-r--r--net/ipv4/netfilter/nft_chain_route_ipv4.c10
-rw-r--r--net/ipv4/ping.c17
-rw-r--r--net/ipv4/proc.c9
-rw-r--r--net/ipv4/protocol.c8
-rw-r--r--net/ipv4/raw.c4
-rw-r--r--net/ipv4/syncookies.c2
-rw-r--r--net/ipv4/sysctl_net_ipv4.c25
-rw-r--r--net/ipv4/tcp.c75
-rw-r--r--net/ipv4/tcp_input.c6
-rw-r--r--net/ipv4/tcp_ipv4.c6
-rw-r--r--net/ipv4/tcp_memcontrol.c9
-rw-r--r--net/ipv4/tcp_minisocks.c2
-rw-r--r--net/ipv4/tcp_offload.c45
-rw-r--r--net/ipv4/tcp_output.c141
-rw-r--r--net/ipv4/tcp_probe.c4
-rw-r--r--net/ipv4/tcp_yeah.c20
-rw-r--r--net/ipv4/udp.c55
-rw-r--r--net/ipv4/udp_offload.c37
-rw-r--r--net/ipv4/xfrm4_mode_beet.c2
-rw-r--r--net/ipv4/xfrm4_state.c2
-rw-r--r--net/ipv6/addrconf.c295
-rw-r--r--net/ipv6/af_inet6.c7
-rw-r--r--net/ipv6/ah6.c3
-rw-r--r--net/ipv6/datagram.c11
-rw-r--r--net/ipv6/esp6.c3
-rw-r--r--net/ipv6/fib6_rules.c6
-rw-r--r--net/ipv6/icmp.c12
-rw-r--r--net/ipv6/inet6_connection_sock.c4
-rw-r--r--net/ipv6/ip6_fib.c6
-rw-r--r--net/ipv6/ip6_gre.c13
-rw-r--r--net/ipv6/ip6_offload.c53
-rw-r--r--net/ipv6/ip6_output.c83
-rw-r--r--net/ipv6/ip6_tunnel.c36
-rw-r--r--net/ipv6/ip6_vti.c29
-rw-r--r--net/ipv6/ipcomp6.c3
-rw-r--r--net/ipv6/ipv6_sockglue.c9
-rw-r--r--net/ipv6/mip6.c3
-rw-r--r--net/ipv6/ndisc.c58
-rw-r--r--net/ipv6/netfilter/Kconfig12
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c179
-rw-r--r--net/ipv6/netfilter/ip6t_SYNPROXY.c1
-rw-r--r--net/ipv6/netfilter/nf_tables_ipv6.c65
-rw-r--r--net/ipv6/netfilter/nft_chain_nat_ipv6.c10
-rw-r--r--net/ipv6/netfilter/nft_chain_route_ipv6.c10
-rw-r--r--net/ipv6/ping.c5
-rw-r--r--net/ipv6/protocol.c4
-rw-r--r--net/ipv6/raw.c7
-rw-r--r--net/ipv6/route.c102
-rw-r--r--net/ipv6/sit.c68
-rw-r--r--net/ipv6/syncookies.c2
-rw-r--r--net/ipv6/sysctl_net_ipv6.c8
-rw-r--r--net/ipv6/tcp_ipv6.c38
-rw-r--r--net/ipv6/tcpv6_offload.c38
-rw-r--r--net/ipv6/tunnel6.c3
-rw-r--r--net/ipv6/udp.c12
-rw-r--r--net/ipv6/xfrm6_mode_ro.c3
-rw-r--r--net/ipv6/xfrm6_tunnel.c3
-rw-r--r--net/irda/af_irda.c4
-rw-r--r--net/irda/discovery.c4
-rw-r--r--net/irda/ircomm/ircomm_core.c4
-rw-r--r--net/irda/ircomm/ircomm_event.c4
-rw-r--r--net/irda/ircomm/ircomm_lmp.c4
-rw-r--r--net/irda/ircomm/ircomm_param.c4
-rw-r--r--net/irda/ircomm/ircomm_ttp.c4
-rw-r--r--net/irda/ircomm/ircomm_tty.c4
-rw-r--r--net/irda/ircomm/ircomm_tty_attach.c4
-rw-r--r--net/irda/ircomm/ircomm_tty_ioctl.c4
-rw-r--r--net/irda/irda_device.c4
-rw-r--r--net/irda/irlap.c4
-rw-r--r--net/irda/parameters.c4
-rw-r--r--net/irda/qos.c4
-rw-r--r--net/key/af_key.c29
-rw-r--r--net/l2tp/l2tp_ip6.c5
-rw-r--r--net/llc/af_llc.c5
-rw-r--r--net/llc/llc_conn.c4
-rw-r--r--net/llc/llc_core.c5
-rw-r--r--net/llc/llc_sap.c4
-rw-r--r--net/mac80211/iface.c7
-rw-r--r--net/mac802154/wpan.c4
-rw-r--r--net/netfilter/Kconfig86
-rw-r--r--net/netfilter/Makefile6
-rw-r--r--net/netfilter/ipset/ip_set_core.c28
-rw-r--r--net/netfilter/ipset/ip_set_hash_netnet.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_nfct.c9
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c5
-rw-r--r--net/netfilter/nf_conntrack_core.c15
-rw-r--r--net/netfilter/nf_conntrack_netlink.c12
-rw-r--r--net/netfilter/nf_conntrack_proto.c6
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c10
-rw-r--r--net/netfilter/nf_conntrack_seqadj.c5
-rw-r--r--net/netfilter/nf_conntrack_timestamp.c1
-rw-r--r--net/netfilter/nf_nat_core.c4
-rw-r--r--net/netfilter/nf_nat_proto_common.c10
-rw-r--r--net/netfilter/nf_tables_api.c397
-rw-r--r--net/netfilter/nf_tables_core.c11
-rw-r--r--net/netfilter/nf_tables_inet.c104
-rw-r--r--net/netfilter/nfnetlink_log.c1
-rw-r--r--net/netfilter/nfnetlink_queue_core.c94
-rw-r--r--net/netfilter/nft_compat.c8
-rw-r--r--net/netfilter/nft_ct.c199
-rw-r--r--net/netfilter/nft_exthdr.c2
-rw-r--r--net/netfilter/nft_log.c2
-rw-r--r--net/netfilter/nft_meta.c157
-rw-r--r--net/netfilter/nft_meta_target.c117
-rw-r--r--net/netfilter/nft_queue.c134
-rw-r--r--net/netfilter/nft_reject.c (renamed from net/ipv4/netfilter/nft_reject_ipv4.c)28
-rw-r--r--net/netfilter/xt_CT.c4
-rw-r--r--net/netfilter/xt_NFQUEUE.c80
-rw-r--r--net/netfilter/xt_cgroup.c71
-rw-r--r--net/netfilter/xt_connmark.c3
-rw-r--r--net/netfilter/xt_hashlimit.c25
-rw-r--r--net/netfilter/xt_ipcomp.c111
-rw-r--r--net/netfilter/xt_osf.c3
-rw-r--r--net/netlabel/netlabel_addrlist.c3
-rw-r--r--net/netlabel/netlabel_addrlist.h3
-rw-r--r--net/netlabel/netlabel_cipso_v4.c3
-rw-r--r--net/netlabel/netlabel_cipso_v4.h3
-rw-r--r--net/netlabel/netlabel_domainhash.c3
-rw-r--r--net/netlabel/netlabel_domainhash.h3
-rw-r--r--net/netlabel/netlabel_kapi.c3
-rw-r--r--net/netlabel/netlabel_mgmt.c3
-rw-r--r--net/netlabel/netlabel_mgmt.h3
-rw-r--r--net/netlabel/netlabel_unlabeled.c3
-rw-r--r--net/netlabel/netlabel_unlabeled.h3
-rw-r--r--net/netlabel/netlabel_user.c3
-rw-r--r--net/netlabel/netlabel_user.h3
-rw-r--r--net/netlink/af_netlink.c37
-rw-r--r--net/netlink/genetlink.c34
-rw-r--r--net/openvswitch/actions.c10
-rw-r--r--net/openvswitch/datapath.c231
-rw-r--r--net/openvswitch/datapath.h6
-rw-r--r--net/openvswitch/flow.c96
-rw-r--r--net/openvswitch/flow.h33
-rw-r--r--net/openvswitch/flow_netlink.c66
-rw-r--r--net/openvswitch/flow_netlink.h1
-rw-r--r--net/openvswitch/flow_table.c64
-rw-r--r--net/openvswitch/flow_table.h6
-rw-r--r--net/openvswitch/vport.c18
-rw-r--r--net/openvswitch/vport.h3
-rw-r--r--net/packet/af_packet.c202
-rw-r--r--net/packet/internal.h1
-rw-r--r--net/rds/ib.c3
-rw-r--r--net/rds/ib_send.c5
-rw-r--r--net/rose/af_rose.c18
-rw-r--r--net/rose/rose_dev.c2
-rw-r--r--net/sched/Kconfig23
-rw-r--r--net/sched/Makefile2
-rw-r--r--net/sched/act_api.c323
-rw-r--r--net/sched/act_csum.c25
-rw-r--r--net/sched/act_gact.c22
-rw-r--r--net/sched/act_ipt.c33
-rw-r--r--net/sched/act_mirred.c20
-rw-r--r--net/sched/act_nat.c24
-rw-r--r--net/sched/act_pedit.c22
-rw-r--r--net/sched/act_police.c74
-rw-r--r--net/sched/act_simple.c30
-rw-r--r--net/sched/act_skbedit.c24
-rw-r--r--net/sched/cls_api.c119
-rw-r--r--net/sched/cls_basic.c13
-rw-r--r--net/sched/cls_bpf.c15
-rw-r--r--net/sched/cls_cgroup.c125
-rw-r--r--net/sched/cls_flow.c15
-rw-r--r--net/sched/cls_fw.c13
-rw-r--r--net/sched/cls_route.c13
-rw-r--r--net/sched/cls_rsvp.h13
-rw-r--r--net/sched/cls_tcindex.c17
-rw-r--r--net/sched/cls_u32.c15
-rw-r--r--net/sched/em_meta.c159
-rw-r--r--net/sched/sch_api.c10
-rw-r--r--net/sched/sch_cbq.c10
-rw-r--r--net/sched/sch_dsmark.c39
-rw-r--r--net/sched/sch_fq.c36
-rw-r--r--net/sched/sch_generic.c6
-rw-r--r--net/sched/sch_gred.c4
-rw-r--r--net/sched/sch_hhf.c745
-rw-r--r--net/sched/sch_htb.c55
-rw-r--r--net/sched/sch_mq.c13
-rw-r--r--net/sched/sch_mqprio.c10
-rw-r--r--net/sched/sch_multiq.c3
-rw-r--r--net/sched/sch_netem.c26
-rw-r--r--net/sched/sch_pie.c555
-rw-r--r--net/sched/sch_sfq.c10
-rw-r--r--net/sched/sch_tbf.c162
-rw-r--r--net/sctp/associola.c75
-rw-r--r--net/sctp/auth.c32
-rw-r--r--net/sctp/bind_addr.c5
-rw-r--r--net/sctp/chunk.c9
-rw-r--r--net/sctp/command.c5
-rw-r--r--net/sctp/debug.c5
-rw-r--r--net/sctp/endpointola.c5
-rw-r--r--net/sctp/input.c82
-rw-r--r--net/sctp/inqueue.c5
-rw-r--r--net/sctp/ipv6.c14
-rw-r--r--net/sctp/objcnt.c7
-rw-r--r--net/sctp/output.c30
-rw-r--r--net/sctp/outqueue.c51
-rw-r--r--net/sctp/primitive.c5
-rw-r--r--net/sctp/probe.c17
-rw-r--r--net/sctp/proc.c13
-rw-r--r--net/sctp/protocol.c9
-rw-r--r--net/sctp/sm_make_chunk.c76
-rw-r--r--net/sctp/sm_sideeffect.c11
-rw-r--r--net/sctp/sm_statefuns.c44
-rw-r--r--net/sctp/sm_statetable.c7
-rw-r--r--net/sctp/socket.c136
-rw-r--r--net/sctp/ssnmap.c5
-rw-r--r--net/sctp/sysctl.c83
-rw-r--r--net/sctp/transport.c7
-rw-r--r--net/sctp/tsnmap.c5
-rw-r--r--net/sctp/ulpevent.c5
-rw-r--r--net/sctp/ulpqueue.c16
-rw-r--r--net/socket.c108
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c2
-rw-r--r--net/sunrpc/rpc_pipe.c11
-rw-r--r--net/tipc/bcast.c8
-rw-r--r--net/tipc/bearer.c342
-rw-r--r--net/tipc/bearer.h56
-rw-r--r--net/tipc/core.c15
-rw-r--r--net/tipc/core.h2
-rw-r--r--net/tipc/discover.c28
-rw-r--r--net/tipc/eth_media.c326
-rw-r--r--net/tipc/handler.c11
-rw-r--r--net/tipc/ib_media.c319
-rw-r--r--net/tipc/link.c222
-rw-r--r--net/tipc/link.h25
-rw-r--r--net/tipc/name_table.c3
-rw-r--r--net/tipc/node.c15
-rw-r--r--net/tipc/node.h3
-rw-r--r--net/tipc/port.c54
-rw-r--r--net/tipc/port.h6
-rw-r--r--net/tipc/socket.c78
-rw-r--r--net/unix/af_unix.c25
-rw-r--r--net/x25/af_x25.c4
-rw-r--r--net/x25/x25_dev.c10
-rw-r--r--net/x25/x25_facilities.c14
-rw-r--r--net/x25/x25_forward.c5
-rw-r--r--net/x25/x25_in.c4
-rw-r--r--net/x25/x25_link.c6
-rw-r--r--net/x25/x25_subr.c6
-rw-r--r--net/xfrm/xfrm_policy.c165
-rw-r--r--net/xfrm/xfrm_state.c149
-rw-r--r--net/xfrm/xfrm_user.c56
-rw-r--r--scripts/asn1_compiler.c2
-rwxr-xr-xscripts/checkpatch.pl1
-rw-r--r--scripts/link-vmlinux.sh4
-rwxr-xr-xscripts/recordmcount.pl3
-rw-r--r--scripts/sortextable.c5
-rw-r--r--security/Makefile1
-rw-r--r--security/apparmor/audit.c14
-rw-r--r--security/apparmor/capability.c15
-rw-r--r--security/apparmor/domain.c16
-rw-r--r--security/apparmor/include/audit.h1
-rw-r--r--security/apparmor/include/capability.h5
-rw-r--r--security/apparmor/include/ipc.h4
-rw-r--r--security/apparmor/ipc.c9
-rw-r--r--security/apparmor/lsm.c2
-rw-r--r--security/capability.c15
-rw-r--r--security/integrity/digsig.c7
-rw-r--r--security/integrity/digsig_asymmetric.c11
-rw-r--r--security/integrity/evm/evm_main.c4
-rw-r--r--security/integrity/evm/evm_posix_acl.c3
-rw-r--r--security/integrity/iint.c2
-rw-r--r--security/integrity/ima/Kconfig64
-rw-r--r--security/integrity/ima/Makefile2
-rw-r--r--security/integrity/ima/ima.h106
-rw-r--r--security/integrity/ima/ima_api.c154
-rw-r--r--security/integrity/ima/ima_appraise.c106
-rw-r--r--security/integrity/ima/ima_crypto.c141
-rw-r--r--security/integrity/ima/ima_fs.c75
-rw-r--r--security/integrity/ima/ima_init.c40
-rw-r--r--security/integrity/ima/ima_main.c63
-rw-r--r--security/integrity/ima/ima_policy.c1
-rw-r--r--security/integrity/ima/ima_queue.c10
-rw-r--r--security/integrity/ima/ima_template.c187
-rw-r--r--security/integrity/ima/ima_template_lib.c351
-rw-r--r--security/integrity/ima/ima_template_lib.h49
-rw-r--r--security/integrity/integrity.h40
-rw-r--r--security/keys/Kconfig29
-rw-r--r--security/keys/Makefile2
-rw-r--r--security/keys/big_key.c207
-rw-r--r--security/keys/compat.c3
-rw-r--r--security/keys/gc.c47
-rw-r--r--security/keys/internal.h74
-rw-r--r--security/keys/key.c110
-rw-r--r--security/keys/keyctl.c3
-rw-r--r--security/keys/keyring.c1535
-rw-r--r--security/keys/persistent.c167
-rw-r--r--security/keys/proc.c17
-rw-r--r--security/keys/process_keys.c141
-rw-r--r--security/keys/request_key.c60
-rw-r--r--security/keys/request_key_auth.c31
-rw-r--r--security/keys/sysctl.c11
-rw-r--r--security/keys/user_defined.c18
-rw-r--r--security/lsm_audit.c3
-rw-r--r--security/security.c13
-rw-r--r--security/selinux/hooks.c249
-rw-r--r--security/selinux/include/objsec.h4
-rw-r--r--security/selinux/include/security.h13
-rw-r--r--security/selinux/include/xfrm.h49
-rw-r--r--security/selinux/netlabel.c6
-rw-r--r--security/selinux/netnode.c2
-rw-r--r--security/selinux/nlmsgtab.c2
-rw-r--r--security/selinux/selinuxfs.c4
-rw-r--r--security/selinux/ss/ebitmap.c20
-rw-r--r--security/selinux/ss/ebitmap.h10
-rw-r--r--security/selinux/ss/mls.c22
-rw-r--r--security/selinux/ss/mls_types.h2
-rw-r--r--security/selinux/ss/policydb.c3
-rw-r--r--security/selinux/ss/services.c24
-rw-r--r--security/selinux/xfrm.c481
-rw-r--r--security/smack/smack.h12
-rw-r--r--security/smack/smack_access.c10
-rw-r--r--security/smack/smack_lsm.c11
-rw-r--r--security/smack/smackfs.c10
-rw-r--r--sound/atmel/abdac.c3
-rw-r--r--sound/core/pcm_lib.c2
-rw-r--r--sound/firewire/amdtp.c15
-rw-r--r--sound/firewire/amdtp.h1
-rw-r--r--sound/firewire/dice.c4
-rw-r--r--sound/pci/hda/Kconfig3
-rw-r--r--sound/pci/hda/hda_codec.c4
-rw-r--r--sound/pci/hda/hda_codec.h1
-rw-r--r--sound/pci/hda/hda_generic.c126
-rw-r--r--sound/pci/hda/hda_generic.h3
-rw-r--r--sound/pci/hda/hda_intel.c16
-rw-r--r--sound/pci/hda/patch_analog.c16
-rw-r--r--sound/pci/hda/patch_conexant.c24
-rw-r--r--sound/pci/hda/patch_hdmi.c32
-rw-r--r--sound/pci/hda/patch_realtek.c215
-rw-r--r--sound/pci/hda/patch_sigmatel.c3
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c30
-rw-r--r--sound/soc/atmel/sam9x5_wm8731.c4
-rw-r--r--sound/soc/codecs/ab8500-codec.c66
-rw-r--r--sound/soc/codecs/arizona.c4
-rw-r--r--sound/soc/codecs/wm5110.c70
-rw-r--r--sound/soc/codecs/wm8731.c4
-rw-r--r--sound/soc/codecs/wm8904.c2
-rw-r--r--sound/soc/codecs/wm8962.c13
-rw-r--r--sound/soc/codecs/wm8990.c2
-rw-r--r--sound/soc/codecs/wm_adsp.c10
-rw-r--r--sound/soc/davinci/davinci-pcm.c2
-rw-r--r--sound/soc/fsl/imx-wm8962.c2
-rw-r--r--sound/soc/fsl/pcm030-audio-fabric.c3
-rw-r--r--sound/soc/kirkwood/kirkwood-i2s.c46
-rw-r--r--sound/soc/omap/n810.c4
-rw-r--r--sound/soc/sh/Kconfig1
-rw-r--r--sound/soc/sh/rcar/core.c13
-rw-r--r--sound/soc/sh/rcar/scu.c2
-rw-r--r--sound/soc/soc-core.c4
-rw-r--r--sound/soc/soc-devres.c4
-rw-r--r--sound/soc/soc-generic-dmaengine-pcm.c38
-rw-r--r--sound/soc/soc-pcm.c23
-rw-r--r--sound/soc/tegra/tegra20_i2s.c6
-rw-r--r--sound/soc/tegra/tegra20_spdif.c10
-rw-r--r--sound/soc/tegra/tegra30_i2s.c6
-rw-r--r--sound/usb/endpoint.c16
-rw-r--r--sound/usb/mixer_quirks.c2
-rw-r--r--tools/lib/traceevent/event-parse.c25
-rw-r--r--tools/net/Makefile23
-rw-r--r--tools/net/bpf_asm.c52
-rw-r--r--tools/net/bpf_dbg.c1404
-rw-r--r--tools/net/bpf_exp.l143
-rw-r--r--tools/net/bpf_exp.y762
-rw-r--r--tools/perf/util/header.c6
-rw-r--r--tools/perf/util/thread.c11
-rw-r--r--tools/power/cpupower/man/cpupower-idle-info.13
-rw-r--r--tools/power/cpupower/man/cpupower-idle-set.171
-rw-r--r--tools/power/cpupower/utils/cpupower-set.c6
-rw-r--r--tools/power/cpupower/utils/helpers/sysfs.c4
-rw-r--r--tools/power/x86/turbostat/turbostat.c197
-rw-r--r--tools/usb/Makefile5
-rw-r--r--virt/kvm/kvm_main.c8
2694 files changed, 75732 insertions, 29313 deletions
diff --git a/CREDITS b/CREDITS
index 4fc997d58ab2..4c7738f49357 100644
--- a/CREDITS
+++ b/CREDITS
@@ -655,6 +655,11 @@ S: Stanford University
S: Stanford, California 94305
S: USA
+N: Carlos Chinea
+E: carlos.chinea@nokia.com
+E: cch.devel@gmail.com
+D: Author of HSI Subsystem
+
N: Randolph Chung
E: tausq@debian.org
D: Linux/PA-RISC hacker
diff --git a/Documentation/Changes b/Documentation/Changes
index b17580885273..07c75d18154e 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -196,13 +196,6 @@ chmod 0644 /dev/cpu/microcode
as root before you can use this. You'll probably also want to
get the user-space microcode_ctl utility to use with this.
-Powertweak
-----------
-
-If you are running v0.1.17 or earlier, you should upgrade to
-version v0.99.0 or higher. Running old versions may cause problems
-with programs using shared memory.
-
udev
----
udev is a userspace application for populating /dev dynamically with
@@ -366,10 +359,6 @@ Intel P6 microcode
------------------
o <http://www.urbanmyth.org/microcode/>
-Powertweak
-----------
-o <http://powertweak.sourceforge.net/>
-
udev
----
o <http://www.kernel.org/pub/linux/utils/kernel/hotplug/udev.html>
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index 6c9d9d37c83a..f5170082bdb3 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -58,7 +58,7 @@
</sect1>
<sect1><title>Wait queues and Wake events</title>
!Iinclude/linux/wait.h
-!Ekernel/wait.c
+!Ekernel/sched/wait.c
</sect1>
<sect1><title>High-resolution timers</title>
!Iinclude/linux/ktime.h
diff --git a/Documentation/DocBook/media/v4l/vidioc-expbuf.xml b/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
index e287c8fc803b..4165e7bfa4ff 100644
--- a/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
@@ -73,7 +73,8 @@ range from zero to the maximal number of valid planes for the currently active
format. For the single-planar API, applications must set <structfield> plane
</structfield> to zero. Additional flags may be posted in the <structfield>
flags </structfield> field. Refer to a manual for open() for details.
-Currently only O_CLOEXEC is supported. All other fields must be set to zero.
+Currently only O_CLOEXEC, O_RDONLY, O_WRONLY, and O_RDWR are supported. All
+other fields must be set to zero.
In the case of multi-planar API, every plane is exported separately using
multiple <constant> VIDIOC_EXPBUF </constant> calls. </para>
@@ -170,8 +171,9 @@ multi-planar API. Otherwise this value must be set to zero. </entry>
<entry>__u32</entry>
<entry><structfield>flags</structfield></entry>
<entry>Flags for the newly created file, currently only <constant>
-O_CLOEXEC </constant> is supported, refer to the manual of open() for more
-details.</entry>
+O_CLOEXEC </constant>, <constant>O_RDONLY</constant>, <constant>O_WRONLY
+</constant>, and <constant>O_RDWR</constant> are supported, refer to the manual
+of open() for more details.</entry>
</row>
<row>
<entry>__s32</entry>
diff --git a/Documentation/assoc_array.txt b/Documentation/assoc_array.txt
new file mode 100644
index 000000000000..2f2c6cdd73c0
--- /dev/null
+++ b/Documentation/assoc_array.txt
@@ -0,0 +1,574 @@
+ ========================================
+ GENERIC ASSOCIATIVE ARRAY IMPLEMENTATION
+ ========================================
+
+Contents:
+
+ - Overview.
+
+ - The public API.
+ - Edit script.
+ - Operations table.
+ - Manipulation functions.
+ - Access functions.
+ - Index key form.
+
+ - Internal workings.
+ - Basic internal tree layout.
+ - Shortcuts.
+ - Splitting and collapsing nodes.
+ - Non-recursive iteration.
+ - Simultaneous alteration and iteration.
+
+
+========
+OVERVIEW
+========
+
+This associative array implementation is an object container with the following
+properties:
+
+ (1) Objects are opaque pointers. The implementation does not care where they
+ point (if anywhere) or what they point to (if anything).
+
+ [!] NOTE: Pointers to objects _must_ be zero in the least significant bit.
+
+ (2) Objects do not need to contain linkage blocks for use by the array. This
+ permits an object to be located in multiple arrays simultaneously.
+ Rather, the array is made up of metadata blocks that point to objects.
+
+ (3) Objects require index keys to locate them within the array.
+
+ (4) Index keys must be unique. Inserting an object with the same key as one
+ already in the array will replace the old object.
+
+ (5) Index keys can be of any length and can be of different lengths.
+
+ (6) Index keys should encode the length early on, before any variation due to
+ length is seen.
+
+ (7) Index keys can include a hash to scatter objects throughout the array.
+
+ (8) The array can be iterated over.  The objects will not necessarily come
+     out in key order.
+
+ (9) The array can be iterated over whilst it is being modified, provided the
+ RCU readlock is being held by the iterator. Note, however, under these
+ circumstances, some objects may be seen more than once. If this is a
+ problem, the iterator should lock against modification. Objects will not
+ be missed, however, unless deleted.
+
+(10) Objects in the array can be looked up by means of their index key.
+
+(11) Objects can be looked up whilst the array is being modified, provided the
+ RCU readlock is being held by the thread doing the look up.
+
+The implementation uses a tree of 16-pointer nodes internally that are indexed
+on each level by nibbles from the index key in the same manner as in a radix
+tree. To improve memory efficiency, shortcuts can be emplaced to skip over
+what would otherwise be a series of single-occupancy nodes. Further, nodes
+pack leaf object pointers into spare space in the node rather than making an
+extra branch until such time as an object needs to be added to a full node.
+
+
+==============
+THE PUBLIC API
+==============
+
+The public API can be found in <linux/assoc_array.h>. The associative array is
+rooted on the following structure:
+
+ struct assoc_array {
+ ...
+ };
+
+The code is selected by enabling CONFIG_ASSOCIATIVE_ARRAY.
+
+
+EDIT SCRIPT
+-----------
+
+The insertion and deletion functions produce an 'edit script' that can later be
+applied to effect the changes without risking ENOMEM. This retains the
+preallocated metadata blocks that will be installed in the internal tree and
+keeps track of the metadata blocks that will be removed from the tree when the
+script is applied.
+
+This is also used to keep track of dead blocks and dead objects after the
+script has been applied so that they can be freed later. The freeing is done
+after an RCU grace period has passed - thus allowing access functions to
+proceed under the RCU read lock.
+
+The script appears outside of the API as a pointer of the type:
+
+ struct assoc_array_edit;
+
+There are two functions for dealing with the script:
+
+ (1) Apply an edit script.
+
+ void assoc_array_apply_edit(struct assoc_array_edit *edit);
+
+ This will perform the edit functions, interpolating various write barriers
+ to permit accesses under the RCU read lock to continue. The edit script
+ will then be passed to call_rcu() to free it and any dead stuff it points
+ to.
+
+ (2) Cancel an edit script.
+
+ void assoc_array_cancel_edit(struct assoc_array_edit *edit);
+
+ This frees the edit script and all preallocated memory immediately. If
+ this was for insertion, the new object is _not_ released by this function,
+ but must rather be released by the caller.
+
+These functions are guaranteed not to fail.
+
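+For illustration only (my_array, my_ops, my_index_key and my_object are
+hypothetical names, not part of this API), an insertion might preallocate the
+script and then commit it:
+
+	struct assoc_array_edit *edit;
+
+	edit = assoc_array_insert(&my_array, &my_ops, &my_index_key, my_object);
+	if (IS_ERR(edit))
+		return PTR_ERR(edit);	/* -ENOMEM; the array is untouched */
+
+	/* ... any remaining checks that might still fail ... */
+
+	assoc_array_apply_edit(edit);	/* commit; cannot fail */
+
+Had one of the remaining checks failed, calling assoc_array_cancel_edit()
+instead would likewise have left the array untouched.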
+
+OPERATIONS TABLE
+----------------
+
+Various functions take a table of operations:
+
+ struct assoc_array_ops {
+ ...
+ };
+
+This points to a number of methods, all of which need to be provided:
+
+ (1) Get a chunk of index key from caller data:
+
+ unsigned long (*get_key_chunk)(const void *index_key, int level);
+
+ This should return a chunk of caller-supplied index key starting at the
+ *bit* position given by the level argument. The level argument will be a
+ multiple of ASSOC_ARRAY_KEY_CHUNK_SIZE and the function should return
+ ASSOC_ARRAY_KEY_CHUNK_SIZE bits. No error is possible.
+
+
+ (2) Get a chunk of an object's index key.
+
+ unsigned long (*get_object_key_chunk)(const void *object, int level);
+
+ As the previous function, but gets its data from an object in the array
+ rather than from a caller-supplied index key.
+
+
+ (3) See if this is the object we're looking for.
+
+ bool (*compare_object)(const void *object, const void *index_key);
+
+ Compare the object against an index key and return true if it matches and
+ false if it doesn't.
+
+
+ (4) Diff the index keys of two objects.
+
+ int (*diff_objects)(const void *object, const void *index_key);
+
+ Return the bit position at which the index key of the specified object
+ differs from the given index key or -1 if they are the same.
+
+
+ (5) Free an object.
+
+ void (*free_object)(void *object);
+
+ Free the specified object. Note that this may be called an RCU grace
+ period after assoc_array_apply_edit() was called, so synchronize_rcu() may
+ be necessary on module unloading.
+
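+As a purely illustrative sketch, an ops table for objects keyed on a single
+machine word might look like the following (struct foo and the foo_* names
+are hypothetical, not part of this API):
+
+	struct foo {
+		unsigned long	key;
+	};
+
+	static unsigned long foo_get_key_chunk(const void *index_key, int level)
+	{
+		return *(const unsigned long *)index_key; /* one-word keys */
+	}
+
+	static unsigned long foo_get_object_key_chunk(const void *object, int level)
+	{
+		return ((const struct foo *)object)->key;
+	}
+
+	static bool foo_compare_object(const void *object, const void *index_key)
+	{
+		return ((const struct foo *)object)->key ==
+			*(const unsigned long *)index_key;
+	}
+
+	static int foo_diff_objects(const void *object, const void *index_key)
+	{
+		unsigned long diff = ((const struct foo *)object)->key ^
+			*(const unsigned long *)index_key;
+
+		/* Bit position of the first difference, or -1 if identical. */
+		return diff ? __ffs(diff) : -1;
+	}
+
+	static void foo_free_object(void *object)
+	{
+		kfree(object);
+	}
+
+	static const struct assoc_array_ops foo_ops = {
+		.get_key_chunk		= foo_get_key_chunk,
+		.get_object_key_chunk	= foo_get_object_key_chunk,
+		.compare_object		= foo_compare_object,
+		.diff_objects		= foo_diff_objects,
+		.free_object		= foo_free_object,
+	};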
+
+MANIPULATION FUNCTIONS
+----------------------
+
+There are a number of functions for manipulating an associative array:
+
+ (1) Initialise an associative array.
+
+ void assoc_array_init(struct assoc_array *array);
+
+ This initialises the base structure for an associative array. It can't
+ fail.
+
+
+ (2) Insert/replace an object in an associative array.
+
+ struct assoc_array_edit *
+ assoc_array_insert(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key,
+ void *object);
+
+ This inserts the given object into the array. Note that the least
+ significant bit of the pointer must be zero as it's used to type-mark
+ pointers internally.
+
+ If an object already exists for that key then it will be replaced with the
+ new object and the old one will be freed automatically.
+
+ The index_key argument should hold index key information and is
+ passed to the methods in the ops table when they are called.
+
+ This function makes no alteration to the array itself, but rather returns
+ an edit script that must be applied. -ENOMEM is returned in the case of
+ an out-of-memory error.
+
+ The caller should lock exclusively against other modifiers of the array.
+
+
+ (3) Delete an object from an associative array.
+
+ struct assoc_array_edit *
+ assoc_array_delete(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key);
+
+ This deletes an object that matches the specified data from the array.
+
+ The index_key argument should hold index key information and is
+ passed to the methods in the ops table when they are called.
+
+ This function makes no alteration to the array itself, but rather returns
+ an edit script that must be applied. -ENOMEM is returned in the case of
+ an out-of-memory error. NULL will be returned if the specified object is
+ not found within the array.
+
+ The caller should lock exclusively against other modifiers of the array.
+
+
+ (4) Delete all objects from an associative array.
+
+ struct assoc_array_edit *
+ assoc_array_clear(struct assoc_array *array,
+ const struct assoc_array_ops *ops);
+
+ This deletes all the objects from an associative array and leaves it
+ completely empty.
+
+ This function makes no alteration to the array itself, but rather returns
+ an edit script that must be applied. -ENOMEM is returned in the case of
+ an out-of-memory error.
+
+ The caller should lock exclusively against other modifiers of the array.
+
+
+ (5) Destroy an associative array, deleting all objects.
+
+ void assoc_array_destroy(struct assoc_array *array,
+ const struct assoc_array_ops *ops);
+
+ This destroys the contents of the associative array and leaves it
+ completely empty. It is not permitted for another thread to be traversing
+ the array under the RCU read lock at the same time as this function is
+ destroying it as no RCU deferral is performed on memory release -
+ something that would require memory to be allocated.
+
+ The caller should lock exclusively against other modifiers and accessors
+ of the array.
+
+
+ (6) Garbage collect an associative array.
+
+ int assoc_array_gc(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ bool (*iterator)(void *object, void *iterator_data),
+ void *iterator_data);
+
+ This iterates over the objects in an associative array and passes each one
+     to iterator().  If iterator() returns true, the object is kept; before
+     returning true, iterator() must perform any appropriate refcount
+     incrementing on the object.  If it returns false, the object will be
+     freed.
+
+ The internal tree will be packed down if possible as part of the iteration
+ to reduce the number of nodes in it.
+
+ The iterator_data is passed directly to iterator() and is otherwise
+ ignored by the function.
+
+ The function will return 0 if successful and -ENOMEM if there wasn't
+ enough memory.
+
+ It is possible for other threads to iterate over or search the array under
+ the RCU read lock whilst this function is in progress. The caller should
+ lock exclusively against other modifiers of the array.
+
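+As a hypothetical sketch (foo_is_dead() and the usage count are assumed
+attributes of the object type, not part of this API), a selection callback
+might look like:
+
+	static bool foo_keep(void *object, void *iterator_data)
+	{
+		struct foo *f = object;
+
+		if (foo_is_dead(f))
+			return false;		/* let the gc dispose of it */
+		atomic_inc(&f->usage);		/* keeping it: take a ref first */
+		return true;
+	}
+
+	/* ret is 0 on success or -ENOMEM; on failure nothing was changed */
+	ret = assoc_array_gc(&my_array, &foo_ops, foo_keep, NULL);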
+
+ACCESS FUNCTIONS
+----------------
+
+There are two functions for accessing an associative array:
+
+ (1) Iterate over all the objects in an associative array.
+
+ int assoc_array_iterate(const struct assoc_array *array,
+ int (*iterator)(const void *object,
+ void *iterator_data),
+ void *iterator_data);
+
+ This passes each object in the array to the iterator callback function.
+ iterator_data is private data for that function.
+
+ This may be used on an array at the same time as the array is being
+ modified, provided the RCU read lock is held. Under such circumstances,
+ it is possible for the iteration function to see some objects twice. If
+ this is a problem, then modification should be locked against. The
+ iteration algorithm should not, however, miss any objects.
+
+ The function will return 0 if no objects were in the array or else it will
+ return the result of the last iterator function called. Iteration stops
+ immediately if any call to the iteration function results in a non-zero
+ return.
+
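+A hypothetical use of this (struct foo and the seq_file target are assumed,
+not part of this API), run under the RCU read lock so that the array may be
+modified concurrently:
+
+	static int foo_show(const void *object, void *iterator_data)
+	{
+		const struct foo *f = object;
+		struct seq_file *m = iterator_data;
+
+		seq_printf(m, "key=%lx\n", f->key);
+		return 0;	/* non-zero would stop the iteration */
+	}
+
+	rcu_read_lock();
+	ret = assoc_array_iterate(&my_array, foo_show, m);
+	rcu_read_unlock();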
+
+ (2) Find an object in an associative array.
+
+ void *assoc_array_find(const struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key);
+
+ This walks through the array's internal tree directly to the object
+     specified by the index key.
+
+ This may be used on an array at the same time as the array is being
+ modified, provided the RCU read lock is held.
+
+     The function will return the object if found or NULL if the object was
+     not found.
+
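+A sketch of such a lookup, pinning the object before dropping the RCU read
+lock (struct foo and its usage count are hypothetical):
+
+	struct foo *f;
+
+	rcu_read_lock();
+	f = assoc_array_find(&my_array, &foo_ops, &my_index_key);
+	if (f)
+		atomic_inc(&f->usage);	/* pin it before unlocking */
+	rcu_read_unlock();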
+
+INDEX KEY FORM
+--------------
+
+The index key can be of any form, but since the algorithms aren't told how long
+the key is, it is strongly recommended that the index key includes its length
+very early on before any variation due to the length would have an effect on
+comparisons.
+
+This will cause leaves with different length keys to scatter away from each
+other - and those with the same length keys to cluster together.
+
+It is also recommended that the index key begin with a hash of the rest of the
+key to maximise scattering throughout keyspace.
+
+The better the scattering, the wider and lower the internal tree will be.
+
+Poor scattering isn't too much of a problem as there are shortcuts and nodes
+can contain mixtures of leaves and metadata pointers.
+
+The index key is read in machine-word chunks.  Each chunk is subdivided into
+one nibble (4 bits) per level, so on a 32-bit CPU this is good for 8 levels and
+on a 64-bit CPU, 16 levels. Unless the scattering is really poor, it is
+unlikely that more than one word of any particular index key will have to be
+used.
+
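+As a hypothetical sketch, an index key following these recommendations might
+be laid out as:
+
+	struct foo_index_key {
+		u32	hash;	/* hash of data[], placed first for scattering */
+		u16	len;	/* length, encoded before the variable part */
+		char	data[];
+	};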
+
+=================
+INTERNAL WORKINGS
+=================
+
+The associative array data structure has an internal tree. This tree is
+constructed of two types of metadata blocks: nodes and shortcuts.
+
+A node is an array of slots. Each slot can contain one of four things:
+
+ (*) A NULL pointer, indicating that the slot is empty.
+
+ (*) A pointer to an object (a leaf).
+
+ (*) A pointer to a node at the next level.
+
+ (*) A pointer to a shortcut.
+
+
+BASIC INTERNAL TREE LAYOUT
+--------------------------
+
+Ignoring shortcuts for the moment, the nodes form a multilevel tree. The index
+key space is strictly subdivided by the nodes in the tree and nodes occur on
+fixed levels. For example:
+
+ Level: 0 1 2 3
+ =============== =============== =============== ===============
+ NODE D
+ NODE B NODE C +------>+---+
+ +------>+---+ +------>+---+ | | 0 |
+ NODE A | | 0 | | | 0 | | +---+
+ +---+ | +---+ | +---+ | : :
+ | 0 | | : : | : : | +---+
+ +---+ | +---+ | +---+ | | f |
+ | 1 |---+ | 3 |---+ | 7 |---+ +---+
+ +---+ +---+ +---+
+ : : : : | 8 |---+
+ +---+ +---+ +---+ | NODE E
+ | e |---+ | f | : : +------>+---+
+ +---+ | +---+ +---+ | 0 |
+ | f | | | f | +---+
+ +---+ | +---+ : :
+ | NODE F +---+
+ +------>+---+ | f |
+ | 0 | NODE G +---+
+ +---+ +------>+---+
+ : : | | 0 |
+ +---+ | +---+
+ | 6 |---+ : :
+ +---+ +---+
+ : : | f |
+ +---+ +---+
+ | f |
+ +---+
+
+In the above example, there are 7 nodes (A-G), each with 16 slots (0-f).
+Assuming no other metadata nodes in the tree, the key space is divided thus:
+
+ KEY PREFIX NODE
+ ========== ====
+ 137* D
+ 138* E
+ 13[0-69-f]* C
+ 1[0-24-f]* B
+ e6* G
+ e[0-57-f]* F
+ [02-df]* A
+
+So, for instance, keys with the following example index keys will be found in
+the appropriate nodes:
+
+ INDEX KEY PREFIX NODE
+ =============== ======= ====
+ 13694892892489 13 C
+ 13795289025897 137 D
+ 13889dde88793 138 E
+ 138bbb89003093 138 E
+	1394879524789	13	C
+ 1458952489 1 B
+ 9431809de993ba - A
+ b4542910809cd - A
+ e5284310def98 e F
+ e68428974237 e6 G
+ e7fffcbd443 e F
+ f3842239082 - A
+
+To save memory, if a node can hold all the leaves in its portion of keyspace,
+then the node will have all those leaves in it and will not have any metadata
+pointers - even if some of those leaves would like to be in the same slot.
+
+A node can contain a heterogeneous mix of leaves and metadata pointers.
+Metadata pointers must be in the slots that match their subdivisions of key
+space. The leaves can be in any slot not occupied by a metadata pointer. It
+is guaranteed that none of the leaves in a node will match a slot occupied by a
+metadata pointer. If the metadata pointer is there, any leaf whose key matches
+the metadata key prefix must be in the subtree that the metadata pointer points
+to.
+
+In the above example list of index keys, node A will contain:
+
+ SLOT CONTENT INDEX KEY (PREFIX)
+ ==== =============== ==================
+ 1 PTR TO NODE B 1*
+ any LEAF 9431809de993ba
+ any LEAF b4542910809cd
+ e PTR TO NODE F e*
+ any LEAF f3842239082
+
+and node B:
+
+ 3 PTR TO NODE C 13*
+ any LEAF 1458952489
+
+
+SHORTCUTS
+---------
+
+Shortcuts are metadata records that jump over a piece of keyspace. A shortcut
+is a replacement for a series of single-occupancy nodes ascending through the
+levels. Shortcuts exist to save memory and to speed up traversal.
+
+It is possible for the root of the tree to be a shortcut - say, for example,
+the tree contains at least 17 nodes all with key prefix '1111'. The insertion
+algorithm will insert a shortcut to skip over the '1111' keyspace in a single
+bound and get to the fourth level where these actually become different.
+
+
+SPLITTING AND COLLAPSING NODES
+------------------------------
+
+Each node has a maximum capacity of 16 leaves and metadata pointers. If the
+insertion algorithm finds that it is trying to insert a 17th object into a
+node, that node will be split such that at least two leaves that have a common
+key segment at that level end up in a separate node rooted on that slot for
+that common key segment.
+
+If the leaves in a full node and the leaf that is being inserted are
+sufficiently similar, then a shortcut will be inserted into the tree.
+
+When the number of objects in the subtree rooted at a node falls to 16 or
+fewer, then the subtree will be collapsed down to a single node - and this will
+ripple towards the root if possible.
+
+
+NON-RECURSIVE ITERATION
+-----------------------
+
+Each node and shortcut contains a back pointer to its parent and the number of
+the slot in that parent that points to it.  Non-recursive iteration uses these
+to proceed rootwards through the tree, going to the parent node, slot N + 1, to
+make sure progress is made without the need for a stack.
+
+The backpointers, however, make simultaneous alteration and iteration tricky.
+
+
+SIMULTANEOUS ALTERATION AND ITERATION
+-------------------------------------
+
+There are a number of cases to consider:
+
+ (1) Simple insert/replace. This involves simply replacing a NULL or old
+ matching leaf pointer with the pointer to the new leaf after a barrier.
+ The metadata blocks don't change otherwise. An old leaf won't be freed
+ until after the RCU grace period.
+
+ (2) Simple delete. This involves just clearing an old matching leaf. The
+ metadata blocks don't change otherwise. The old leaf won't be freed until
+ after the RCU grace period.
+
+ (3) Insertion replacing part of a subtree that we haven't yet entered. This
+ may involve replacement of part of that subtree - but that won't affect
+ the iteration as we won't have reached the pointer to it yet and the
+ ancestry blocks are not replaced (the layout of those does not change).
+
+ (4) Insertion replacing nodes that we're actively processing. This isn't a
+ problem as we've passed the anchoring pointer and won't switch onto the
+ new layout until we follow the back pointers - at which point we've
+ already examined the leaves in the replaced node (we iterate over all the
+ leaves in a node before following any of its metadata pointers).
+
+ We might, however, re-see some leaves that have been split out into a new
+ branch that's in a slot further along than we were at.
+
+ (5) Insertion replacing nodes that we're processing a dependent branch of.
+ This won't affect us until we follow the back pointers. Similar to (4).
+
+ (6) Deletion collapsing a branch under us. This doesn't affect us because the
+ back pointers will get us back to the parent of the new node before we
+ could see the new node. The entire collapsed subtree is thrown away
+ unchanged - and will still be rooted on the same slot, so we shouldn't
+ process it a second time as we'll go back to slot + 1.
+
+Note:
+
+ (*) Under some circumstances, we need to simultaneously change the parent
+ pointer and the parent slot pointer on a node (say, for example, we
+ inserted another node before it and moved it up a level). We cannot do
+ this without locking against a read - so we have to replace that node too.
+
+ However, when we're changing a shortcut into a node this isn't a problem
+ as shortcuts only have one slot and so the parent slot number isn't used
+ when traversing backwards over one. This means that it's okay to change
+ the slot number first - provided suitable barriers are used to make sure
+ the parent slot number is read after the back pointer.
+
+Obsolete blocks and leaves are freed up after an RCU grace period has passed,
+so as long as anyone doing walking or iteration holds the RCU read lock, the
+old superstructure should not go away on them.
diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt
new file mode 100644
index 000000000000..b2830b435895
--- /dev/null
+++ b/Documentation/block/null_blk.txt
@@ -0,0 +1,72 @@
+Null block device driver
+================================================================================
+
+I. Overview
+
+The null block device (/dev/nullb*) is used for benchmarking the various
+block-layer implementations. It emulates a block device of X gigabytes in size.
+The following instances are possible:
+
+ Single-queue block-layer
+ - Request-based.
+ - Single submission queue per device.
+ - Implements IO scheduling algorithms (CFQ, Deadline, noop).
+ Multi-queue block-layer
+ - Request-based.
+ - Configurable submission queues per device.
+ No block-layer (Known as bio-based)
+ - Bio-based. IO requests are submitted directly to the device driver.
+    - Directly accepts bio data structures and returns them.
+
+All of them have a completion queue for each core in the system.
+
+II. Module parameters applicable for all instances:
+
+queue_mode=[0-2]: Default: 2-Multi-queue
+ Selects which block-layer the module should instantiate with.
+
+ 0: Bio-based.
+ 1: Single-queue.
+ 2: Multi-queue.
+
+home_node=[0..nr_nodes]: Default: NUMA_NO_NODE
+ Selects what CPU node the data structures are allocated from.
+
+gb=[Size in GB]: Default: 250GB
+ The size of the device reported to the system.
+
+bs=[Block size (in bytes)]: Default: 512 bytes
+ The block size reported to the system.
+
+nr_devices=[Number of devices]: Default: 2
+ Number of block devices instantiated. They are instantiated as /dev/nullb0,
+ etc.
+
+irq_mode=[0-2]: Default: 1-Soft-irq
+ The completion mode used for completing IOs to the block-layer.
+
+ 0: None.
+ 1: Soft-irq. Uses IPI to complete IOs across CPU nodes. Simulates the overhead
+     when IOs are issued from a CPU node other than the home node the device
+     is connected to.
+ 2: Timer: Waits a specific period (completion_nsec) for each IO before
+ completion.
+
+completion_nsec=[ns]: Default: 10,000ns
+ Combined with irq_mode=2 (timer). The time each completion event must wait.
+
+submit_queues=[0..nr_cpus]:
+ The number of submission queues attached to the device driver. If unset, it
+ defaults to 1 on single-queue and bio-based instances. For multi-queue,
+  it is ignored when the use_per_node_hctx module parameter is 1.
+
+hw_queue_depth=[0..qdepth]: Default: 64
+ The hardware queue depth of the device.
+
+III: Multi-queue specific parameters
+
+use_per_node_hctx=[0/1]: Default: 0
+  0: The number of submit queues is set to the value of the submit_queues
+ parameter.
+ 1: The multi-queue block layer is instantiated with a hardware dispatch
+ queue for each CPU node in the system.
diff --git a/Documentation/cgroups/net_cls.txt b/Documentation/cgroups/net_cls.txt
index 9face6bb578a..ec182346dea2 100644
--- a/Documentation/cgroups/net_cls.txt
+++ b/Documentation/cgroups/net_cls.txt
@@ -6,6 +6,8 @@ tag network packets with a class identifier (classid).
The Traffic Controller (tc) can be used to assign
different priorities to packets from different cgroups.
+Also, Netfilter (iptables) can use this tag to perform
+actions on such packets.
Creating a net_cls cgroups instance creates a net_cls.classid file.
This net_cls.classid value is initialized to 0.
@@ -32,3 +34,6 @@ tc class add dev eth0 parent 10: classid 10:1 htb rate 40mbit
- creating traffic class 10:1
tc filter add dev eth0 parent 10: protocol ip prio 10 handle 1: cgroup
+
+configuring iptables, basic example:
+iptables -A OUTPUT -m cgroup ! --cgroup 0x100001 -j DROP
diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt
index 274752f8bdf9..719320b5ed3f 100644
--- a/Documentation/device-mapper/cache.txt
+++ b/Documentation/device-mapper/cache.txt
@@ -266,10 +266,12 @@ E.g.
Invalidation is removing an entry from the cache without writing it
back. Cache blocks can be invalidated via the invalidate_cblocks
message, which takes an arbitrary number of cblock ranges. Each cblock
-must be expressed as a decimal value, in the future a variant message
-that takes cblock ranges expressed in hexidecimal may be needed to
-better support efficient invalidation of larger caches. The cache must
-be in passthrough mode when invalidate_cblocks is used.
+range's end value is "one past the end", meaning 5-10 expresses a range
+of values from 5 to 9. Each cblock must be expressed as a decimal
+value; in the future a variant message that takes cblock ranges
+expressed in hexadecimal may be needed to better support efficient
+invalidation of larger caches. The cache must be in passthrough mode
+when invalidate_cblocks is used.
invalidate_cblocks [<cblock>|<cblock begin>-<cblock end>]*
diff --git a/Documentation/devicetree/bindings/arm/omap/mpu.txt b/Documentation/devicetree/bindings/arm/omap/mpu.txt
index 1a5a42ce21bb..83f405bde138 100644
--- a/Documentation/devicetree/bindings/arm/omap/mpu.txt
+++ b/Documentation/devicetree/bindings/arm/omap/mpu.txt
@@ -7,10 +7,18 @@ The MPU contain CPUs, GIC, L2 cache and a local PRCM.
Required properties:
- compatible : Should be "ti,omap3-mpu" for OMAP3
Should be "ti,omap4-mpu" for OMAP4
+ Should be "ti,omap5-mpu" for OMAP5
- ti,hwmods: "mpu"
Examples:
+- For an OMAP5 SMP system:
+
+mpu {
+ compatible = "ti,omap5-mpu";
+    ti,hwmods = "mpu";
+};
+
- For an OMAP4 SMP system:
mpu {
diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt
index 343781b9f246..3e1e498fea96 100644
--- a/Documentation/devicetree/bindings/arm/pmu.txt
+++ b/Documentation/devicetree/bindings/arm/pmu.txt
@@ -7,6 +7,7 @@ representation in the device tree should be done as under:-
Required properties:
- compatible : should be one of
+ "arm,armv8-pmuv3"
"arm,cortex-a15-pmu"
"arm,cortex-a9-pmu"
"arm,cortex-a8-pmu"
diff --git a/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt b/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
index 47ada1dff216..5d49f2b37f68 100644
--- a/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
+++ b/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
@@ -49,7 +49,7 @@ adc@12D10000 {
/* NTC thermistor is a hwmon device */
ncp15wb473@0 {
compatible = "ntc,ncp15wb473";
- pullup-uV = <1800000>;
+ pullup-uv = <1800000>;
pullup-ohm = <47000>;
pulldown-ohm = <0>;
io-channels = <&adc 4>;
diff --git a/Documentation/devicetree/bindings/clock/exynos4-clock.txt b/Documentation/devicetree/bindings/clock/exynos4-clock.txt
index c6bf8a6c8f52..a2ac2d9ac71a 100644
--- a/Documentation/devicetree/bindings/clock/exynos4-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos4-clock.txt
@@ -6,7 +6,7 @@ SoC's in the Exynos4 family.
Required Properties:
-- comptible: should be one of the following.
+- compatible: should be one of the following.
- "samsung,exynos4210-clock" - controller compatible with Exynos4210 SoC.
- "samsung,exynos4412-clock" - controller compatible with Exynos4412 SoC.
diff --git a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
index 24765c146e31..46f5c791ea0d 100644
--- a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
@@ -5,7 +5,7 @@ controllers within the Exynos5250 SoC.
Required Properties:
-- comptible: should be one of the following.
+- compatible: should be one of the following.
- "samsung,exynos5250-clock" - controller compatible with Exynos5250 SoC.
- reg: physical base address of the controller and length of memory mapped
diff --git a/Documentation/devicetree/bindings/clock/exynos5420-clock.txt b/Documentation/devicetree/bindings/clock/exynos5420-clock.txt
index 32aa34ecad36..458f34789e5d 100644
--- a/Documentation/devicetree/bindings/clock/exynos5420-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos5420-clock.txt
@@ -5,7 +5,7 @@ controllers within the Exynos5420 SoC.
Required Properties:
-- comptible: should be one of the following.
+- compatible: should be one of the following.
- "samsung,exynos5420-clock" - controller compatible with Exynos5420 SoC.
- reg: physical base address of the controller and length of memory mapped
diff --git a/Documentation/devicetree/bindings/clock/exynos5440-clock.txt b/Documentation/devicetree/bindings/clock/exynos5440-clock.txt
index 4499e9966bc9..9955dc9c7d96 100644
--- a/Documentation/devicetree/bindings/clock/exynos5440-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos5440-clock.txt
@@ -5,7 +5,7 @@ controllers within the Exynos5440 SoC.
Required Properties:
-- comptible: should be "samsung,exynos5440-clock".
+- compatible: should be "samsung,exynos5440-clock".
- reg: physical base address of the controller and length of memory mapped
region.
diff --git a/Documentation/devicetree/bindings/dma/atmel-dma.txt b/Documentation/devicetree/bindings/dma/atmel-dma.txt
index e1f343c7a34b..f69bcf5a6343 100644
--- a/Documentation/devicetree/bindings/dma/atmel-dma.txt
+++ b/Documentation/devicetree/bindings/dma/atmel-dma.txt
@@ -28,7 +28,7 @@ The three cells in order are:
dependent:
- bit 7-0: peripheral identifier for the hardware handshaking interface. The
identifier can be different for tx and rx.
- - bit 11-8: FIFO configuration. 0 for half FIFO, 1 for ALAP, 1 for ASAP.
+ - bit 11-8: FIFO configuration. 0 for half FIFO, 1 for ALAP, 2 for ASAP.
Example:
diff --git a/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt b/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt
index b0019eb5330e..798cfc9d3839 100644
--- a/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt
@@ -5,16 +5,42 @@ This is for the non-QE/CPM/GUTs GPIO controllers as found on
Every GPIO controller node must have #gpio-cells property defined,
this information will be used to translate gpio-specifiers.
+See bindings/gpio/gpio.txt for details of how to specify GPIO
+information for devices.
+
+The GPIO module usually is connected to the SoC's internal interrupt
+controller, see bindings/interrupt-controller/interrupts.txt (the
+interrupt client nodes section) for details how to specify this GPIO
+module's interrupt.
+
+The GPIO module may serve as another interrupt controller (cascaded to
+the SoC's internal interrupt controller). See the interrupt controller
+nodes section in bindings/interrupt-controller/interrupts.txt for
+details.
Required properties:
-- compatible : "fsl,<CHIP>-gpio" followed by "fsl,mpc8349-gpio" for
- 83xx, "fsl,mpc8572-gpio" for 85xx and "fsl,mpc8610-gpio" for 86xx.
-- #gpio-cells : Should be two. The first cell is the pin number and the
- second cell is used to specify optional parameters (currently unused).
- - interrupts : Interrupt mapping for GPIO IRQ.
- - interrupt-parent : Phandle for the interrupt controller that
- services interrupts for this device.
-- gpio-controller : Marks the port as GPIO controller.
+- compatible: "fsl,<chip>-gpio" followed by "fsl,mpc8349-gpio"
+ for 83xx, "fsl,mpc8572-gpio" for 85xx, or
+ "fsl,mpc8610-gpio" for 86xx.
+- #gpio-cells: Should be two. The first cell is the pin number
+ and the second cell is used to specify optional
+ parameters (currently unused).
+- interrupt-parent: Phandle for the interrupt controller that
+ services interrupts for this device.
+- interrupts: Interrupt mapping for GPIO IRQ.
+- gpio-controller: Marks the port as GPIO controller.
+
+Optional properties:
+- interrupt-controller: Empty boolean property which marks the GPIO
+ module as an IRQ controller.
+- #interrupt-cells: Should be two. Defines the number of integer
+ cells required to specify an interrupt within
+ this interrupt controller. The first cell
+ defines the pin number, the second cell
+ defines additional flags (trigger type,
+ trigger polarity). Note that the available
+ set of trigger conditions supported by the
+ GPIO module depends on the actual SoC.
Example of gpio-controller nodes for a MPC8347 SoC:
@@ -22,39 +48,27 @@ Example of gpio-controller nodes for a MPC8347 SoC:
#gpio-cells = <2>;
compatible = "fsl,mpc8347-gpio", "fsl,mpc8349-gpio";
reg = <0xc00 0x100>;
- interrupts = <74 0x8>;
interrupt-parent = <&ipic>;
+ interrupts = <74 0x8>;
gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <2>;
};
gpio2: gpio-controller@d00 {
#gpio-cells = <2>;
compatible = "fsl,mpc8347-gpio", "fsl,mpc8349-gpio";
reg = <0xd00 0x100>;
- interrupts = <75 0x8>;
interrupt-parent = <&ipic>;
+ interrupts = <75 0x8>;
gpio-controller;
};
-See booting-without-of.txt for details of how to specify GPIO
-information for devices.
-
-To use GPIO pins as interrupt sources for peripherals, specify the
-GPIO controller as the interrupt parent and define GPIO number +
-trigger mode using the interrupts property, which is defined like
-this:
-
-interrupts = <number trigger>, where:
- - number: GPIO pin (0..31)
- - trigger: trigger mode:
- 2 = trigger on falling edge
- 3 = trigger on both edges
-
-Example of device using this is:
+Example of a peripheral using the GPIO module as an IRQ controller:
funkyfpga@0 {
compatible = "funky-fpga";
...
- interrupts = <4 3>;
interrupt-parent = <&gpio1>;
+ interrupts = <4 3>;
};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-omap.txt b/Documentation/devicetree/bindings/i2c/i2c-omap.txt
index 56564aa4b444..7e49839d4124 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-omap.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-omap.txt
@@ -1,7 +1,8 @@
I2C for OMAP platforms
Required properties :
-- compatible : Must be "ti,omap3-i2c" or "ti,omap4-i2c"
+- compatible : Must be "ti,omap2420-i2c", "ti,omap2430-i2c", "ti,omap3-i2c"
+ or "ti,omap4-i2c"
- ti,hwmods : Must be "i2c<n>", n being the instance number (1-based)
- #address-cells = <1>;
- #size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index ad6a73852f08..b1cb3415e6f1 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -15,6 +15,7 @@ adi,adt7461 +/-1C TDM Extended Temp Range I.C
adt7461 +/-1C TDM Extended Temp Range I.C
at,24c08 i2c serial eeprom (24cxx)
atmel,24c02 i2c serial eeprom (24cxx)
+atmel,at97sc3204t i2c trusted platform module (TPM)
catalyst,24c32 i2c serial eeprom
dallas,ds1307 64 x 8, Serial, I2C Real-Time Clock
dallas,ds1338 I2C RTC with 56-Byte NV RAM
@@ -35,6 +36,7 @@ fsl,mc13892 MC13892: Power Management Integrated Circuit (PMIC) for i.MX35/51
fsl,mma8450 MMA8450Q: Xtrinsic Low-power, 3-axis Xtrinsic Accelerometer
fsl,mpr121 MPR121: Proximity Capacitive Touch Sensor Controller
fsl,sgtl5000 SGTL5000: Ultra Low-Power Audio Codec
+gmt,g751 G751: Digital Temperature Sensor and Thermal Watchdog with Two-Wire Interface
infineon,slb9635tt Infineon SLB9635 (Soft-) I2C TPM (old protocol, max 100khz)
infineon,slb9645tt Infineon SLB9645 I2C TPM (new protocol, max 400khz)
maxim,ds1050 5 Bit Programmable, Pulse-Width Modulator
@@ -44,6 +46,7 @@ mc,rv3029c2 Real Time Clock Module with I2C-Bus
national,lm75 I2C TEMP SENSOR
national,lm80 Serial Interface ACPI-Compatible Microprocessor System Hardware Monitor
national,lm92 ±0.33°C Accurate, 12-Bit + Sign Temperature Sensor and Thermal Window Comparator with Two-Wire Interface
+nuvoton,npct501 i2c trusted platform module (TPM)
nxp,pca9556 Octal SMBus and I2C registered interface
nxp,pca9557 8-bit I2C-bus and SMBus I/O port with reset
nxp,pcf8563 Real-time clock/calendar
@@ -61,3 +64,4 @@ taos,tsl2550 Ambient Light Sensor with SMBUS/Two Wire Serial Interface
ti,tsc2003 I2C Touch-Screen Controller
ti,tmp102 Low Power Digital Temperature Sensor with SMBUS/Two Wire Serial Interface
ti,tmp275 Digital Temperature Sensor
+winbond,wpct301 i2c trusted platform module (TPM)
diff --git a/Documentation/devicetree/bindings/mmc/ti-omap.txt b/Documentation/devicetree/bindings/mmc/ti-omap.txt
new file mode 100644
index 000000000000..8de579969763
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/ti-omap.txt
@@ -0,0 +1,54 @@
+* TI MMC host controller for OMAP1 and 2420
+
+The MMC Host Controller on TI OMAP1 and 2420 family provides
+an interface for MMC, SD, and SDIO types of memory cards.
+
+This file documents differences between the core properties described
+by mmc.txt and the properties used by the omap mmc driver.
+
+Note that this driver will not work with omap2430 or later omaps;
+please see the omap hsmmc driver for the current omaps.
+
+Required properties:
+- compatible: Must be "ti,omap2420-mmc", for OMAP2420 controllers
+- ti,hwmods: For 2420, must be "msdi<n>", where n is controller
+  instance starting at 1
+
+Examples:
+
+ msdi1: mmc@4809c000 {
+ compatible = "ti,omap2420-mmc";
+ ti,hwmods = "msdi1";
+ reg = <0x4809c000 0x80>;
+ interrupts = <83>;
+ dmas = <&sdma 61 &sdma 62>;
+ dma-names = "tx", "rx";
+ };
+
diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt b/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
new file mode 100644
index 000000000000..ee3723beb701
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
@@ -0,0 +1,25 @@
+* Microchip MCP251X stand-alone CAN controller device tree bindings
+
+Required properties:
+ - compatible: Should be one of the following:
+ - "microchip,mcp2510" for MCP2510.
+ - "microchip,mcp2515" for MCP2515.
+ - reg: SPI chip select.
+ - clocks: The clock feeding the CAN controller.
+ - interrupt-parent: The parent interrupt controller.
+ - interrupts: Should contain IRQ line for the CAN controller.
+
+Optional properties:
+ - vdd-supply: Regulator that powers the CAN controller.
+ - xceiver-supply: Regulator that powers the CAN transceiver.
+
+Example:
+ can0: can@1 {
+ compatible = "microchip,mcp2515";
+ reg = <1>;
+ clocks = <&clk24m>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <13 0x2>;
+ vdd-supply = <&reg5v0>;
+ xceiver-supply = <&reg5v0>;
+ };
diff --git a/Documentation/devicetree/bindings/net/davinci_emac.txt b/Documentation/devicetree/bindings/net/davinci_emac.txt
index 48b259e29e87..bad381faf036 100644
--- a/Documentation/devicetree/bindings/net/davinci_emac.txt
+++ b/Documentation/devicetree/bindings/net/davinci_emac.txt
@@ -4,7 +4,7 @@ This file provides information, what the device node
for the davinci_emac interface contains.
Required properties:
-- compatible: "ti,davinci-dm6467-emac";
+- compatible: "ti,davinci-dm6467-emac" or "ti,am3517-emac"
- reg: Offset and length of the register set for the device
- ti,davinci-ctrl-reg-offset: offset to control register
- ti,davinci-ctrl-mod-reg-offset: offset to control module register
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt
index d53639221403..845ff848d895 100644
--- a/Documentation/devicetree/bindings/net/fsl-fec.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -15,6 +15,7 @@ Optional properties:
only if property "phy-reset-gpios" is available. Missing the property
will have the duration be 1 millisecond. Numbers greater than 1000 are
invalid and 1 millisecond will be used instead.
+- phy-supply: regulator that powers the Ethernet PHY.
Example:
@@ -25,4 +26,5 @@ ethernet@83fec000 {
phy-mode = "mii";
phy-reset-gpios = <&gpio2 14 0>; /* GPIO2_14 */
local-mac-address = [00 04 9F 01 1B B9];
+ phy-supply = <&reg_fec_supply>;
};
diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
index 7cd18fbfcf71..f648094abc35 100644
--- a/Documentation/devicetree/bindings/net/phy.txt
+++ b/Documentation/devicetree/bindings/net/phy.txt
@@ -22,6 +22,7 @@ Optional Properties:
specifications. If neither of these are specified, the default is to
assume clause 22. The compatible list may also contain other
elements.
+- max-speed: Maximum PHY supported speed (10, 100, 1000...)
Example:
diff --git a/Documentation/devicetree/bindings/net/smsc-lan91c111.txt b/Documentation/devicetree/bindings/net/smsc-lan91c111.txt
index 953049b4248a..5a41a8658daa 100644
--- a/Documentation/devicetree/bindings/net/smsc-lan91c111.txt
+++ b/Documentation/devicetree/bindings/net/smsc-lan91c111.txt
@@ -8,3 +8,7 @@ Required properties:
Optional properties:
- phy-device : phandle to Ethernet phy
- local-mac-address : Ethernet mac address to use
+- reg-io-width : Mask of sizes (in bytes) of the IO accesses that
+  are supported on the device.  Valid values for SMSC LAN91c111 are
+  1, 2 or 4.  If omitted or invalid, the size defaults to 2, meaning
+ 16-bit access only.
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/dma.txt b/Documentation/devicetree/bindings/powerpc/fsl/dma.txt
index 2a4b4bce6110..7fc1b010fa75 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/dma.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/dma.txt
@@ -1,33 +1,30 @@
-* Freescale 83xx DMA Controller
+* Freescale DMA Controllers
-Freescale PowerPC 83xx have on chip general purpose DMA controllers.
+** Freescale Elo DMA Controller
+ This is a little-endian 4-channel DMA controller, used in Freescale mpc83xx
+ series chips such as mpc8315, mpc8349, mpc8379 etc.
Required properties:
-- compatible : compatible list, contains 2 entries, first is
- "fsl,CHIP-dma", where CHIP is the processor
- (mpc8349, mpc8360, etc.) and the second is
- "fsl,elo-dma"
-- reg : <registers mapping for DMA general status reg>
-- ranges : Should be defined as specified in 1) to describe the
- DMA controller channels.
+- compatible : must include "fsl,elo-dma"
+- reg : DMA General Status Register, i.e. DGSR which contains
+ status for all the 4 DMA channels
+- ranges : describes the mapping between the address space of the
+ DMA channels and the address space of the DMA controller
- cell-index : controller index. 0 for controller @ 0x8100
-- interrupts : <interrupt mapping for DMA IRQ>
+- interrupts : interrupt specifier for DMA IRQ
- interrupt-parent : optional, if needed for interrupt mapping
-
- DMA channel nodes:
- - compatible : compatible list, contains 2 entries, first is
- "fsl,CHIP-dma-channel", where CHIP is the processor
- (mpc8349, mpc8350, etc.) and the second is
- "fsl,elo-dma-channel". However, see note below.
- - reg : <registers mapping for channel>
- - cell-index : dma channel index starts at 0.
+ - compatible : must include "fsl,elo-dma-channel"
+ However, see note below.
+ - reg : DMA channel specific registers
+ - cell-index : DMA channel index starts at 0.
Optional properties:
- - interrupts : <interrupt mapping for DMA channel IRQ>
- (on 83xx this is expected to be identical to
- the interrupts property of the parent node)
+ - interrupts : interrupt specifier for DMA channel IRQ
+ (on 83xx this is expected to be identical to
+ the interrupts property of the parent node)
- interrupt-parent : optional, if needed for interrupt mapping
Example:
@@ -70,30 +67,27 @@ Example:
};
};
-* Freescale 85xx/86xx DMA Controller
-
-Freescale PowerPC 85xx/86xx have on chip general purpose DMA controllers.
+** Freescale EloPlus DMA Controller
+ This is a 4-channel DMA controller with extended addresses and chaining,
+ mainly used in Freescale mpc85xx/86xx, Pxxx and BSC series chips, such as
+  mpc8540, mpc8641, p4080, bsc9131, etc.
Required properties:
-- compatible : compatible list, contains 2 entries, first is
- "fsl,CHIP-dma", where CHIP is the processor
- (mpc8540, mpc8540, etc.) and the second is
- "fsl,eloplus-dma"
-- reg : <registers mapping for DMA general status reg>
+- compatible : must include "fsl,eloplus-dma"
+- reg : DMA General Status Register, i.e. DGSR which contains
+ status for all the 4 DMA channels
- cell-index : controller index. 0 for controller @ 0x21000,
1 for controller @ 0xc000
-- ranges : Should be defined as specified in 1) to describe the
- DMA controller channels.
+- ranges : describes the mapping between the address space of the
+ DMA channels and the address space of the DMA controller
- DMA channel nodes:
- - compatible : compatible list, contains 2 entries, first is
- "fsl,CHIP-dma-channel", where CHIP is the processor
- (mpc8540, mpc8560, etc.) and the second is
- "fsl,eloplus-dma-channel". However, see note below.
- - cell-index : dma channel index starts at 0.
- - reg : <registers mapping for channel>
- - interrupts : <interrupt mapping for DMA channel IRQ>
+ - compatible : must include "fsl,eloplus-dma-channel"
+ However, see note below.
+ - cell-index : DMA channel index starts at 0.
+ - reg : DMA channel specific registers
+ - interrupts : interrupt specifier for DMA channel IRQ
- interrupt-parent : optional, if needed for interrupt mapping
Example:
@@ -134,6 +128,76 @@ Example:
};
};
+** Freescale Elo3 DMA Controller
+  This DMA controller has the same function as EloPlus except that Elo3 has
+  8 channels while EloPlus has only 4; it is used in Freescale Txxx and Bxxx
+  series chips, such as t1040, t4240 and b4860.
+
+Required properties:
+
+- compatible : must include "fsl,elo3-dma"
+- reg : contains two entries for DMA General Status Registers,
+ i.e. DGSR0 which includes status for channel 1~4, and
+ DGSR1 for channel 5~8
+- ranges : describes the mapping between the address space of the
+ DMA channels and the address space of the DMA controller
+
+- DMA channel nodes:
+ - compatible : must include "fsl,eloplus-dma-channel"
+ - reg : DMA channel specific registers
+ - interrupts : interrupt specifier for DMA channel IRQ
+ - interrupt-parent : optional, if needed for interrupt mapping
+
+Example:
+dma@100300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,elo3-dma";
+ reg = <0x100300 0x4>,
+ <0x100600 0x4>;
+ ranges = <0x0 0x100100 0x500>;
+ dma-channel@0 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ interrupts = <28 2 0 0>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ interrupts = <29 2 0 0>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ interrupts = <30 2 0 0>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ interrupts = <31 2 0 0>;
+ };
+ dma-channel@300 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x300 0x80>;
+ interrupts = <76 2 0 0>;
+ };
+ dma-channel@380 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x380 0x80>;
+ interrupts = <77 2 0 0>;
+ };
+ dma-channel@400 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x400 0x80>;
+ interrupts = <78 2 0 0>;
+ };
+ dma-channel@480 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x480 0x80>;
+ interrupts = <79 2 0 0>;
+ };
+};
+
Note on DMA channel compatible properties: The compatible property must say
"fsl,elo-dma-channel" or "fsl,eloplus-dma-channel" to be used by the Elo DMA
driver (fsldma). Any DMA channel used by fsldma cannot be used by another
diff --git a/Documentation/devicetree/bindings/rng/qcom,prng.txt b/Documentation/devicetree/bindings/rng/qcom,prng.txt
new file mode 100644
index 000000000000..8e5853c2879b
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/qcom,prng.txt
@@ -0,0 +1,17 @@
+Qualcomm MSM pseudo random number generator.
+
+Required properties:
+
+- compatible : should be "qcom,prng"
+- reg : specifies base physical address and size of the registers map
+- clocks : phandle to clock-controller plus clock-specifier pair
+- clock-names : "core" clocks all registers, FIFO and circuits in PRNG IP block
+
+Example:
+
+ rng@f9bff000 {
+ compatible = "qcom,prng";
+ reg = <0xf9bff000 0x200>;
+ clocks = <&clock GCC_PRNG_AHB_CLK>;
+ clock-names = "core";
+ };
diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra20-spi.txt b/Documentation/devicetree/bindings/spi/nvidia,tegra20-spi.txt
deleted file mode 100644
index 6b9e51896693..000000000000
--- a/Documentation/devicetree/bindings/spi/nvidia,tegra20-spi.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-NVIDIA Tegra 2 SPI device
-
-Required properties:
-- compatible : should be "nvidia,tegra20-spi".
-- gpios : should specify GPIOs used for chipselect.
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index ce95ed1c6d3e..edbb8d88c85e 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -32,12 +32,14 @@ est ESTeem Wireless Modems
fsl Freescale Semiconductor
GEFanuc GE Fanuc Intelligent Platforms Embedded Systems, Inc.
gef GE Fanuc Intelligent Platforms Embedded Systems, Inc.
+gmt Global Mixed-mode Technology, Inc.
hisilicon Hisilicon Limited.
hp Hewlett Packard
ibm International Business Machines (IBM)
idt Integrated Device Technologies, Inc.
img Imagination Technologies Ltd.
intercontrol Inter Control Group
+lg LG Corporation
linux Linux-specific binding
lsi LSI Corp. (LSI Logic)
marvell Marvell Technology Group Ltd.
diff --git a/Documentation/dmatest.txt b/Documentation/dmatest.txt
index a2b5663eae26..dd77a81bdb80 100644
--- a/Documentation/dmatest.txt
+++ b/Documentation/dmatest.txt
@@ -15,39 +15,48 @@ be built as module or inside kernel. Let's consider those cases.
Part 2 - When dmatest is built as a module...
-After mounting debugfs and loading the module, the /sys/kernel/debug/dmatest
-folder with nodes will be created. There are two important files located. First
-is the 'run' node that controls run and stop phases of the test, and the second
-one, 'results', is used to get the test case results.
-
-Note that in this case test will not run on load automatically.
-
Example of usage:
+ % modprobe dmatest channel=dma0chan0 timeout=2000 iterations=1 run=1
+
+...or:
+ % modprobe dmatest
% echo dma0chan0 > /sys/module/dmatest/parameters/channel
% echo 2000 > /sys/module/dmatest/parameters/timeout
% echo 1 > /sys/module/dmatest/parameters/iterations
- % echo 1 > /sys/kernel/debug/dmatest/run
+ % echo 1 > /sys/module/dmatest/parameters/run
+
+...or on the kernel command line:
+
+ dmatest.channel=dma0chan0 dmatest.timeout=2000 dmatest.iterations=1 dmatest.run=1
Hint: available channel list could be extracted by running the following
command:
% ls -1 /sys/class/dma/
-After a while you will start to get messages about current status or error like
-in the original code.
+Once started, a message like "dmatest: Started 1 threads using dma0chan0" is
+emitted. After that only test failure messages are reported until the test
+stops.
Note that running a new test will not stop any in progress test.
-The following command should return actual state of the test.
- % cat /sys/kernel/debug/dmatest/run
-
-To wait for test done the user may perform a busy loop that checks the state.
-
- % while [ $(cat /sys/kernel/debug/dmatest/run) = "Y" ]
- > do
- > echo -n "."
- > sleep 1
- > done
- > echo
+The following command returns the state of the test.
+ % cat /sys/module/dmatest/parameters/run
+
+To wait for test completion, userspace can poll 'run' until it is false, or use
+the wait parameter. Specifying 'wait=1' when loading the module causes module
+initialization to pause until a test run has completed, while reading
+/sys/module/dmatest/parameters/wait waits for any running test to complete
+before returning. For example, the following scripts wait for 42 tests
+to complete before exiting. Note that if 'iterations' is set to 'infinite' then
+waiting is disabled.
+
+Example:
+ % modprobe dmatest run=1 iterations=42 wait=1
+ % modprobe -r dmatest
+...or:
+ % modprobe dmatest run=1 iterations=42
+ % cat /sys/module/dmatest/parameters/wait
+ % modprobe -r dmatest
Part 3 - When built-in in the kernel...
@@ -62,21 +71,22 @@ case. You always could check them at run-time by running
Part 4 - Gathering the test results
-The module provides a storage for the test results in the memory. The gathered
-data could be used after test is done.
+Test results are printed to the kernel log buffer with the format:
-The special file 'results' in the debugfs represents gathered data of the in
-progress test. The messages collected are printed to the kernel log as well.
+"dmatest: result <channel>: <test id>: '<error msg>' with src_off=<val> dst_off=<val> len=<val> (<err code>)"
Example of output:
- % cat /sys/kernel/debug/dmatest/results
- dma0chan0-copy0: #1: No errors with src_off=0x7bf dst_off=0x8ad len=0x3fea (0)
+ % dmesg | tail -n 1
+ dmatest: result dma0chan0-copy0: #1: No errors with src_off=0x7bf dst_off=0x8ad len=0x3fea (0)
The message format is unified across the different types of errors. A number in
the parens represents additional information, e.g. error code, error counter,
-or status.
+or status. A test thread also emits a summary line at completion listing the
+number of tests executed, number that failed, and a result code.
-Comparison between buffers is stored to the dedicated structure.
+Example:
+ % dmesg | tail -n 1
+ dmatest: dma0chan0-copy0: summary 1 test, 0 failures 1000 iops 100000 KB/s (0)
-Note that the verify result is now accessible only via file 'results' in the
-debugfs.
+The details of a data miscompare error are also emitted, but do not follow the
+above format.
diff --git a/Documentation/filesystems/btrfs.txt b/Documentation/filesystems/btrfs.txt
index 9dae59407437..5dd282dda55c 100644
--- a/Documentation/filesystems/btrfs.txt
+++ b/Documentation/filesystems/btrfs.txt
@@ -70,6 +70,12 @@ Unless otherwise specified, all options default to off.
See comments at the top of fs/btrfs/check-integrity.c for more info.
+ commit=<seconds>
+ Set the interval of periodic commit, 30 seconds by default. Higher
+ values defer data being synced to permanent storage with obvious
+	consequences when the system crashes. The upper bound is not enforced,
+ but a warning is printed if it's more than 300 seconds (5 minutes).
+
compress
compress=<type>
compress-force
@@ -154,7 +160,11 @@ Unless otherwise specified, all options default to off.
Currently this scans a list of several previous tree roots and tries to
use the first readable.
- skip_balance
+ rescan_uuid_tree
+ Force check and rebuild procedure of the UUID tree. This should not
+ normally be needed.
+
+ skip_balance
Skip automatic resume of interrupted balance operation after mount.
May be resumed with "btrfs balance resume."
@@ -234,24 +244,14 @@ available from the git repository at the following location:
These include the following tools:
-mkfs.btrfs: create a filesystem
-
-btrfsctl: control program to create snapshots and subvolumes:
+* mkfs.btrfs: create a filesystem
- mount /dev/sda2 /mnt
- btrfsctl -s new_subvol_name /mnt
- btrfsctl -s snapshot_of_default /mnt/default
- btrfsctl -s snapshot_of_new_subvol /mnt/new_subvol_name
- btrfsctl -s snapshot_of_a_snapshot /mnt/snapshot_of_new_subvol
- ls /mnt
- default snapshot_of_a_snapshot snapshot_of_new_subvol
- new_subvol_name snapshot_of_default
+* btrfs: a single tool to manage the filesystems, refer to the manpage for more details
- Snapshots and subvolumes cannot be deleted right now, but you can
- rm -rf all the files and directories inside them.
+* 'btrfsck' or 'btrfs check': do a consistency check of the filesystem
-btrfsck: do a limited check of the FS extent trees.
+Other tools for specific tasks:
-btrfs-debug-tree: print all of the FS metadata in text form. Example:
+* btrfs-convert: in-place conversion from ext2/3/4 filesystems
- btrfs-debug-tree /dev/sda2 >& big_output_file
+* btrfs-image: dump filesystem metadata for debugging
diff --git a/Documentation/gpio/00-INDEX b/Documentation/gpio/00-INDEX
new file mode 100644
index 000000000000..1de43ae46ae6
--- /dev/null
+++ b/Documentation/gpio/00-INDEX
@@ -0,0 +1,14 @@
+00-INDEX
+ - This file
+gpio.txt
+ - Introduction to GPIOs and their kernel interfaces
+consumer.txt
+ - How to obtain and use GPIOs in a driver
+driver.txt
+ - How to write a GPIO driver
+board.txt
+ - How to assign GPIOs to a consumer device and a function
+sysfs.txt
+ - Information about the GPIO sysfs interface
+gpio-legacy.txt
+ - Historical documentation of the deprecated GPIO integer interface
diff --git a/Documentation/gpio/board.txt b/Documentation/gpio/board.txt
new file mode 100644
index 000000000000..0d03506f2cc5
--- /dev/null
+++ b/Documentation/gpio/board.txt
@@ -0,0 +1,115 @@
+GPIO Mappings
+=============
+
+This document explains how GPIOs can be assigned to given devices and functions.
+Note that it only applies to the new descriptor-based interface. For a
+description of the deprecated integer-based GPIO interface please refer to
+gpio-legacy.txt (actually, there is no real mapping possible with the old
+interface; you just fetch an integer from somewhere and request the
+corresponding GPIO).
+
+Platforms that make use of GPIOs must select ARCH_REQUIRE_GPIOLIB (if GPIO usage
+is mandatory) or ARCH_WANT_OPTIONAL_GPIOLIB (if GPIO support can be omitted) in
+their Kconfig. Then, how GPIOs are mapped depends on what the platform uses to
+describe its hardware layout. Currently, mappings can be defined through device
+tree, ACPI, and platform data.
+
+Device Tree
+-----------
+GPIOs can easily be mapped to devices and functions in the device tree. The
+exact way to do it depends on the GPIO controller providing the GPIOs, see the
+device tree bindings for your controller.
+
+GPIO mappings are defined in the consumer device's node, in a property named
+<function>-gpios, where <function> is the function the driver will request
+through gpiod_get(). For example:
+
+ foo_device {
+ compatible = "acme,foo";
+ ...
+ led-gpios = <&gpio 15 GPIO_ACTIVE_HIGH>, /* red */
+ <&gpio 16 GPIO_ACTIVE_HIGH>, /* green */
+ <&gpio 17 GPIO_ACTIVE_HIGH>; /* blue */
+
+ power-gpio = <&gpio 1 GPIO_ACTIVE_LOW>;
+ };
+
+This property will make GPIOs 15, 16 and 17 available to the driver under the
+"led" function, and GPIO 1 as the "power" GPIO:
+
+ struct gpio_desc *red, *green, *blue, *power;
+
+ red = gpiod_get_index(dev, "led", 0);
+ green = gpiod_get_index(dev, "led", 1);
+ blue = gpiod_get_index(dev, "led", 2);
+
+ power = gpiod_get(dev, "power");
+
+The led GPIOs will be active-high, while the power GPIO will be active-low (i.e.
+gpiod_is_active_low(power) will be true).
+
+ACPI
+----
+ACPI does not support function names for GPIOs. Therefore, only the "idx"
+argument of gpiod_get_index() is useful to discriminate between GPIOs assigned
+to a device. The "con_id" argument can still be set for debugging purposes (it
+will appear under error messages as well as debug and sysfs nodes).
+
+Platform Data
+-------------
+Finally, GPIOs can be bound to devices and functions using platform data. Board
+files that wish to do so need to include the following header:
+
+ #include <linux/gpio/driver.h>
+
+GPIOs are mapped by means of lookup tables containing instances of the
+gpiod_lookup structure. Two macros are defined to help declare such mappings:
+
+ GPIO_LOOKUP(chip_label, chip_hwnum, dev_id, con_id, flags)
+ GPIO_LOOKUP_IDX(chip_label, chip_hwnum, dev_id, con_id, idx, flags)
+
+where
+
+ - chip_label is the label of the gpiod_chip instance providing the GPIO
+ - chip_hwnum is the hardware number of the GPIO within the chip
+ - dev_id is the identifier of the device that will make use of this GPIO. If
+ NULL, the GPIO will be available to all devices.
+ - con_id is the name of the GPIO function from the device point of view. It
+ can be NULL.
+ - idx is the index of the GPIO within the function.
+ - flags is used to specify the following properties:
+ * GPIOF_ACTIVE_LOW - to configure the GPIO as active-low
+ * GPIOF_OPEN_DRAIN - GPIO pin is open drain type.
+ * GPIOF_OPEN_SOURCE - GPIO pin is open source type.
+
+In the future, these flags might be extended to support more properties.
+
+Note that GPIO_LOOKUP() is just a shortcut to GPIO_LOOKUP_IDX() where idx = 0.
+
+A lookup table can then be defined as follows:
+
+ struct gpiod_lookup gpios_table[] = {
+ GPIO_LOOKUP_IDX("gpio.0", 15, "foo.0", "led", 0, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("gpio.0", 16, "foo.0", "led", 1, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("gpio.0", 17, "foo.0", "led", 2, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio.0", 1, "foo.0", "power", GPIO_ACTIVE_LOW),
+ };
+
+And the table can be added by the board code as follows:
+
+ gpiod_add_table(gpios_table, ARRAY_SIZE(gpios_table));
+
+The driver controlling "foo.0" will then be able to obtain its GPIOs as follows:
+
+ struct gpio_desc *red, *green, *blue, *power;
+
+ red = gpiod_get_index(dev, "led", 0);
+ green = gpiod_get_index(dev, "led", 1);
+ blue = gpiod_get_index(dev, "led", 2);
+
+ power = gpiod_get(dev, "power");
+ gpiod_direction_output(power, 1);
+
+Since the "power" GPIO is mapped as active-low, its actual signal will be 0
+after this code. Contrary to the legacy integer GPIO interface, the active-low
+property is handled during mapping and is thus transparent to GPIO consumers.
diff --git a/Documentation/gpio/consumer.txt b/Documentation/gpio/consumer.txt
new file mode 100644
index 000000000000..07c74a3765a0
--- /dev/null
+++ b/Documentation/gpio/consumer.txt
@@ -0,0 +1,197 @@
+GPIO Descriptor Consumer Interface
+==================================
+
+This document describes the consumer interface of the GPIO framework. Note that
+it describes the new descriptor-based interface. For a description of the
+deprecated integer-based GPIO interface please refer to gpio-legacy.txt.
+
+
+Guidelines for GPIOs consumers
+==============================
+
+Drivers that can't work without standard GPIO calls should have Kconfig entries
+that depend on GPIOLIB. The functions that allow a driver to obtain and use
+GPIOs are available by including the following file:
+
+ #include <linux/gpio/consumer.h>
+
+All the functions that work with the descriptor-based GPIO interface are
+prefixed with gpiod_. The gpio_ prefix is used for the legacy interface. No
+other function in the kernel should use these prefixes.
+
+
+Obtaining and Disposing GPIOs
+=============================
+
+With the descriptor-based interface, GPIOs are identified with an opaque,
+non-forgeable handle that must be obtained through a call to one of the
+gpiod_get() functions. Like many other kernel subsystems, gpiod_get() takes the
+device that will use the GPIO and the function the requested GPIO is supposed to
+fulfill:
+
+ struct gpio_desc *gpiod_get(struct device *dev, const char *con_id)
+
+If a function is implemented by using several GPIOs together (e.g. a simple LED
+device that displays digits), an additional index argument can be specified:
+
+ struct gpio_desc *gpiod_get_index(struct device *dev,
+ const char *con_id, unsigned int idx)
+
+Both functions return either a valid GPIO descriptor, or an error code checkable
+with IS_ERR(). They will never return a NULL pointer.
+
+Device-managed variants of these functions are also defined:
+
+ struct gpio_desc *devm_gpiod_get(struct device *dev, const char *con_id)
+
+ struct gpio_desc *devm_gpiod_get_index(struct device *dev,
+ const char *con_id,
+ unsigned int idx)
+
+A GPIO descriptor can be disposed of using the gpiod_put() function:
+
+ void gpiod_put(struct gpio_desc *desc)
+
+It is strictly forbidden to use a descriptor after calling this function. The
+device-managed variant is, unsurprisingly:
+
+ void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
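+
+As an illustration, here is a minimal sketch of a probe function using the
+calls above (the "reset" function name and the use of a platform device are
+assumptions made for the example, not requirements of the API):
+
+ static int foo_probe(struct platform_device *pdev)
+ {
+     struct gpio_desc *reset;
+
+     /* Request the GPIO mapped to the hypothetical "reset" function; the
+      * devm_ variant releases it automatically on driver detach. */
+     reset = devm_gpiod_get(&pdev->dev, "reset");
+     if (IS_ERR(reset))
+         return PTR_ERR(reset); /* never NULL; always check with IS_ERR() */
+
+     /* ... configure and use the descriptor ... */
+     return 0;
+ }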
+
+
+Using GPIOs
+===========
+
+Setting Direction
+-----------------
+The first thing a driver must do with a GPIO is to set its direction. This is
+done by invoking one of the gpiod_direction_*() functions:
+
+ int gpiod_direction_input(struct gpio_desc *desc)
+ int gpiod_direction_output(struct gpio_desc *desc, int value)
+
+The return value is zero for success, else a negative errno. It should be
+checked, since the get/set calls don't return errors and since misconfiguration
+is possible. You should normally issue these calls from a task context. However,
+for spinlock-safe GPIOs it is OK to use them before tasking is enabled, as part
+of early board setup.
+
+For output GPIOs, the value provided becomes the initial output value. This
+helps avoid signal glitching during system startup.
+
+A driver can also query the current direction of a GPIO:
+
+ int gpiod_get_direction(const struct gpio_desc *desc)
+
+This function will return either GPIOF_DIR_IN or GPIOF_DIR_OUT.
+
+Be aware that there is no default direction for GPIOs. Therefore, **using a GPIO
+without setting its direction first is illegal and will result in undefined
+behavior!**
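+
+A minimal sketch of configuring directions, assuming "button" and "led"
+descriptors were obtained as shown in the previous section:
+
+ int ret;
+
+ ret = gpiod_direction_input(button);
+ if (ret < 0)
+     return ret;
+
+ /* Drive the LED inactive from the start to avoid a boot-time glitch. */
+ ret = gpiod_direction_output(led, 0);
+ if (ret < 0)
+     return ret;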
+
+
+Spinlock-Safe GPIO Access
+-------------------------
+Most GPIO controllers can be accessed with memory read/write instructions. Those
+don't need to sleep, and can safely be done from inside hard (non-threaded) IRQ
+handlers and similar contexts.
+
+Use the following calls to access GPIOs from an atomic context:
+
+ int gpiod_get_value(const struct gpio_desc *desc);
+ void gpiod_set_value(struct gpio_desc *desc, int value);
+
+The values are boolean, zero for low, nonzero for high. When reading the value
+of an output pin, the value returned should be what's seen on the pin. That
+won't always match the specified output value, because of issues including
+open-drain signaling and output latencies.
+
+The get/set calls do not return errors because "invalid GPIO" should have been
+reported earlier from gpiod_direction_*(). However, note that not all platforms
+can read the value of output pins; those that can't should always return zero.
+Also, using these calls for GPIOs that can't safely be accessed without sleeping
+(see below) is an error.
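+
+As an example, a hard IRQ handler in a hypothetical driver could mirror an
+input line onto an output line using the atomic accessors; both GPIOs are
+assumed to be spinlock-safe here:
+
+ static irqreturn_t foo_irq_handler(int irq, void *data)
+ {
+     struct foo_priv *priv = data; /* hypothetical driver state */
+
+     /* Safe in hard IRQ context: these accessors never sleep. */
+     gpiod_set_value(priv->out_gpio, gpiod_get_value(priv->in_gpio));
+     return IRQ_HANDLED;
+ }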
+
+
+GPIO Access That May Sleep
+--------------------------
+Some GPIO controllers must be accessed using message based buses like I2C or
+SPI. Commands to read or write those GPIO values require waiting to get to the
+head of a queue to transmit a command and get its response. This requires
+sleeping, which can't be done from inside IRQ handlers.
+
+Platforms that support this type of GPIO distinguish them from other GPIOs by
+returning nonzero from this call:
+
+ int gpiod_cansleep(const struct gpio_desc *desc)
+
+To access such GPIOs, a different set of accessors is defined:
+
+ int gpiod_get_value_cansleep(const struct gpio_desc *desc)
+ void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
+
+Accessing such GPIOs requires a context which may sleep, for example a threaded
+IRQ handler, and those accessors must be used instead of the spinlock-safe
+accessors without the cansleep() name suffix.
+
+Other than the fact that these accessors might sleep, and will work on GPIOs
+that can't be accessed from hardIRQ handlers, these calls act the same as the
+spinlock-safe calls.
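+
+For example, a threaded IRQ handler runs in task context and may therefore
+use the cansleep accessors for a GPIO sitting behind, say, an I2C expander
+(the driver structure and field names are hypothetical):
+
+ static irqreturn_t foo_irq_thread(int irq, void *data)
+ {
+     struct foo_priv *priv = data;
+
+     /* May sleep: the GPIO is reached over a serial bus. */
+     gpiod_set_value_cansleep(priv->led_gpio, 1);
+     return IRQ_HANDLED;
+ }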
+
+
+Active-low State and Raw GPIO Values
+------------------------------------
+Device drivers like to manage the logical state of a GPIO, i.e. the value their
+device will actually receive, no matter what lies between it and the GPIO line.
+In some cases, it might make sense to control the actual GPIO line value. The
+following set of calls ignore the active-low property of a GPIO and work on the
+raw line value:
+
+ int gpiod_get_raw_value(const struct gpio_desc *desc)
+ void gpiod_set_raw_value(struct gpio_desc *desc, int value)
+ int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
+ void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value)
+
+The active-low state of a GPIO can also be queried using the following call:
+
+ int gpiod_is_active_low(const struct gpio_desc *desc)
+
+Note that these functions should only be used with great moderation; a driver
+should not have to care about the physical line level.
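+
+To illustrate the difference, assume a "power" GPIO mapped as active-low, as
+in the board.txt example:
+
+ gpiod_set_value(power, 1);     /* logical: activate, line driven to 0 */
+ gpiod_set_raw_value(power, 1); /* raw: line driven to 1, i.e. inactive */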
+
+GPIOs mapped to IRQs
+--------------------
+GPIO lines can quite often be used as IRQs. You can get the IRQ number
+corresponding to a given GPIO using the following call:
+
+ int gpiod_to_irq(const struct gpio_desc *desc)
+
+It will return an IRQ number, or a negative errno code if the mapping can't be
+done (most likely because that particular GPIO cannot be used as IRQ). It is an
+unchecked error to use a GPIO that wasn't set up as an input using
+gpiod_direction_input(), or to use an IRQ number that didn't originally come
+from gpiod_to_irq(). gpiod_to_irq() is not allowed to sleep.
+
+Non-error values returned from gpiod_to_irq() can be passed to request_irq() or
+free_irq(). They will often be stored into IRQ resources for platform devices,
+by the board-specific initialization code. Note that IRQ trigger options are
+part of the IRQ interface, e.g. IRQF_TRIGGER_FALLING, as are system wakeup
+capabilities.
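+
+A sketch of wiring a GPIO up as an interrupt source; the handler name and
+trigger edge are arbitrary choices for the example:
+
+ int irq, ret;
+
+ ret = gpiod_direction_input(gpio);
+ if (ret < 0)
+     return ret;
+
+ irq = gpiod_to_irq(gpio);
+ if (irq < 0)
+     return irq;
+
+ ret = request_irq(irq, foo_irq_handler, IRQF_TRIGGER_FALLING,
+                   "foo", priv);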
+
+
+Interacting With the Legacy GPIO Subsystem
+==========================================
+Many kernel subsystems still handle GPIOs using the legacy integer-based
+interface. Although it is strongly encouraged to upgrade them to the safer
+descriptor-based API, the following two functions allow you to convert a GPIO
+descriptor into the GPIO integer namespace and vice-versa:
+
+ int desc_to_gpio(const struct gpio_desc *desc)
+ struct gpio_desc *gpio_to_desc(unsigned gpio)
+
+The GPIO number returned by desc_to_gpio() can be safely used as long as the
+GPIO descriptor has not been freed. All the same, a GPIO number passed to
+gpio_to_desc() must have been properly acquired, and usage of the returned GPIO
+descriptor is only possible after the GPIO number has been released.
+
+Freeing a GPIO obtained by one API with the other API is forbidden and an
+unchecked error.
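+
+A short sketch of crossing the two namespaces, purely for illustration:
+
+ /* Hand a descriptor-based GPIO to legacy code expecting an integer. */
+ int gpio = desc_to_gpio(desc);
+
+ /* ... and back again; only valid while the original request is held. */
+ struct gpio_desc *d = gpio_to_desc(gpio);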
diff --git a/Documentation/gpio/driver.txt b/Documentation/gpio/driver.txt
new file mode 100644
index 000000000000..9da0bfa74781
--- /dev/null
+++ b/Documentation/gpio/driver.txt
@@ -0,0 +1,75 @@
+GPIO Descriptor Driver Interface
+================================
+
+This document serves as a guide for writers of GPIO chip drivers. Note that it
+describes the new descriptor-based interface. For a description of the
+deprecated integer-based GPIO interface please refer to gpio-legacy.txt.
+
+Each GPIO controller driver needs to include the following header, which
+declares the structures used to describe a GPIO driver:
+
+ #include <linux/gpio/driver.h>
+
+
+Internal Representation of GPIOs
+================================
+
+Inside a GPIO driver, individual GPIOs are identified by their hardware number,
+which is a unique number between 0 and n-1, n being the number of GPIOs managed
+the chip. This number is purely internal: the hardware number of a particular
+GPIO descriptor is never made visible outside of the driver.
+
+On top of this internal number, each GPIO also needs to have a global number in
+the integer GPIO namespace so that it can be used with the legacy GPIO
+interface. Each chip must thus have a "base" number (which can be automatically
+assigned), and for each GPIO the global number will be (base + hardware number).
+Although the integer representation is considered deprecated, it still has many
+users and thus needs to be maintained.
+
+So for example one platform could use numbers 32-159 for GPIOs, with a
+controller defining 128 GPIOs at a "base" of 32; while another platform uses
+numbers 0..63 with one set of GPIO controllers, 64-79 with another type of GPIO
+controller, and on one particular board 80-95 with an FPGA. The numbers need not
+be contiguous; either of those platforms could also use numbers 2000-2063 to
+identify GPIOs in a bank of I2C GPIO expanders.
+
+
+Controller Drivers: gpio_chip
+=============================
+
+In the gpiolib framework each GPIO controller is packaged as a "struct
+gpio_chip" (see linux/gpio/driver.h for its complete definition) with members
+common to each controller of that type:
+
+ - methods to establish GPIO direction
+ - methods used to access GPIO values
+ - method to return the IRQ number associated with a given GPIO
+ - flag saying whether calls to its methods may sleep
+ - optional debugfs dump method (showing extra state like pullup config)
+ - optional base number (will be automatically assigned if omitted)
+ - label for diagnostics and GPIO mappings using platform data
+
+The code implementing a gpio_chip should support multiple instances of the
+controller, possibly using the driver model. That code will configure each
+gpio_chip and issue gpiochip_add(). Removing a GPIO controller should be rare;
+use gpiochip_remove() when it is unavoidable.
+
+Most often a gpio_chip is part of an instance-specific structure with state not
+exposed by the GPIO interfaces, such as addressing, power management, and more.
+Chips such as codecs will have complex non-GPIO state.
+
+Any debugfs dump method should normally ignore signals which haven't been
+requested as GPIOs. They can use gpiochip_is_requested(), which returns either
+NULL or the label associated with that GPIO when it was requested.
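+
+A minimal sketch of what registering such a controller might look like; the
+foo_* accessor functions are assumed to be implemented elsewhere in the
+driver:
+
+ static struct gpio_chip foo_chip = {
+     .label            = "foo",        /* diagnostics label */
+     .owner            = THIS_MODULE,
+     .base             = -1,           /* let gpiolib assign the base */
+     .ngpio            = 16,
+     .direction_input  = foo_direction_input,
+     .direction_output = foo_direction_output,
+     .get              = foo_get,
+     .set              = foo_set,
+     .can_sleep        = false,        /* registers accessible atomically */
+ };
+
+ /* in the probe path: */
+ ret = gpiochip_add(&foo_chip);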
+
+Locking IRQ usage
+-----------------
+Input GPIOs can be used as IRQ signals. When this happens, a driver is expected
+to mark the GPIO as being used as an IRQ:
+
+ int gpiod_lock_as_irq(struct gpio_desc *desc)
+
+This will prevent the use of non-irq related GPIO APIs until the GPIO IRQ lock
+is released:
+
+ void gpiod_unlock_as_irq(struct gpio_desc *desc)
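+
+A sketch of how a GPIO IRQ user might bracket the IRQ lifetime with these
+calls (error handling abbreviated):
+
+ ret = gpiod_lock_as_irq(desc);
+ if (ret)
+     return ret;
+ /* ... the line is now reserved for IRQ use ... */
+ gpiod_unlock_as_irq(desc);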
diff --git a/Documentation/gpio.txt b/Documentation/gpio/gpio-legacy.txt
index 6f83fa965b4b..6f83fa965b4b 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio/gpio-legacy.txt
diff --git a/Documentation/gpio/gpio.txt b/Documentation/gpio/gpio.txt
new file mode 100644
index 000000000000..cd9b356e88cd
--- /dev/null
+++ b/Documentation/gpio/gpio.txt
@@ -0,0 +1,119 @@
+GPIO Interfaces
+===============
+
+The documents in this directory give detailed instructions on how to access
+GPIOs in drivers, and how to write a driver for a device that provides GPIOs
+itself.
+
+Due to the history of GPIO interfaces in the kernel, there are two different
+ways to obtain and use GPIOs:
+
+ - The descriptor-based interface is the preferred way to manipulate GPIOs,
+and is described by all the files in this directory except gpio-legacy.txt.
+ - The legacy integer-based interface which is considered deprecated (but still
+usable for compatibility reasons) is documented in gpio-legacy.txt.
+
+The remainder of this document applies to the new descriptor-based interface.
+gpio-legacy.txt contains the same information applied to the legacy
+integer-based interface.
+
+
+What is a GPIO?
+===============
+
+A "General Purpose Input/Output" (GPIO) is a flexible software-controlled
+digital signal. They are provided by many kinds of chips, and are familiar
+to Linux developers working with embedded and custom hardware. Each GPIO
+represents a bit connected to a particular pin, or "ball" on Ball Grid Array
+(BGA) packages. Board schematics show which external hardware connects to
+which GPIOs. Drivers can be written generically, so that board setup code
+passes such pin configuration data to drivers.
+
+System-on-Chip (SOC) processors heavily rely on GPIOs. In some cases, every
+non-dedicated pin can be configured as a GPIO; and most chips have at least
+several dozen of them. Programmable logic devices (like FPGAs) can easily
+provide GPIOs; multifunction chips like power managers and audio codecs
+often have a few such pins to help with pin scarcity on SOCs; and there are
+also "GPIO Expander" chips that connect using the I2C or SPI serial buses.
+Most PC southbridges have a few dozen GPIO-capable pins (with only the BIOS
+firmware knowing how they're used).
+
+The exact capabilities of GPIOs vary between systems. Common options:
+
+ - Output values are writable (high=1, low=0). Some chips also have
+ options about how that value is driven, so that for example only one
+ value might be driven, supporting "wire-OR" and similar schemes for the
+ other value (notably, "open drain" signaling).
+
+ - Input values are likewise readable (1, 0). Some chips support readback
+ of pins configured as "output", which is very useful in such "wire-OR"
+ cases (to support bidirectional signaling). GPIO controllers may have
+ input de-glitch/debounce logic, sometimes with software controls.
+
+ - Inputs can often be used as IRQ signals, often edge triggered but
+ sometimes level triggered. Such IRQs may be configurable as system
+ wakeup events, to wake the system from a low power state.
+
+ - Usually a GPIO will be configurable as either input or output, as needed
+ by different product boards; single direction ones exist too.
+
+ - Most GPIOs can be accessed while holding spinlocks, but those accessed
+ through a serial bus normally can't. Some systems support both types.
+
+On a given board each GPIO is used for one specific purpose like monitoring
+MMC/SD card insertion/removal, detecting card write-protect status, driving
+a LED, configuring a transceiver, bit-banging a serial bus, poking a hardware
+watchdog, sensing a switch, and so on.
+
+
+Common GPIO Properties
+======================
+
+These properties recur throughout the other documents of the GPIO interface,
+and it is useful to understand them, especially if you need to define GPIO
+mappings.
+
+Active-High and Active-Low
+--------------------------
+It is natural to assume that a GPIO is "active" when its output signal is 1
+("high"), and inactive when it is 0 ("low"). However in practice the signal of a
+GPIO may be inverted before it reaches its destination, or a device could decide
+to have different conventions about what "active" means. Such decisions should
+be transparent to device drivers, therefore it is possible to define a GPIO as
+being either active-high ("1" means "active", the default) or active-low ("0"
+means "active") so that drivers only need to worry about the logical signal and
+not about what happens at the line level.
+
+Open Drain and Open Source
+--------------------------
+Sometimes shared signals need to use "open drain" (where only the low signal
+level is actually driven), or "open source" (where only the high signal level is
+driven) signaling. That term applies to CMOS transistors; "open collector" is
+used for TTL. A pullup or pulldown resistor causes the high or low signal level.
+This is sometimes called a "wire-AND"; or more practically, from the negative
+logic (low=true) perspective this is a "wire-OR".
+
+One common example of an open drain signal is a shared active-low IRQ line.
+Also, bidirectional data bus signals sometimes use open drain signals.
+
+Some GPIO controllers directly support open drain and open source outputs; many
+don't. When you need open drain signaling but your hardware doesn't directly
+support it, there's a common idiom you can use to emulate it with any GPIO pin
+that can be used as either an input or an output:
+
+ LOW: gpiod_direction_output(gpio, 0) ... this drives the signal and overrides
+ the pullup.
+
+ HIGH: gpiod_direction_input(gpio) ... this turns off the output, so the pullup
+ (or some other device) controls the signal.
+
+The same logic can be applied to emulate open source signaling, by driving the
+high signal and configuring the GPIO as input for low. This open drain/open
+source emulation can be handled transparently by the GPIO framework.
+
+If you are "driving" the signal high but gpiod_get_value(gpio) reports a low
+value (after the appropriate rise time passes), you know some other component is
+driving the shared signal low. That's not necessarily an error. As one common
+example, that's how I2C clocks are stretched: a slave that needs a slower clock
+delays the rising edge of SCK, and the I2C master adjusts its signaling rate
+accordingly.
diff --git a/Documentation/gpio/sysfs.txt b/Documentation/gpio/sysfs.txt
new file mode 100644
index 000000000000..c2c3a97f8ff7
--- /dev/null
+++ b/Documentation/gpio/sysfs.txt
@@ -0,0 +1,155 @@
+GPIO Sysfs Interface for Userspace
+==================================
+
+Platforms which use the "gpiolib" implementors framework may choose to
+configure a sysfs user interface to GPIOs. This is different from the
+debugfs interface, since it provides control over GPIO direction and
+value instead of just showing a gpio state summary. Plus, it could be
+present on production systems without debugging support.
+
+Given appropriate hardware documentation for the system, userspace could
+know for example that GPIO #23 controls the write protect line used to
+protect boot loader segments in flash memory. System upgrade procedures
+may need to temporarily remove that protection, first importing a GPIO,
+then changing its output state, then updating the code before re-enabling
+the write protection. In normal use, GPIO #23 would never be touched,
+and the kernel would have no need to know about it.
+
+Again depending on appropriate hardware documentation, on some systems
+userspace GPIO can be used to determine system configuration data that
+standard kernels won't know about. And for some tasks, simple userspace
+GPIO drivers could be all that the system really needs.
+
+Note that standard kernel drivers exist for common "LEDs and Buttons"
+GPIO tasks: "leds-gpio" and "gpio_keys", respectively. Use those
+instead of talking directly to the GPIOs; they integrate with kernel
+frameworks better than your userspace code could.
+
+
+Paths in Sysfs
+--------------
+There are three kinds of entry in /sys/class/gpio:
+
+ - Control interfaces used to get userspace control over GPIOs;
+
+ - GPIOs themselves; and
+
+ - GPIO controllers ("gpio_chip" instances).
+
+That's in addition to standard files including the "device" symlink.
+
+The control interfaces are write-only:
+
+ /sys/class/gpio/
+
+ "export" ... Userspace may ask the kernel to export control of
+ a GPIO to userspace by writing its number to this file.
+
+ Example: "echo 19 > export" will create a "gpio19" node
+ for GPIO #19, if that's not requested by kernel code.
+
+ "unexport" ... Reverses the effect of exporting to userspace.
+
+ Example: "echo 19 > unexport" will remove a "gpio19"
+ node exported using the "export" file.
+
+GPIO signals have paths like /sys/class/gpio/gpio42/ (for GPIO #42)
+and have the following read/write attributes:
+
+ /sys/class/gpio/gpioN/
+
+ "direction" ... reads as either "in" or "out". This value may
+ normally be written. Writing as "out" defaults to
+ initializing the value as low. To ensure glitch free
+ operation, values "low" and "high" may be written to
+ configure the GPIO as an output with that initial value.
+
+ Note that this attribute *will not exist* if the kernel
+ doesn't support changing the direction of a GPIO, or
+ it was exported by kernel code that didn't explicitly
+ allow userspace to reconfigure this GPIO's direction.
+
+ "value" ... reads as either 0 (low) or 1 (high). If the GPIO
+ is configured as an output, this value may be written;
+ any nonzero value is treated as high.
+
+ If the pin can be configured as an interrupt-generating input
+ and if it has been configured to generate interrupts (see the
+ description of "edge"), you can poll(2) on that file and
+ poll(2) will return whenever the interrupt was triggered. If
+ you use poll(2), set the events POLLPRI and POLLERR. If you
+ use select(2), set the file descriptor in exceptfds. After
+ poll(2) returns, either lseek(2) to the beginning of the sysfs
+ file and read the new value or close the file and re-open it
+ to read the value. A sketch of such a loop in C follows this
+ attribute list.
+
+ "edge" ... reads as either "none", "rising", "falling", or
+ "both". Write these strings to select the signal edge(s)
+ that will make poll(2) on the "value" file return.
+
+ This file exists only if the pin can be configured as an
+ interrupt generating input pin.
+
+ "active_low" ... reads as either 0 (false) or 1 (true). Write
+ any nonzero value to invert the value attribute both
+ for reading and writing. Existing and subsequent
+ poll(2) support configured via the edge attribute
+ for "rising" and "falling" edges will follow this
+ setting.
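+
+The following is a minimal C sketch of the poll(2) loop described under the
+"value" attribute above. It assumes GPIO #23 has already been exported and
+its "edge" attribute configured; the GPIO number is just an example:
+
+ #include <fcntl.h>
+ #include <poll.h>
+ #include <stdio.h>
+ #include <unistd.h>
+
+ int main(void)
+ {
+     char buf[8];
+     struct pollfd pfd;
+
+     pfd.fd = open("/sys/class/gpio/gpio23/value", O_RDONLY);
+     if (pfd.fd < 0)
+         return 1;
+     pfd.events = POLLPRI | POLLERR;
+
+     read(pfd.fd, buf, sizeof(buf));  /* consume the initial value */
+     while (poll(&pfd, 1, -1) > 0) {
+         lseek(pfd.fd, 0, SEEK_SET);  /* rewind before re-reading */
+         read(pfd.fd, buf, sizeof(buf));
+         printf("value: %c\n", buf[0]);
+     }
+     return 0;
+ }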
+
+GPIO controllers have paths like /sys/class/gpio/gpiochip42/ (for the
+controller implementing GPIOs starting at #42) and have the following
+read-only attributes:
+
+ /sys/class/gpio/gpiochipN/
+
+ "base" ... same as N, the first GPIO managed by this chip
+
+ "label" ... provided for diagnostics (not always unique)
+
+ "ngpio" ... how many GPIOs this manges (N to N + ngpio - 1)
+
+Board documentation should in most cases cover what GPIOs are used for
+what purposes. However, those numbers are not always stable; GPIOs on
+a daughtercard might be different depending on the base board being used,
+or other cards in the stack. In such cases, you may need to use the
+gpiochip nodes (possibly in conjunction with schematics) to determine
+the correct GPIO number to use for a given signal.
+
+
+Exporting from Kernel code
+--------------------------
+Kernel code can explicitly manage exports of GPIOs which have already been
+requested using gpio_request():
+
+ /* export the GPIO to userspace */
+ int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
+
+ /* reverse gpiod_export() */
+ void gpiod_unexport(struct gpio_desc *desc);
+
+ /* create a sysfs link to an exported GPIO node */
+ int gpiod_export_link(struct device *dev, const char *name,
+ struct gpio_desc *desc);
+
+ /* change the polarity of a GPIO node in sysfs */
+ int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value);
+
+After a kernel driver requests a GPIO, it may only be made available in
+the sysfs interface by gpiod_export(). The driver can control whether the
+signal direction may change. This helps drivers prevent userspace code
+from accidentally clobbering important system state.
+
+This explicit exporting can help with debugging (by making some kinds
+of experiments easier), or can provide an always-there interface that's
+suitable for documenting as part of a board support package.
+
+After the GPIO has been exported, gpiod_export_link() allows creating
+symlinks from elsewhere in sysfs to the GPIO sysfs node. Drivers can
+use this to provide the interface under their own device in sysfs with
+a descriptive name.
+
+Drivers can use gpiod_sysfs_set_active_low() to hide GPIO line polarity
+differences between boards from user space. Polarity change can be done both
+before and after gpiod_export(), and previously enabled poll(2) support for
+either rising or falling edge will be reconfigured to follow this setting.
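+
+A short sketch of the kernel-side calls; the device pointer, link name, and
+"desc" (a GPIO the driver already requested) are hypothetical:
+
+ /* Expose the GPIO, forbidding direction changes from userspace. */
+ ret = gpiod_export(desc, false);
+ if (ret)
+     return ret;
+
+ /* Make it appear as <device>/reset in sysfs. */
+ ret = gpiod_export_link(dev, "reset", desc);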
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 9ca3e74a10e1..b9e9bd854298 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1190,15 +1190,24 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
owned by uid=0.
ima_hash= [IMA]
- Format: { "sha1" | "md5" }
+ Format: { md5 | sha1 | rmd160 | sha256 | sha384
+ | sha512 | ... }
default: "sha1"
+ The list of supported hash algorithms is defined
+ in crypto/hash_info.h.
+
ima_tcb [IMA]
Load a policy which meets the needs of the Trusted
Computing Base. This means IMA will measure all
programs exec'd, files mmap'd for exec, and all files
opened for read by uid=0.
+ ima_template= [IMA]
+ Select one of defined IMA measurements template formats.
+ Formats: { "ima" | "ima-ng" }
+ Default: "ima-ng"
+
init= [KNL]
Format: <full_path>
Run specified binary instead of /sbin/init as init
@@ -1520,6 +1529,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
* atapi_dmadir: Enable ATAPI DMADIR bridge support
+ * disable: Disable this device.
+
If there are multiple matching configurations changing
the same attribute, the last one is used.
diff --git a/Documentation/mic/mpssd/mpssd.c b/Documentation/mic/mpssd/mpssd.c
index 0c980ad40b17..4d17487d5ad9 100644
--- a/Documentation/mic/mpssd/mpssd.c
+++ b/Documentation/mic/mpssd/mpssd.c
@@ -313,7 +313,7 @@ static struct mic_device_desc *get_device_desc(struct mic_info *mic, int type)
int i;
void *dp = get_dp(mic, type);
- for (i = mic_aligned_size(struct mic_bootparam); i < PAGE_SIZE;
+ for (i = sizeof(struct mic_bootparam); i < PAGE_SIZE;
i += mic_total_desc_size(d)) {
d = dp + i;
@@ -445,8 +445,8 @@ init_vr(struct mic_info *mic, int fd, int type,
__func__, mic->name, vr0->va, vr0->info, vr_size,
vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN));
mpsslog("magic 0x%x expected 0x%x\n",
- vr0->info->magic, MIC_MAGIC + type);
- assert(vr0->info->magic == MIC_MAGIC + type);
+ le32toh(vr0->info->magic), MIC_MAGIC + type);
+ assert(le32toh(vr0->info->magic) == MIC_MAGIC + type);
if (vr1) {
vr1->va = (struct mic_vring *)
&va[MIC_DEVICE_PAGE_END + vr_size];
@@ -458,8 +458,8 @@ init_vr(struct mic_info *mic, int fd, int type,
__func__, mic->name, vr1->va, vr1->info, vr_size,
vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN));
mpsslog("magic 0x%x expected 0x%x\n",
- vr1->info->magic, MIC_MAGIC + type + 1);
- assert(vr1->info->magic == MIC_MAGIC + type + 1);
+ le32toh(vr1->info->magic), MIC_MAGIC + type + 1);
+ assert(le32toh(vr1->info->magic) == MIC_MAGIC + type + 1);
}
done:
return va;
@@ -520,7 +520,7 @@ static void *
virtio_net(void *arg)
{
static __u8 vnet_hdr[2][sizeof(struct virtio_net_hdr)];
- static __u8 vnet_buf[2][MAX_NET_PKT_SIZE] __aligned(64);
+ static __u8 vnet_buf[2][MAX_NET_PKT_SIZE] __attribute__ ((aligned(64)));
struct iovec vnet_iov[2][2] = {
{ { .iov_base = vnet_hdr[0], .iov_len = sizeof(vnet_hdr[0]) },
{ .iov_base = vnet_buf[0], .iov_len = sizeof(vnet_buf[0]) } },
@@ -1412,6 +1412,12 @@ mic_config(void *arg)
}
do {
+ ret = lseek(fd, 0, SEEK_SET);
+ if (ret < 0) {
+ mpsslog("%s: Failed to seek to file start '%s': %s\n",
+ mic->name, pathname, strerror(errno));
+ goto close_error1;
+ }
ret = read(fd, value, sizeof(value));
if (ret < 0) {
mpsslog("%s: Failed to read sysfs entry '%s': %s\n",
diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt
new file mode 100644
index 000000000000..2b40e04d3c49
--- /dev/null
+++ b/Documentation/module-signing.txt
@@ -0,0 +1,240 @@
+ ==============================
+ KERNEL MODULE SIGNING FACILITY
+ ==============================
+
+CONTENTS
+
+ - Overview.
+ - Configuring module signing.
+ - Generating signing keys.
+ - Public keys in the kernel.
+ - Manually signing modules.
+ - Signed modules and stripping.
+ - Loading signed modules.
+ - Non-valid signatures and unsigned modules.
+ - Administering/protecting the private key.
+
+
+========
+OVERVIEW
+========
+
+The kernel module signing facility cryptographically signs modules during
+installation and then checks the signature upon loading the module. This
+allows increased kernel security by disallowing the loading of unsigned modules
+or modules signed with an invalid key. Module signing increases security by
+making it harder to load a malicious module into the kernel. The module
+signature checking is done by the kernel so that it is not necessary to have
+trusted userspace bits.
+
+This facility uses X.509 ITU-T standard certificates to encode the public keys
+involved. The signatures themselves are not encoded in any industry-standard
+format. The facility currently only supports the RSA public key encryption
+standard (though it is pluggable and permits others to be used). The possible
+hash algorithms that can be used are SHA-1, SHA-224, SHA-256, SHA-384, and
+SHA-512 (the algorithm is selected by data in the signature).
+
+
+==========================
+CONFIGURING MODULE SIGNING
+==========================
+
+The module signing facility is enabled by going to the "Enable Loadable Module
+Support" section of the kernel configuration and turning on
+
+ CONFIG_MODULE_SIG "Module signature verification"
+
+This has a number of options available:
+
+ (1) "Require modules to be validly signed" (CONFIG_MODULE_SIG_FORCE)
+
+ This specifies how the kernel should deal with a module that has a
+ signature for which the key is not known or a module that is unsigned.
+
+ If this is off (ie. "permissive"), then modules for which the key is not
+ available and modules that are unsigned are permitted, but the kernel will
+ be marked as being tainted.
+
+ If this is on (ie. "restrictive"), only modules that have a valid
+ signature that can be verified by a public key in the kernel's possession
+ will be loaded. All other modules will generate an error.
+
+ Irrespective of the setting here, if the module has a signature block that
+ cannot be parsed, it will be rejected out of hand.
+
+
+ (2) "Automatically sign all modules" (CONFIG_MODULE_SIG_ALL)
+
+ If this is on then modules will be automatically signed during the
+ modules_install phase of a build. If this is off, then the modules must
+ be signed manually using:
+
+ scripts/sign-file
+
+
+ (3) "Which hash algorithm should modules be signed with?"
+
+ This presents a choice of which hash algorithm the installation phase will
+ sign the modules with:
+
+ CONFIG_SIG_SHA1 "Sign modules with SHA-1"
+ CONFIG_SIG_SHA224 "Sign modules with SHA-224"
+ CONFIG_SIG_SHA256 "Sign modules with SHA-256"
+ CONFIG_SIG_SHA384 "Sign modules with SHA-384"
+ CONFIG_SIG_SHA512 "Sign modules with SHA-512"
+
+ The algorithm selected here will also be built into the kernel (rather
+ than being a module) so that modules signed with that algorithm can have
+ their signatures checked without causing a dependency loop.
+
+
+=======================
+GENERATING SIGNING KEYS
+=======================
+
+Cryptographic keypairs are required to generate and check signatures. A
+private key is used to generate a signature and the corresponding public key is
+used to check it. The private key is only needed during the build, after which
+it can be deleted or stored securely. The public key gets built into the
+kernel so that it can be used to check the signatures as the modules are
+loaded.
+
+Under normal conditions, the kernel build will automatically generate a new
+keypair using openssl if one does not exist in the files:
+
+ signing_key.priv
+ signing_key.x509
+
+during the building of vmlinux (the public part of the key needs to be built
+into vmlinux) using parameters in the:
+
+ x509.genkey
+
+file (which is also generated if it does not already exist).
+
+It is strongly recommended that you provide your own x509.genkey file.
+
+Most notably, in the x509.genkey file, the req_distinguished_name section
+should be altered from the default:
+
+ [ req_distinguished_name ]
+ O = Magrathea
+ CN = Glacier signing key
+ emailAddress = slartibartfast@magrathea.h2g2
+
+The generated RSA key size can also be set with:
+
+ [ req ]
+ default_bits = 4096
+
+
+It is also possible to manually generate the private/public key files using the
+x509.genkey key generation configuration file in the root node of the Linux
+kernel sources tree and the openssl command. The following is an example to
+generate the public/private key files:
+
+ openssl req -new -nodes -utf8 -sha256 -days 36500 -batch -x509 \
+ -config x509.genkey -outform DER -out signing_key.x509 \
+ -keyout signing_key.priv
+
+
+=========================
+PUBLIC KEYS IN THE KERNEL
+=========================
+
+The kernel contains a ring of public keys that can be viewed by root. They're
+in a keyring called ".system_keyring" that can be seen by:
+
+ [root@deneb ~]# cat /proc/keys
+ ...
+ 223c7853 I------ 1 perm 1f030000 0 0 keyring .system_keyring: 1
+ 302d2d52 I------ 1 perm 1f010000 0 0 asymmetri Fedora kernel signing key: d69a84e6bce3d216b979e9505b3e3ef9a7118079: X509.RSA a7118079 []
+ ...
+
+Beyond the public key generated specifically for module signing, any file
+placed in the kernel source root directory or the kernel build root directory
+whose name is suffixed with ".x509" will be assumed to be an X.509 public key
+and will be added to the keyring.
+
+Further, the architecture code may take public keys from a hardware store and
+add those in also (e.g. from the UEFI key database).
+
+Finally, it is possible to add additional public keys by doing:
+
+ keyctl padd asymmetric "" [.system_keyring-ID] <[key-file]
+
+e.g.:
+
+ keyctl padd asymmetric "" 0x223c7853 <my_public_key.x509
+
+Note, however, that the kernel will only permit keys to be added to
+.system_keyring _if_ the new key's X.509 wrapper is validly signed by a key
+that is already resident in the .system_keyring at the time the key was added.
+
+
+=========================
+MANUALLY SIGNING MODULES
+=========================
+
+To manually sign a module, use the scripts/sign-file tool available in
+the Linux kernel source tree. The script requires 4 arguments:
+
+ 1. The hash algorithm (e.g., sha256)
+ 2. The private key filename
+ 3. The public key filename
+ 4. The kernel module to be signed
+
+The following is an example to sign a kernel module:
+
+ scripts/sign-file sha512 kernel-signkey.priv \
+ kernel-signkey.x509 module.ko
+
+The hash algorithm used does not have to match the one configured, but if it
+doesn't, you should make sure that hash algorithm is either built into the
+kernel or can be loaded without requiring itself (to avoid a dependency loop).
+
+
+============================
+SIGNED MODULES AND STRIPPING
+============================
+
+A signed module has a digital signature simply appended at the end. The string
+"~Module signature appended~." at the end of the module's file confirms that a
+signature is present but it does not confirm that the signature is valid!
+
+Signed modules are BRITTLE as the signature is outside of the defined ELF
+container. Thus they MAY NOT be stripped once the signature is computed and
+attached. Note the entire module is the signed payload, including any and all
+debug information present at the time of signing.
+
+
+======================
+LOADING SIGNED MODULES
+======================
+
+Modules are loaded with insmod, modprobe, init_module() or finit_module(),
+exactly as for unsigned modules as no processing is done in userspace. The
+signature checking is all done within the kernel.
+
+
+=========================================
+NON-VALID SIGNATURES AND UNSIGNED MODULES
+=========================================
+
+If CONFIG_MODULE_SIG_FORCE is enabled or enforcemodulesig=1 is supplied on
+the kernel command line, the kernel will only load validly signed modules
+for which it has a public key. Otherwise, it will also load modules that are
+unsigned. Any module for which the kernel has a key, but which proves to have
+a signature mismatch will not be permitted to load.
+
+Any module that has an unparseable signature will be rejected.
+
+
+=========================================
+ADMINISTERING/PROTECTING THE PRIVATE KEY
+=========================================
+
+Since the private key is used to sign modules, viruses and malware could use
+the private key to sign modules and compromise the operating system. The
+private key must be either destroyed or moved to a secure location and not kept
+in the root node of the kernel source tree.
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 2cdb8b66caa9..a4d925e4ba7a 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -853,6 +853,14 @@ resend_igmp
This option was added for bonding version 3.7.0.
+lp_interval
+
+ Specifies the number of seconds between instances where the bonding
+	driver sends learning packets to each slave's peer switch.
+
+	The valid range is 1 - 0x7fffffff; the default value is 1. This option
+	has an effect only in balance-tlb and balance-alb modes.
+
3. Configuring Bonding Devices
==============================
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index 4c072414eadb..f3089d423515 100644
--- a/Documentation/networking/can.txt
+++ b/Documentation/networking/can.txt
@@ -2,21 +2,20 @@
can.txt
-Readme file for the Controller Area Network Protocol Family (aka Socket CAN)
+Readme file for the Controller Area Network Protocol Family (aka SocketCAN)
This file contains
- 1 Overview / What is Socket CAN
+ 1 Overview / What is SocketCAN
2 Motivation / Why using the socket API
- 3 Socket CAN concept
+ 3 SocketCAN concept
3.1 receive lists
3.2 local loopback of sent frames
- 3.3 network security issues (capabilities)
- 3.4 network problem notifications
+ 3.3 network problem notifications
- 4 How to use Socket CAN
+ 4 How to use SocketCAN
4.1 RAW protocol sockets with can_filters (SOCK_RAW)
4.1.1 RAW socket option CAN_RAW_FILTER
4.1.2 RAW socket option CAN_RAW_ERR_FILTER
@@ -34,7 +33,7 @@ This file contains
4.3 connected transport protocols (SOCK_SEQPACKET)
4.4 unconnected transport protocols (SOCK_DGRAM)
- 5 Socket CAN core module
+ 5 SocketCAN core module
5.1 can.ko module params
5.2 procfs content
5.3 writing own CAN protocol modules
@@ -51,20 +50,20 @@ This file contains
6.6 CAN FD (flexible data rate) driver support
6.7 supported CAN hardware
- 7 Socket CAN resources
+ 7 SocketCAN resources
8 Credits
============================================================================
-1. Overview / What is Socket CAN
+1. Overview / What is SocketCAN
--------------------------------
The socketcan package is an implementation of CAN protocols
(Controller Area Network) for Linux. CAN is a networking technology
which has widespread use in automation, embedded devices, and
automotive fields. While there have been other CAN implementations
-for Linux based on character devices, Socket CAN uses the Berkeley
+for Linux based on character devices, SocketCAN uses the Berkeley
socket API, the Linux network stack and implements the CAN device
drivers as network interfaces. The CAN socket API has been designed
as similar as possible to the TCP/IP protocols to allow programmers,
@@ -74,7 +73,7 @@ sockets.
2. Motivation / Why using the socket API
----------------------------------------
-There have been CAN implementations for Linux before Socket CAN so the
+There have been CAN implementations for Linux before SocketCAN so the
question arises, why we have started another project. Most existing
implementations come as a device driver for some CAN hardware, they
are based on character devices and provide comparatively little
@@ -89,10 +88,10 @@ the CAN controller requires employment of another device driver and
often the need for adaption of large parts of the application to the
new driver's API.
-Socket CAN was designed to overcome all of these limitations. A new
+SocketCAN was designed to overcome all of these limitations. A new
protocol family has been implemented which provides a socket interface
to user space applications and which builds upon the Linux network
-layer, so to use all of the provided queueing functionality. A device
+layer, enabling use of all the provided queueing functionality. A device
driver for CAN controller hardware registers itself with the Linux
network layer as a network device, so that CAN frames from the
controller can be passed up to the network layer and on to the CAN
@@ -146,15 +145,15 @@ solution for a couple of reasons:
providing an API for device drivers to register with. However, then
it would be no more difficult, or may be even easier, to use the
networking framework provided by the Linux kernel, and this is what
- Socket CAN does.
+ SocketCAN does.
The use of the networking framework of the Linux kernel is just the
natural and most appropriate way to implement CAN for Linux.
-3. Socket CAN concept
+3. SocketCAN concept
---------------------
- As described in chapter 2 it is the main goal of Socket CAN to
+ As described in chapter 2 it is the main goal of SocketCAN to
provide a socket interface to user space applications which builds
upon the Linux network layer. In contrast to the commonly known
TCP/IP and ethernet networking, the CAN bus is a broadcast-only(!)
@@ -168,11 +167,11 @@ solution for a couple of reasons:
The network transparent access of multiple applications leads to the
problem that different applications may be interested in the same
- CAN-IDs from the same CAN network interface. The Socket CAN core
+ CAN-IDs from the same CAN network interface. The SocketCAN core
module - which implements the protocol family CAN - provides several
high efficient receive lists for this reason. If e.g. a user space
application opens a CAN RAW socket, the raw protocol module itself
- requests the (range of) CAN-IDs from the Socket CAN core that are
+ requests the (range of) CAN-IDs from the SocketCAN core that are
requested by the user. The subscription and unsubscription of
CAN-IDs can be done for specific CAN interfaces or for all(!) known
CAN interfaces with the can_rx_(un)register() functions provided to
@@ -217,21 +216,7 @@ solution for a couple of reasons:
* = you really like to have this when you're running analyser tools
like 'candump' or 'cansniffer' on the (same) node.
- 3.3 network security issues (capabilities)
-
- The Controller Area Network is a local field bus transmitting only
- broadcast messages without any routing and security concepts.
- In the majority of cases the user application has to deal with
- raw CAN frames. Therefore it might be reasonable NOT to restrict
- the CAN access only to the user root, as known from other networks.
- Since the currently implemented CAN_RAW and CAN_BCM sockets can only
- send and receive frames to/from CAN interfaces it does not affect
- security of others networks to allow all users to access the CAN.
- To enable non-root users to access CAN_RAW and CAN_BCM protocol
- sockets the Kconfig options CAN_RAW_USER and/or CAN_BCM_USER may be
- selected at kernel compile time.
-
- 3.4 network problem notifications
+ 3.3 network problem notifications
The use of the CAN bus may lead to several problems on the physical
and media access control layer. Detecting and logging of these lower
@@ -251,11 +236,11 @@ solution for a couple of reasons:
by default. The format of the CAN error message frame is briefly
described in the Linux header file "include/linux/can/error.h".
-4. How to use Socket CAN
+4. How to use SocketCAN
------------------------
Like TCP/IP, you first need to open a socket for communicating over a
- CAN network. Since Socket CAN implements a new protocol family, you
+ CAN network. Since SocketCAN implements a new protocol family, you
need to pass PF_CAN as the first argument to the socket(2) system
call. Currently, there are two CAN protocols to choose from, the raw
socket protocol and the broadcast manager (BCM). So to open a socket,
@@ -286,8 +271,8 @@ solution for a couple of reasons:
};
The alignment of the (linear) payload data[] to a 64bit boundary
- allows the user to define own structs and unions to easily access the
- CAN payload. There is no given byteorder on the CAN bus by
+ allows the user to define their own structs and unions to easily access
+ the CAN payload. There is no given byteorder on the CAN bus by
default. A read(2) system call on a CAN_RAW socket transfers a
struct can_frame to the user space.
@@ -479,7 +464,7 @@ solution for a couple of reasons:
setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, NULL, 0);
- To set the filters to zero filters is quite obsolete as not read
+ Setting zero filters is rarely useful, as not reading
data causes the raw socket to discard the received CAN frames. But
having this 'send only' use-case we may remove the receive list in the
Kernel to save a little (really a very little!) CPU usage.
@@ -814,17 +799,17 @@ solution for a couple of reasons:
4.4 unconnected transport protocols (SOCK_DGRAM)
-5. Socket CAN core module
+5. SocketCAN core module
-------------------------
- The Socket CAN core module implements the protocol family
+ The SocketCAN core module implements the protocol family
PF_CAN. CAN protocol modules are loaded by the core module at
runtime. The core module provides an interface for CAN protocol
modules to subscribe needed CAN IDs (see chapter 3.1).
5.1 can.ko module params
- - stats_timer: To calculate the Socket CAN core statistics
+ - stats_timer: To calculate the SocketCAN core statistics
(e.g. current/maximum frames per second) this 1 second timer is
invoked at can.ko module start time by default. This timer can be
disabled by using stattimer=0 on the module commandline.
@@ -833,7 +818,7 @@ solution for a couple of reasons:
5.2 procfs content
- As described in chapter 3.1 the Socket CAN core uses several filter
+ As described in chapter 3.1 the SocketCAN core uses several filter
lists to deliver received CAN frames to CAN protocol modules. These
receive lists, their filters and the count of filter matches can be
checked in the appropriate receive list. All entries contain the
@@ -860,15 +845,15 @@ solution for a couple of reasons:
Additional procfs files in /proc/net/can
- stats - Socket CAN core statistics (rx/tx frames, match ratios, ...)
+ stats - SocketCAN core statistics (rx/tx frames, match ratios, ...)
reset_stats - manual statistic reset
- version - prints the Socket CAN core version and the ABI version
+ version - prints the SocketCAN core version and the ABI version
5.3 writing own CAN protocol modules
To implement a new protocol in the protocol family PF_CAN a new
protocol has to be defined in include/linux/can.h .
- The prototypes and definitions to use the Socket CAN core can be
+ The prototypes and definitions to use the SocketCAN core can be
accessed by including include/linux/can/core.h .
In addition to functions that register the CAN protocol and the
CAN device notifier chain there are functions to subscribe CAN
@@ -1105,7 +1090,7 @@ solution for a couple of reasons:
$ ip link set canX up type can bitrate 125000
- A device may enter the "bus-off" state if too much errors occurred on
+ A device may enter the "bus-off" state if too many errors occurred on
the CAN bus. Then no more messages are received or sent. An automatic
bus-off recovery can be enabled by setting the "restart-ms" to a
non-zero value, e.g.:
@@ -1125,7 +1110,7 @@ solution for a couple of reasons:
CAN FD capable CAN controllers support two different bitrates for the
arbitration phase and the payload phase of the CAN FD frame. Therefore a
- second bittiming has to be specified in order to enable the CAN FD bitrate.
+ second bit timing has to be specified in order to enable the CAN FD bitrate.
Additionally CAN FD capable CAN controllers support up to 64 bytes of
payload. The representation of this length in can_frame.can_dlc and
@@ -1150,21 +1135,16 @@ solution for a couple of reasons:
6.7 Supported CAN hardware
Please check the "Kconfig" file in "drivers/net/can" to get an actual
- list of the support CAN hardware. On the Socket CAN project website
+ list of the supported CAN hardware. On the SocketCAN project website
(see chapter 7) there might be further drivers available, also for
older kernel versions.
-7. Socket CAN resources
+7. SocketCAN resources
-----------------------
- You can find further resources for Socket CAN like user space tools,
- support for old kernel versions, more drivers, mailing lists, etc.
- at the BerliOS OSS project website for Socket CAN:
-
- http://developer.berlios.de/projects/socketcan
-
- If you have questions, bug fixes, etc., don't hesitate to post them to
- the Socketcan-Users mailing list. But please search the archives first.
+ The Linux CAN / SocketCAN project resources (project site / mailing list)
+ are referenced in the MAINTAINERS file in the Linux source tree.
+ Search for CAN NETWORK [LAYERS|DRIVERS].
8. Credits
----------
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index cdb3e40b9d14..a06b48d2f5cc 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -1,49 +1,563 @@
-filter.txt: Linux Socket Filtering
-Written by: Jay Schulist <jschlst@samba.org>
+Linux Socket Filtering aka Berkeley Packet Filter (BPF)
+=======================================================
Introduction
-============
-
- Linux Socket Filtering is derived from the Berkeley
-Packet Filter. There are some distinct differences between
-the BSD and Linux Kernel Filtering.
-
-Linux Socket Filtering (LSF) allows a user-space program to
-attach a filter onto any socket and allow or disallow certain
-types of data to come through the socket. LSF follows exactly
-the same filter code structure as the BSD Berkeley Packet Filter
-(BPF), so referring to the BSD bpf.4 manpage is very helpful in
-creating filters.
-
-LSF is much simpler than BPF. One does not have to worry about
-devices or anything like that. You simply create your filter
-code, send it to the kernel via the SO_ATTACH_FILTER option and
-if your filter code passes the kernel check on it, you then
-immediately begin filtering data on that socket.
-
-You can also detach filters from your socket via the
-SO_DETACH_FILTER option. This will probably not be used much
-since when you close a socket that has a filter on it the
-filter is automagically removed. The other less common case
-may be adding a different filter on the same socket where you had another
-filter that is still running: the kernel takes care of removing
-the old one and placing your new one in its place, assuming your
-filter has passed the checks, otherwise if it fails the old filter
-will remain on that socket.
-
-SO_LOCK_FILTER option allows to lock the filter attached to a
-socket. Once set, a filter cannot be removed or changed. This allows
-one process to setup a socket, attach a filter, lock it then drop
-privileges and be assured that the filter will be kept until the
-socket is closed.
-
-Examples
-========
-
-Ioctls-
-setsockopt(sockfd, SOL_SOCKET, SO_ATTACH_FILTER, &Filter, sizeof(Filter));
-setsockopt(sockfd, SOL_SOCKET, SO_DETACH_FILTER, &value, sizeof(value));
-setsockopt(sockfd, SOL_SOCKET, SO_LOCK_FILTER, &value, sizeof(value));
-
-See the BSD bpf.4 manpage and the BSD Packet Filter paper written by
-Steven McCanne and Van Jacobson of Lawrence Berkeley Laboratory.
+------------
+
+Linux Socket Filtering (LSF) is derived from the Berkeley Packet Filter.
+Though there are some distinct differences between the BSD and Linux
+kernel filtering, when we speak of BPF or LSF in the Linux context, we
+mean the very same mechanism of filtering in the Linux kernel.
+
+BPF allows a user-space program to attach a filter onto any socket and
+allow or disallow certain types of data to come through the socket. LSF
+follows exactly the same filter code structure as BSD's BPF, so referring
+to the BSD bpf.4 manpage is very helpful in creating filters.
+
+On Linux, BPF is much simpler than on BSD. One does not have to worry
+about devices or anything like that. You simply create your filter code,
+send it to the kernel via the SO_ATTACH_FILTER option and if your filter
+code passes the kernel check on it, you then immediately begin filtering
+data on that socket.
+
+You can also detach filters from your socket via the SO_DETACH_FILTER
+option. This will probably not be used much since when you close a socket
+that has a filter on it the filter is automagically removed. The other
+less common case may be adding a different filter on the same socket where
+you had another filter that is still running: the kernel takes care of
+removing the old one and placing your new one in its place, assuming your
+filter has passed the checks, otherwise if it fails the old filter will
+remain on that socket.
+
+The SO_LOCK_FILTER option allows locking the filter attached to a socket.
+Once set, a filter cannot be removed or changed. This allows one process
+to set up a socket, attach a filter, lock it, then drop privileges and be
+assured that the filter will be kept until the socket is closed.
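+
+A minimal sketch of that lock-then-drop-privileges pattern (assuming a
+socket sock with a filter already attached, and a process that starts
+out with the necessary privileges):
+
+int one = 1;
+
+if (setsockopt(sock, SOL_SOCKET, SO_LOCK_FILTER, &one, sizeof(one)) < 0)
+	/* ... bail out ... */
+/* The filter is now locked in place; privileges can safely be dropped. */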
+
+The biggest user of this construct might be libpcap. Issuing a high-level
+filter command like `tcpdump -i em1 port 22` passes through the libpcap
+internal compiler that generates a structure that can eventually be loaded
+via SO_ATTACH_FILTER to the kernel. `tcpdump -i em1 port 22 -ddd`
+displays what is being placed into this structure.
+
+Although we were only speaking about sockets here, BPF in Linux is used
+in many more places. There's xt_bpf for netfilter, cls_bpf in the kernel
+qdisc layer, SECCOMP-BPF (SECure COMPuting [1]), and lots of other places,
+such as the team driver, PTP code, etc., where BPF is being used.
+
+ [1] Documentation/prctl/seccomp_filter.txt
+
+Original BPF paper:
+
+Steven McCanne and Van Jacobson. 1993. The BSD packet filter: a new
+architecture for user-level packet capture. In Proceedings of the
+USENIX Winter 1993 Conference Proceedings on USENIX Winter 1993
+Conference Proceedings (USENIX'93). USENIX Association, Berkeley,
+CA, USA, 2-2. [http://www.tcpdump.org/papers/bpf-usenix93.pdf]
+
+Structure
+---------
+
+User space applications include <linux/filter.h> which contains the
+following relevant structures:
+
+struct sock_filter { /* Filter block */
+ __u16 code; /* Actual filter code */
+ __u8 jt; /* Jump true */
+ __u8 jf; /* Jump false */
+ __u32 k; /* Generic multiuse field */
+};
+
+Such a structure is assembled as an array of 4-tuples that each contain
+a code, jt, jf and k value. jt and jf are jump offsets, and k is a generic
+value to be used for a provided code.
+
+struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
+ unsigned short len; /* Number of filter blocks */
+ struct sock_filter __user *filter;
+};
+
+For socket filtering, a pointer to this structure (as shown in the
+follow-up example) is passed to the kernel through setsockopt(2).
+
+Example
+-------
+
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <arpa/inet.h>
+#include <linux/if_ether.h>
+/* ... */
+
+/* From the example above: tcpdump -i em1 port 22 -dd */
+struct sock_filter code[] = {
+ { 0x28, 0, 0, 0x0000000c },
+ { 0x15, 0, 8, 0x000086dd },
+ { 0x30, 0, 0, 0x00000014 },
+ { 0x15, 2, 0, 0x00000084 },
+ { 0x15, 1, 0, 0x00000006 },
+ { 0x15, 0, 17, 0x00000011 },
+ { 0x28, 0, 0, 0x00000036 },
+ { 0x15, 14, 0, 0x00000016 },
+ { 0x28, 0, 0, 0x00000038 },
+ { 0x15, 12, 13, 0x00000016 },
+ { 0x15, 0, 12, 0x00000800 },
+ { 0x30, 0, 0, 0x00000017 },
+ { 0x15, 2, 0, 0x00000084 },
+ { 0x15, 1, 0, 0x00000006 },
+ { 0x15, 0, 8, 0x00000011 },
+ { 0x28, 0, 0, 0x00000014 },
+ { 0x45, 6, 0, 0x00001fff },
+ { 0xb1, 0, 0, 0x0000000e },
+ { 0x48, 0, 0, 0x0000000e },
+ { 0x15, 2, 0, 0x00000016 },
+ { 0x48, 0, 0, 0x00000010 },
+ { 0x15, 0, 1, 0x00000016 },
+ { 0x06, 0, 0, 0x0000ffff },
+ { 0x06, 0, 0, 0x00000000 },
+};
+
+struct sock_fprog bpf = {
+ .len = ARRAY_SIZE(code),
+ .filter = code,
+};
+
+sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
+if (sock < 0)
+ /* ... bail out ... */
+
+ret = setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf));
+if (ret < 0)
+ /* ... bail out ... */
+
+/* ... */
+close(sock);
+
+The above example code attaches a socket filter for a PF_PACKET socket
+in order to let all IPv4/IPv6 packets with port 22 pass. The rest will
+be dropped for this socket.
+
+The setsockopt(2) call to SO_DETACH_FILTER doesn't need any arguments,
+and SO_LOCK_FILTER, which prevents the filter from being detached, takes
+an integer value of 0 or 1.
+
+Note that socket filters are not restricted to PF_PACKET sockets only,
+but can also be used on other socket families.
+
+Summary of system calls:
+
+ * setsockopt(sockfd, SOL_SOCKET, SO_ATTACH_FILTER, &val, sizeof(val));
+ * setsockopt(sockfd, SOL_SOCKET, SO_DETACH_FILTER, &val, sizeof(val));
+ * setsockopt(sockfd, SOL_SOCKET, SO_LOCK_FILTER, &val, sizeof(val));
+
+Normally, most use cases for socket filtering on packet sockets will be
+covered by libpcap in high-level syntax, so as an application developer
+you should stick to that. libpcap wraps its own layer around all that.
+
+Unless i) using/linking to libpcap is not an option, ii) the required BPF
+filters use Linux extensions that are not supported by libpcap's compiler,
+iii) a filter might be more complex and not cleanly implementable with
+libpcap's compiler, or iv) particular filter codes should be optimized
+differently than libpcap's internal compiler does; in such cases, writing
+a filter "by hand" can be an alternative. For example,
+xt_bpf and cls_bpf users might have requirements that could result in
+more complex filter code, or one that cannot be expressed with libpcap
+(e.g. different return codes for various code paths). Moreover, BPF JIT
+implementors may wish to manually write test cases and thus need low-level
+access to BPF code as well.
+
+BPF engine and instruction set
+------------------------------
+
+Under tools/net/ there's a small helper tool called bpf_asm which can
+be used to write low-level filters for the example scenarios mentioned in
+the previous section. The asm-like syntax has been implemented in
+bpf_asm and will be used for further explanations (instead of dealing with
+less readable opcodes directly, principles are the same). The syntax is
+closely modelled after Steven McCanne's and Van Jacobson's BPF paper.
+
+The BPF architecture consists of the following basic elements:
+
+ Element Description
+
+ A 32 bit wide accumulator
+ X 32 bit wide X register
+ M[] 16 x 32 bit wide misc registers aka "scratch memory
+ store", addressable from 0 to 15
+
+A program, which is translated by bpf_asm into "opcodes", is an array that
+consists of the following elements (as already mentioned):
+
+ op:16, jt:8, jf:8, k:32
+
+The element op is a 16 bit wide opcode that has a particular instruction
+encoded. jt and jf are two 8 bit wide jump targets, one for condition
+"jump if true", the other one "jump if false". Eventually, element k
+contains a miscellaneous argument that can be interpreted in different
+ways depending on the given instruction in op.
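+
+For hand-crafted arrays such as the one in the earlier example,
+<linux/filter.h> also provides the BPF_STMT() and BPF_JUMP() helper macros
+that fill in such a tuple. As a small sketch (an ARP-only filter, the same
+logic as the bpf_asm ARP example further below; ETH_P_ARP comes from
+<linux/if_ether.h>):
+
+struct sock_filter arp_only[] = {
+	BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),               /* A = ethertype */
+	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_ARP, 0, 1), /* ARP? */
+	BPF_STMT(BPF_RET | BPF_K, 0xffffffff),                /* accept packet */
+	BPF_STMT(BPF_RET | BPF_K, 0),                         /* drop packet */
+};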
+
+The instruction set consists of load, store, branch, alu, miscellaneous
+and return instructions that are also represented in bpf_asm syntax. This
+table lists all available bpf_asm instructions and what their underlying
+opcodes, as defined in linux/filter.h, stand for:
+
+ Instruction Addressing mode Description
+
+ ld 1, 2, 3, 4, 10 Load word into A
+ ldi 4 Load word into A
+ ldh 1, 2 Load half-word into A
+ ldb 1, 2 Load byte into A
+ ldx 3, 4, 5, 10 Load word into X
+ ldxi 4 Load word into X
+ ldxb 5 Load byte into X
+
+ st 3 Store A into M[]
+ stx 3 Store X into M[]
+
+ jmp 6 Jump to label
+ ja 6 Jump to label
+ jeq 7, 8 Jump on A == k
+ jneq 8 Jump on A != k
+ jne 8 Jump on A != k
+ jlt 8 Jump on A < k
+ jle 8 Jump on A <= k
+ jgt 7, 8 Jump on A > k
+ jge 7, 8 Jump on A >= k
+ jset 7, 8 Jump on A & k
+
+ add 0, 4 A + <x>
+ sub 0, 4 A - <x>
+ mul 0, 4 A * <x>
+ div 0, 4 A / <x>
+ mod 0, 4 A % <x>
+ neg -A
+ and 0, 4 A & <x>
+ or 0, 4 A | <x>
+ xor 0, 4 A ^ <x>
+ lsh 0, 4 A << <x>
+ rsh 0, 4 A >> <x>
+
+ tax Copy A into X
+ txa Copy X into A
+
+ ret 4, 9 Return
+
+The next table shows addressing formats from the 2nd column:
+
+ Addressing mode Syntax Description
+
+ 0 x/%x Register X
+ 1 [k] BHW at byte offset k in the packet
+ 2 [x + k] BHW at the offset X + k in the packet
+ 3 M[k] Word at offset k in M[]
+ 4 #k Literal value stored in k
+ 5 4*([k]&0xf) Lower nibble * 4 at byte offset k in the packet
+ 6 L Jump label L
+ 7 #k,Lt,Lf Jump to Lt if true, otherwise jump to Lf
+ 8 #k,Lt Jump to Lt if predicate is true
+ 9 a/%a Accumulator A
+ 10 extension BPF extension
+
+The Linux kernel also has a couple of BPF extensions that are used along
+with the class of load instructions by "overloading" the k argument with
+a negative offset + a particular extension offset. The result of such a
+BPF extension is loaded into A.
+
+Possible BPF extensions are shown in the following table:
+
+ Extension Description
+
+ len skb->len
+ proto skb->protocol
+ type skb->pkt_type
+ poff Payload start offset
+ ifidx skb->dev->ifindex
+ nla Netlink attribute of type X with offset A
+ nlan Nested Netlink attribute of type X with offset A
+ mark skb->mark
+ queue skb->queue_mapping
+ hatype skb->dev->type
+ rxhash skb->rxhash
+ cpu raw_smp_processor_id()
+ vlan_tci vlan_tx_tag_get(skb)
+ vlan_pr vlan_tx_tag_present(skb)
+
+These extensions can also be prefixed with '#'.
+
+Examples for low-level BPF:
+
+** ARP packets:
+
+ ldh [12]
+ jne #0x806, drop
+ ret #-1
+ drop: ret #0
+
+** IPv4 TCP packets:
+
+ ldh [12]
+ jne #0x800, drop
+ ldb [23]
+ jneq #6, drop
+ ret #-1
+ drop: ret #0
+
+** (Accelerated) VLAN w/ id 10:
+
+ ld vlan_tci
+ jneq #10, drop
+ ret #-1
+ drop: ret #0
+
+** SECCOMP filter example:
+
+ ld [4] /* offsetof(struct seccomp_data, arch) */
+ jne #0xc000003e, bad /* AUDIT_ARCH_X86_64 */
+ ld [0] /* offsetof(struct seccomp_data, nr) */
+ jeq #15, good /* __NR_rt_sigreturn */
+ jeq #231, good /* __NR_exit_group */
+ jeq #60, good /* __NR_exit */
+ jeq #0, good /* __NR_read */
+ jeq #1, good /* __NR_write */
+ jeq #5, good /* __NR_fstat */
+ jeq #9, good /* __NR_mmap */
+ jeq #14, good /* __NR_rt_sigprocmask */
+ jeq #13, good /* __NR_rt_sigaction */
+ jeq #35, good /* __NR_nanosleep */
+ bad: ret #0 /* SECCOMP_RET_KILL */
+ good: ret #0x7fff0000 /* SECCOMP_RET_ALLOW */
+
+The above example code can be placed into a file (here called "foo"), and
+then be passed to the bpf_asm tool for generating opcodes, output that
+xt_bpf and cls_bpf understand and can directly load. Example with the
+above ARP code:
+
+$ ./bpf_asm foo
+4,40 0 0 12,21 0 1 2054,6 0 0 4294967295,6 0 0 0,
+
+In copy-and-paste friendly C-like output:
+
+$ ./bpf_asm -c foo
+{ 0x28, 0, 0, 0x0000000c },
+{ 0x15, 0, 1, 0x00000806 },
+{ 0x06, 0, 0, 0xffffffff },
+{ 0x06, 0, 0, 0000000000 },
+
+In particular, as usage with xt_bpf or cls_bpf can result in more complex BPF
+filters that might not be obvious at first, it's good to test filters before
+attaching to a live system. For that purpose, there's a small tool called
+bpf_dbg under tools/net/ in the kernel source directory. This debugger allows
+for testing BPF filters against given pcap files, single stepping through
+the BPF code on the pcap's packets and doing BPF machine register dumps.
+
+Starting bpf_dbg is trivial and just requires issuing:
+
+# ./bpf_dbg
+
+If input and output do not equal stdin/stdout, bpf_dbg takes an
+alternative stdin source as a first argument, and an alternative stdout
+sink as a second one, e.g. `./bpf_dbg test_in.txt test_out.txt`.
+
+Other than that, a particular libreadline configuration can be set via
+file "~/.bpf_dbg_init" and the command history is stored in the file
+"~/.bpf_dbg_history".
+
+Interaction in bpf_dbg happens through a shell that also has auto-completion
+support (follow-up example commands starting with '>' denote bpf_dbg shell).
+The usual workflow would be to ...
+
+> load bpf 6,40 0 0 12,21 0 3 2048,48 0 0 23,21 0 1 1,6 0 0 65535,6 0 0 0
+ Loads a BPF filter from standard output of bpf_asm, or transformed via
+ e.g. `tcpdump -iem1 -ddd port 22 | tr '\n' ','`. Note that for JIT
+ debugging (next section), this command creates a temporary socket and
+ loads the BPF code into the kernel. Thus, this will also be useful for
+ JIT developers.
+
+> load pcap foo.pcap
+ Loads standard tcpdump pcap file.
+
+> run [<n>]
+bpf passes:1 fails:9
+ Runs through all packets from a pcap to count how many passes and fails
+ the filter will generate. A limit of packets to traverse can be given.
+
+> disassemble
+l0: ldh [12]
+l1: jeq #0x800, l2, l5
+l2: ldb [23]
+l3: jeq #0x1, l4, l5
+l4: ret #0xffff
+l5: ret #0
+ Prints out BPF code disassembly.
+
+> dump
+/* { op, jt, jf, k }, */
+{ 0x28, 0, 0, 0x0000000c },
+{ 0x15, 0, 3, 0x00000800 },
+{ 0x30, 0, 0, 0x00000017 },
+{ 0x15, 0, 1, 0x00000001 },
+{ 0x06, 0, 0, 0x0000ffff },
+{ 0x06, 0, 0, 0000000000 },
+ Prints out C-style BPF code dump.
+
+> breakpoint 0
+breakpoint at: l0: ldh [12]
+> breakpoint 1
+breakpoint at: l1: jeq #0x800, l2, l5
+ ...
+ Sets breakpoints at particular BPF instructions. Issuing a `run` command
+ will walk through the pcap file continuing from the current packet and
+ break when a breakpoint is hit (another `run` will continue from
+ the currently active breakpoint, executing the next instructions):
+
+ > run
+ -- register dump --
+ pc: [0] <-- program counter
+ code: [40] jt[0] jf[0] k[12] <-- plain BPF code of current instruction
+ curr: l0: ldh [12] <-- disassembly of current instruction
+ A: [00000000][0] <-- content of A (hex, decimal)
+ X: [00000000][0] <-- content of X (hex, decimal)
+ M[0,15]: [00000000][0] <-- folded content of M (hex, decimal)
+ -- packet dump -- <-- Current packet from pcap (hex)
+ len: 42
+ 0: 00 19 cb 55 55 a4 00 14 a4 43 78 69 08 06 00 01
+ 16: 08 00 06 04 00 01 00 14 a4 43 78 69 0a 3b 01 26
+ 32: 00 00 00 00 00 00 0a 3b 01 01
+ (breakpoint)
+ >
+
+> breakpoint
+breakpoints: 0 1
+ Prints currently set breakpoints.
+
+> step [-<n>, +<n>]
+ Performs single stepping through the BPF program from the current pc
+ offset. Thus, on each step invocation, the above register dump is issued.
+ This can go forwards and backwards in time, a plain `step` will break
+ on the next BPF instruction, thus +1. (No `run` needs to be issued here.)
+
+> select <n>
+ Selects a given packet from the pcap file to continue from. Thus, on
+ the next `run` or `step`, the BPF program is being evaluated against
+ the user pre-selected packet. Numbering starts just as in Wireshark
+ with index 1.
+
+> quit
+#
+ Exits bpf_dbg.
+
+JIT compiler
+------------
+
+The Linux kernel has a built-in BPF JIT compiler for x86_64, SPARC, PowerPC,
+ARM and s390 and can be enabled through CONFIG_BPF_JIT. The JIT compiler is
+transparently invoked for each attached filter from user space or for internal
+kernel users if it has been previously enabled by root:
+
+ echo 1 > /proc/sys/net/core/bpf_jit_enable
+
+For JIT developers doing audits etc., each compile run can output the generated
+opcode image into the kernel log via:
+
+ echo 2 > /proc/sys/net/core/bpf_jit_enable
+
+Example output from dmesg:
+
+[ 3389.935842] flen=6 proglen=70 pass=3 image=ffffffffa0069c8f
+[ 3389.935847] JIT code: 00000000: 55 48 89 e5 48 83 ec 60 48 89 5d f8 44 8b 4f 68
+[ 3389.935849] JIT code: 00000010: 44 2b 4f 6c 4c 8b 87 d8 00 00 00 be 0c 00 00 00
+[ 3389.935850] JIT code: 00000020: e8 1d 94 ff e0 3d 00 08 00 00 75 16 be 17 00 00
+[ 3389.935851] JIT code: 00000030: 00 e8 28 94 ff e0 83 f8 01 75 07 b8 ff ff 00 00
+[ 3389.935852] JIT code: 00000040: eb 02 31 c0 c9 c3
+
+In the kernel source tree under tools/net/, there's bpf_jit_disasm for
+generating disassembly out of the kernel log's hexdump:
+
+# ./bpf_jit_disasm
+70 bytes emitted from JIT compiler (pass:3, flen:6)
+ffffffffa0069c8f + <x>:
+ 0: push %rbp
+ 1: mov %rsp,%rbp
+ 4: sub $0x60,%rsp
+ 8: mov %rbx,-0x8(%rbp)
+ c: mov 0x68(%rdi),%r9d
+ 10: sub 0x6c(%rdi),%r9d
+ 14: mov 0xd8(%rdi),%r8
+ 1b: mov $0xc,%esi
+ 20: callq 0xffffffffe0ff9442
+ 25: cmp $0x800,%eax
+ 2a: jne 0x0000000000000042
+ 2c: mov $0x17,%esi
+ 31: callq 0xffffffffe0ff945e
+ 36: cmp $0x1,%eax
+ 39: jne 0x0000000000000042
+ 3b: mov $0xffff,%eax
+ 40: jmp 0x0000000000000044
+ 42: xor %eax,%eax
+ 44: leaveq
+ 45: retq
+
+Issuing option `-o` will "annotate" the resulting assembler instructions
+with their raw opcodes, which can be very useful for JIT developers:
+
+# ./bpf_jit_disasm -o
+70 bytes emitted from JIT compiler (pass:3, flen:6)
+ffffffffa0069c8f + <x>:
+ 0: push %rbp
+ 55
+ 1: mov %rsp,%rbp
+ 48 89 e5
+ 4: sub $0x60,%rsp
+ 48 83 ec 60
+ 8: mov %rbx,-0x8(%rbp)
+ 48 89 5d f8
+ c: mov 0x68(%rdi),%r9d
+ 44 8b 4f 68
+ 10: sub 0x6c(%rdi),%r9d
+ 44 2b 4f 6c
+ 14: mov 0xd8(%rdi),%r8
+ 4c 8b 87 d8 00 00 00
+ 1b: mov $0xc,%esi
+ be 0c 00 00 00
+ 20: callq 0xffffffffe0ff9442
+ e8 1d 94 ff e0
+ 25: cmp $0x800,%eax
+ 3d 00 08 00 00
+ 2a: jne 0x0000000000000042
+ 75 16
+ 2c: mov $0x17,%esi
+ be 17 00 00 00
+ 31: callq 0xffffffffe0ff945e
+ e8 28 94 ff e0
+ 36: cmp $0x1,%eax
+ 83 f8 01
+ 39: jne 0x0000000000000042
+ 75 07
+ 3b: mov $0xffff,%eax
+ b8 ff ff 00 00
+ 40: jmp 0x0000000000000044
+ eb 02
+ 42: xor %eax,%eax
+ 31 c0
+ 44: leaveq
+ c9
+ 45: retq
+ c3
+
+For BPF JIT developers, bpf_jit_disasm, bpf_asm and bpf_dbg provide a useful
+toolchain for developing and testing the kernel's JIT compiler.
+
+Misc
+----
+
+Also trinity, the Linux syscall fuzzer, has built-in support for BPF and
+SECCOMP-BPF kernel fuzzing.
+
+Written by
+----------
+
+The document was written in the hope that it is found useful and in order
+to give potential BPF hackers or security auditors a better overview of
+the underlying architecture.
+
+Jay Schulist <jschlst@samba.org>
+Daniel Borkmann <dborkman@redhat.com>
diff --git a/Documentation/networking/i40evf.txt b/Documentation/networking/i40evf.txt
new file mode 100644
index 000000000000..21e41271af79
--- /dev/null
+++ b/Documentation/networking/i40evf.txt
@@ -0,0 +1,47 @@
+Linux* Base Driver for Intel(R) Network Connection
+==================================================
+
+Intel XL710 X710 Virtual Function Linux driver.
+Copyright(c) 2013 Intel Corporation.
+
+Contents
+========
+
+- Identifying Your Adapter
+- Known Issues/Troubleshooting
+- Support
+
+This file describes the i40evf Linux* Base Driver for the Intel(R) XL710
+X710 Virtual Function.
+
+The i40evf driver supports XL710 and X710 virtual function devices that
+can only be activated on kernels with CONFIG_PCI_IOV enabled.
+
+The guest OS loading the i40evf driver must support MSI-X interrupts.
+
+Identifying Your Adapter
+========================
+
+For more information on how to identify your adapter, go to the Adapter &
+Driver ID Guide at:
+
+ http://support.intel.com/support/go/network/adapter/idguide.htm
+
+Known Issues/Troubleshooting
+============================
+
+
+Support
+=======
+
+For general information, go to the Intel support website at:
+
+ http://support.intel.com
+
+or the Intel Wired Networking project hosted by Sourceforge at:
+
+ http://sourceforge.net/projects/e1000
+
+If an issue is identified with the released source code on the supported
+kernel with a supported adapter, email the specific information related
+to the issue to e1000-devel@lists.sf.net
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 3c12d9a7ed00..7373115407e4 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -15,9 +15,19 @@ ip_default_ttl - INTEGER
forwarded) IP packets. Should be between 1 and 255 inclusive.
Default: 64 (as recommended by RFC1700)
-ip_no_pmtu_disc - BOOLEAN
- Disable Path MTU Discovery.
- default FALSE
+ip_no_pmtu_disc - INTEGER
+ Disable Path MTU Discovery. If enabled in mode 1 and a
+ fragmentation-required ICMP is received, the PMTU to this
+ destination will be set to min_pmtu (see below). You will need
+ to raise min_pmtu to the smallest interface MTU on your system
+ manually if you want to avoid locally generated fragments.
+
+ In mode 2 incoming Path MTU Discovery messages will be
+ discarded. Outgoing frames are handled the same as in mode 1,
+ implicitly setting IP_PMTUDISC_DONT on every created socket.
+
+ Possible values: 0-2
+ Default: 0
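+
+ For example, mode 2 can be selected at runtime (assuming a standard
+ procfs layout) via:
+ echo 2 > /proc/sys/net/ipv4/ip_no_pmtu_disc
+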
min_pmtu - INTEGER
default 552 - minimum discovered Path MTU
@@ -156,6 +166,16 @@ tcp_app_win - INTEGER
buffer. Value 0 is special, it means that nothing is reserved.
Default: 31
+tcp_autocorking - BOOLEAN
+ Enable TCP auto corking:
+ When applications do consecutive small write()/sendmsg() system calls,
+ we try to coalesce these small writes as much as possible, to lower
+ the total number of packets sent. This is done if at least one prior
+ packet for the flow is waiting in Qdisc queues or device transmit
+ queue. Applications can still use TCP_CORK for optimal behavior
+ when they know how/when to uncork their sockets.
+ Default: 1
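+
+ For example, auto corking can be disabled at runtime (assuming a
+ standard procfs layout) via:
+ echo 0 > /proc/sys/net/ipv4/tcp_autocorking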
+
tcp_available_congestion_control - STRING
Shows the available congestion control choices that are registered.
More congestion control algorithms may be available as modules,
@@ -1074,6 +1094,13 @@ bindv6only - BOOLEAN
Default: FALSE (as specified in RFC3493)
+anycast_src_echo_reply - BOOLEAN
+ Controls the use of anycast addresses as source addresses for ICMPv6
+ echo replies.
+ TRUE: enabled
+ FALSE: disabled
+ Default: FALSE
+
IPv6 Fragmentation:
ip6frag_high_thresh - INTEGER
diff --git a/Documentation/networking/ipsec.txt b/Documentation/networking/ipsec.txt
new file mode 100644
index 000000000000..8dbc08b7e431
--- /dev/null
+++ b/Documentation/networking/ipsec.txt
@@ -0,0 +1,38 @@
+
+This file documents known IPsec corner cases which need to be kept in mind
+when deploying various IPsec configurations in real-world production
+environments.
+
+1. IPcomp: Small IP packets won't get compressed at the sender, and fail
+   the policy check on the receiver.
+
+Quote from RFC3173:
+2.2. Non-Expansion Policy
+
+ If the total size of a compressed payload and the IPComp header, as
+ defined in section 3, is not smaller than the size of the original
+ payload, the IP datagram MUST be sent in the original non-compressed
+ form. To clarify: If an IP datagram is sent non-compressed, no
+
+ IPComp header is added to the datagram. This policy ensures saving
+ the decompression processing cycles and avoiding incurring IP
+ datagram fragmentation when the expanded datagram is larger than the
+ MTU.
+
+ Small IP datagrams are likely to expand as a result of compression.
+ Therefore, a numeric threshold should be applied before compression,
+ where IP datagrams of size smaller than the threshold are sent in the
+ original form without attempting compression. The numeric threshold
+ is implementation dependent.
+
+The current IPComp implementation is indeed by the book, but in practice,
+when sending a non-compressed packet to the peer (whether because the packet
+length is smaller than the threshold or because the compressed length is
+larger than the original packet length), the packet is dropped by the policy
+check, as it matches the selector but does not come from any XFRM layer,
+i.e., it has no security path. Such a naked packet will never make it to an
+upper layer. The result is all the more weird to the user when pinging the
+peer with different payload lengths.
+
+One workaround is to set "level use" for each policy if the above scenario
+is observed. The consequence of doing so is that small (uncompressed)
+packets will skip the policy check on the receiver side.
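+
+As an illustrative sketch with setkey(8) (addresses made up), the level is
+the last element of the policy string:
+
+	spdadd 192.168.1.1 192.168.2.1 any -P out ipsec
+		ipcomp/transport//use;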
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index c01223628a87..723bf3d33a6e 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -123,6 +123,16 @@ Transmission process is similar to capture as shown below.
[shutdown] close() --------> destruction of the transmission socket and
deallocation of all associated resources.
+Socket creation and destruction is also straightforward, and is done
+the same way as in capturing, described in the previous paragraph:
+
+ int fd = socket(PF_PACKET, mode, 0);
+
+The protocol can optionally be 0 in case we only want to transmit
+via this socket, which avoids an expensive call to packet_rcv().
+In this case, you also need to bind(2) the TX_RING with sll_protocol = 0
+set. Otherwise, use htons(ETH_P_ALL) or any other protocol, for example.
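+
+A bind(2) sketch for that transmit-only case (the interface name is just
+an example):
+
+    struct sockaddr_ll my_addr;
+
+    memset(&my_addr, 0, sizeof(my_addr));
+    my_addr.sll_family   = AF_PACKET;
+    my_addr.sll_protocol = 0;	/* TX only, skips packet_rcv() */
+    my_addr.sll_ifindex  = if_nametoindex("eth0");
+
+    bind(fd, (struct sockaddr *)&my_addr, sizeof(my_addr));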
+
Binding the socket to your network interface is mandatory (with zero copy) to
know the header size of frames used in the circular buffer.
@@ -507,8 +517,6 @@ where 'tpacket_version' can be TPACKET_V1 (default), TPACKET_V2, TPACKET_V3.
TPACKET_V1:
- Default if not otherwise specified by setsockopt(2)
- RX_RING, TX_RING available
- - VLAN metadata information available for packets
- (TP_STATUS_VLAN_VALID)
TPACKET_V1 --> TPACKET_V2:
- Made 64 bit clean due to unsigned long usage in TPACKET_V1
@@ -516,6 +524,13 @@ TPACKET_V1 --> TPACKET_V2:
userspace and the like
- Timestamp resolution in nanoseconds instead of microseconds
- RX_RING, TX_RING available
+ - VLAN metadata information available for packets
+ (TP_STATUS_VLAN_VALID, TP_STATUS_VLAN_TPID_VALID),
+ in the tpacket2_hdr structure:
+ - TP_STATUS_VLAN_VALID bit being set into the tp_status field indicates
+ that the tp_vlan_tci field has valid VLAN TCI value
+ - TP_STATUS_VLAN_TPID_VALID bit being set into the tp_status field
+ indicates that the tp_vlan_tpid field has valid VLAN TPID value
- How to switch to TPACKET_V2:
1. Replace struct tpacket_hdr by struct tpacket2_hdr
2. Query header len and save
@@ -943,6 +958,27 @@ int main(int argc, char **argp)
}
-------------------------------------------------------------------------------
++ PACKET_QDISC_BYPASS
+-------------------------------------------------------------------------------
+
+If there is a requirement to load the network with many packets in a similar
+fashion as pktgen does, you might set the following option after socket
+creation:
+
+ int one = 1;
+ setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS, &one, sizeof(one));
+
+This has the side effect that packets sent through PF_PACKET will bypass the
+kernel's qdisc layer and are forcibly pushed to the driver directly. Meaning,
+packets are not buffered, tc disciplines are ignored, increased loss can occur
+and such packets are also not visible to other PF_PACKET sockets anymore. So,
+you have been warned; generally, this can be useful for stress testing various
+components of a system.
+
+By default, PACKET_QDISC_BYPASS is disabled and needs to be explicitly enabled
+on PF_PACKET sockets.
+
+-------------------------------------------------------------------------------
+ PACKET_TIMESTAMP
-------------------------------------------------------------------------------
diff --git a/Documentation/networking/phy.txt b/Documentation/networking/phy.txt
index d5b1a3935245..ebf270719402 100644
--- a/Documentation/networking/phy.txt
+++ b/Documentation/networking/phy.txt
@@ -255,7 +255,8 @@ Writing a PHY driver
config_init: configures PHY into a sane state after a reset.
For instance, a Davicom PHY requires descrambling disabled.
- probe: Does any setup needed by the driver
+ probe: Allocate phy->priv, optionally refuse to bind.
+ PHY may not have been reset or had fixups run yet.
suspend/resume: power management
config_aneg: Changes the speed/duplex/negotiation settings
read_status: Reads the current speed/duplex/negotiation settings
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 98097d8cb910..661d3c316a17 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -85,7 +85,7 @@ Filled in if SOF_TIMESTAMPING_SYS_HARDWARE is set. Requires support
by the network device and will be empty without that support.
-SIOCSHWTSTAMP:
+SIOCSHWTSTAMP, SIOCGHWTSTAMP:
Hardware time stamping must also be initialized for each device driver
that is expected to do hardware time stamping. The parameter is defined in
@@ -115,6 +115,10 @@ Only a processes with admin rights may change the configuration. User
space is responsible to ensure that multiple processes don't interfere
with each other and that the settings are reset.
+Any process can read the actual configuration by passing this
+structure to ioctl(SIOCGHWTSTAMP) in the same way. However, this has
+not been implemented in all drivers.
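+
+A minimal query sketch (error handling omitted; the interface name is just
+an example, see also the hwtstamp_config test program added under
+Documentation/networking/timestamping/):
+
+	struct ifreq ifr;
+	struct hwtstamp_config config;
+
+	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name));
+	ifr.ifr_data = (void *)&config;
+	ioctl(sock, SIOCGHWTSTAMP, &ifr);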
+
/* possible values for hwtstamp_config->tx_type */
enum {
/*
@@ -157,7 +161,8 @@ DEVICE IMPLEMENTATION
A driver which supports hardware time stamping must support the
SIOCSHWTSTAMP ioctl and update the supplied struct hwtstamp_config with
-the actual values as described in the section on SIOCSHWTSTAMP.
+the actual values as described in the section on SIOCSHWTSTAMP. It
+should also support SIOCGHWTSTAMP.
Time stamps for received packets must be stored in the skb. To get a pointer
to the shared time stamp structure of the skb call skb_hwtstamps(). Then
diff --git a/Documentation/networking/timestamping/.gitignore b/Documentation/networking/timestamping/.gitignore
index 71e81eb2e22f..a380159765ce 100644
--- a/Documentation/networking/timestamping/.gitignore
+++ b/Documentation/networking/timestamping/.gitignore
@@ -1 +1,2 @@
timestamping
+hwtstamp_config
diff --git a/Documentation/networking/timestamping/Makefile b/Documentation/networking/timestamping/Makefile
index e79973443e9f..d934afc8306a 100644
--- a/Documentation/networking/timestamping/Makefile
+++ b/Documentation/networking/timestamping/Makefile
@@ -2,12 +2,13 @@
obj- := dummy.o
# List of programs to build
-hostprogs-y := timestamping
+hostprogs-y := timestamping hwtstamp_config
# Tell kbuild to always build the programs
always := $(hostprogs-y)
HOSTCFLAGS_timestamping.o += -I$(objtree)/usr/include
+HOSTCFLAGS_hwtstamp_config.o += -I$(objtree)/usr/include
clean:
- rm -f timestamping
+ rm -f timestamping hwtstamp_config
diff --git a/Documentation/networking/timestamping/hwtstamp_config.c b/Documentation/networking/timestamping/hwtstamp_config.c
new file mode 100644
index 000000000000..e8b685a7f15f
--- /dev/null
+++ b/Documentation/networking/timestamping/hwtstamp_config.c
@@ -0,0 +1,134 @@
+/* Test program for SIOC{G,S}HWTSTAMP
+ * Copyright 2013 Solarflare Communications
+ * Author: Ben Hutchings
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+
+#include <linux/if.h>
+#include <linux/net_tstamp.h>
+#include <linux/sockios.h>
+
+static int
+lookup_value(const char **names, int size, const char *name)
+{
+ int value;
+
+ for (value = 0; value < size; value++)
+ if (names[value] && strcasecmp(names[value], name) == 0)
+ return value;
+
+ return -1;
+}
+
+static const char *
+lookup_name(const char **names, int size, int value)
+{
+ return (value >= 0 && value < size) ? names[value] : NULL;
+}
+
+static void list_names(FILE *f, const char **names, int size)
+{
+ int value;
+
+ for (value = 0; value < size; value++)
+ if (names[value])
+ fprintf(f, " %s\n", names[value]);
+}
+
+static const char *tx_types[] = {
+#define TX_TYPE(name) [HWTSTAMP_TX_ ## name] = #name
+ TX_TYPE(OFF),
+ TX_TYPE(ON),
+ TX_TYPE(ONESTEP_SYNC)
+#undef TX_TYPE
+};
+#define N_TX_TYPES ((int)(sizeof(tx_types) / sizeof(tx_types[0])))
+
+static const char *rx_filters[] = {
+#define RX_FILTER(name) [HWTSTAMP_FILTER_ ## name] = #name
+ RX_FILTER(NONE),
+ RX_FILTER(ALL),
+ RX_FILTER(SOME),
+ RX_FILTER(PTP_V1_L4_EVENT),
+ RX_FILTER(PTP_V1_L4_SYNC),
+ RX_FILTER(PTP_V1_L4_DELAY_REQ),
+ RX_FILTER(PTP_V2_L4_EVENT),
+ RX_FILTER(PTP_V2_L4_SYNC),
+ RX_FILTER(PTP_V2_L4_DELAY_REQ),
+ RX_FILTER(PTP_V2_L2_EVENT),
+ RX_FILTER(PTP_V2_L2_SYNC),
+ RX_FILTER(PTP_V2_L2_DELAY_REQ),
+ RX_FILTER(PTP_V2_EVENT),
+ RX_FILTER(PTP_V2_SYNC),
+ RX_FILTER(PTP_V2_DELAY_REQ),
+#undef RX_FILTER
+};
+#define N_RX_FILTERS ((int)(sizeof(rx_filters) / sizeof(rx_filters[0])))
+
+static void usage(void)
+{
+ fputs("Usage: hwtstamp_config if_name [tx_type rx_filter]\n"
+ "tx_type is any of (case-insensitive):\n",
+ stderr);
+ list_names(stderr, tx_types, N_TX_TYPES);
+ fputs("rx_filter is any of (case-insensitive):\n", stderr);
+ list_names(stderr, rx_filters, N_RX_FILTERS);
+}
+
+int main(int argc, char **argv)
+{
+ struct ifreq ifr;
+ struct hwtstamp_config config;
+ const char *name;
+ int sock;
+
+ if ((argc != 2 && argc != 4) || (strlen(argv[1]) >= IFNAMSIZ)) {
+ usage();
+ return 2;
+ }
+
+ if (argc == 4) {
+ config.flags = 0;
+ config.tx_type = lookup_value(tx_types, N_TX_TYPES, argv[2]);
+ config.rx_filter = lookup_value(rx_filters, N_RX_FILTERS, argv[3]);
+ if (config.tx_type < 0 || config.rx_filter < 0) {
+ usage();
+ return 2;
+ }
+ }
+
+ sock = socket(AF_INET, SOCK_DGRAM, 0);
+ if (sock < 0) {
+ perror("socket");
+ return 1;
+ }
+
+ strcpy(ifr.ifr_name, argv[1]);
+ ifr.ifr_data = (caddr_t)&config;
+
+ if (ioctl(sock, (argc == 2) ? SIOCGHWTSTAMP : SIOCSHWTSTAMP, &ifr)) {
+ perror("ioctl");
+ return 1;
+ }
+
+ printf("flags = %#x\n", config.flags);
+ name = lookup_name(tx_types, N_TX_TYPES, config.tx_type);
+ if (name)
+ printf("tx_type = %s\n", name);
+ else
+ printf("tx_type = %d\n", config.tx_type);
+ name = lookup_name(rx_filters, N_RX_FILTERS, config.rx_filter);
+ if (name)
+ printf("rx_filter = %s\n", name);
+ else
+ printf("rx_filter = %d\n", config.rx_filter);
+
+ return 0;
+}
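+
+/*
+ * Example session (illustrative; assumes an interface eth0 whose driver
+ * implements SIOCGHWTSTAMP):
+ *
+ *   # ./hwtstamp_config eth0
+ *   flags = 0
+ *   tx_type = OFF
+ *   rx_filter = NONE
+ */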
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 0f54333b0ff2..b6ce00b2be9a 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -547,13 +547,11 @@ helper functions described in Section 4. In that case, pm_runtime_resume()
should be used. Of course, for this purpose the device's runtime PM has to be
enabled earlier by calling pm_runtime_enable().
-If the device bus type's or driver's ->probe() callback runs
-pm_runtime_suspend() or pm_runtime_idle() or their asynchronous counterparts,
-they will fail returning -EAGAIN, because the device's usage counter is
-incremented by the driver core before executing ->probe(). Still, it may be
-desirable to suspend the device as soon as ->probe() has finished, so the driver
-core uses pm_runtime_put_sync() to invoke the subsystem-level idle callback for
-the device at that time.
+It may be desirable to suspend the device once ->probe() has finished.
+Therefore the driver core uses the asynchronous pm_request_idle() to submit a
+request to execute the subsystem-level idle callback for the device at that
+time. A driver that makes use of the runtime autosuspend feature may want to
+update the last busy mark before returning from ->probe().
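+
+A minimal sketch of such a ->probe() tail (assuming runtime PM has already
+been enabled for the device):
+
+	pm_runtime_mark_last_busy(dev);
+	return 0;
+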
Moreover, the driver core prevents runtime PM callbacks from racing with the bus
notifier callback in __device_release_driver(), which is necessary, because the
@@ -656,7 +654,7 @@ out the following operations:
__pm_runtime_disable() with 'false' as the second argument for every device
right before executing the subsystem-level .suspend_late() callback for it.
- * During system resume it calls pm_runtime_enable() and pm_runtime_put_sync()
+ * During system resume it calls pm_runtime_enable() and pm_runtime_put()
for every device right after executing the subsystem-level .resume_early()
callback and right after executing the subsystem-level .resume() callback
for it, respectively.
diff --git a/Documentation/security/00-INDEX b/Documentation/security/00-INDEX
index 414235c1fcfc..45c82fd3e9d3 100644
--- a/Documentation/security/00-INDEX
+++ b/Documentation/security/00-INDEX
@@ -22,3 +22,5 @@ keys.txt
- description of the kernel key retention service.
tomoyo.txt
- documentation on the TOMOYO Linux Security Module.
+IMA-templates.txt
+ - documentation on the template management mechanism for IMA.
diff --git a/Documentation/security/IMA-templates.txt b/Documentation/security/IMA-templates.txt
new file mode 100644
index 000000000000..a777e5f1df5b
--- /dev/null
+++ b/Documentation/security/IMA-templates.txt
@@ -0,0 +1,87 @@
+ IMA Template Management Mechanism
+
+
+==== INTRODUCTION ====
+
+The original 'ima' template is fixed length, containing the filedata hash
+and pathname. The filedata hash is limited to 20 bytes (md5/sha1).
+The pathname is a null terminated string, limited to 255 characters.
+To overcome these limitations and to add additional file metadata, it is
+necessary to extend the current version of IMA by defining additional
+templates. For example, information that could possibly be reported is
+the inode UID/GID or the LSM labels of either the inode or of the process
+that is accessing it.
+
+However, the main problem with introducing this feature is that, each time
+a new template is defined, the functions that generate and display
+the measurements list would include the code for handling a new format
+and, thus, would significantly grow over time.
+
+The proposed solution solves this problem by separating the template
+management from the remaining IMA code. The core of this solution is the
+definition of two new data structures: a template descriptor, to determine
+which information should be included in the measurement list; a template
+field, to generate and display data of a given type.
+
+Managing templates with these structures is very simple. To support
+a new data type, developers define the field identifier and implement
+two functions, init() and show(), respectively to generate and display
+measurement entries. Defining a new template descriptor requires
+specifying the template format, a string of field identifiers separated
+by the '|' character. While in the current implementation it is possible
+to define new template descriptors only by adding their definition in the
+template specific code (ima_template.c), in a future version it will be
+possible to register a new template on a running kernel by supplying to IMA
+the desired format string. In this version, IMA initializes at boot time
+all defined template descriptors by translating the format into an array
+of template fields structures taken from the set of the supported ones.
+
+After the initialization step, IMA will call ima_alloc_init_template()
+(new function defined within the patches for the new template management
+mechanism) to generate a new measurement entry by using the template
+descriptor chosen through the kernel configuration or through the newly
+introduced 'ima_template=' kernel command line parameter. It is during this
+phase that the advantages of the new architecture are clearly shown:
+the latter function will not contain specific code to handle a given template
+but, instead, it simply calls the init() method of the template fields
+associated with the chosen template descriptor and stores the result (pointer
+to allocated data and data length) in the measurement entry structure.
+
+The same mechanism is employed to display measurements entries.
+The functions ima[_ascii]_measurements_show() retrieve, for each entry,
+the template descriptor used to produce that entry and call the show()
+method for each item of the array of template fields structures.
+
+
+
+==== SUPPORTED TEMPLATE FIELDS AND DESCRIPTORS ====
+
+The following is the list of supported template fields
+('<identifier>': description), that can be used to define new template
+descriptors by adding their identifier to the format string
+(support for more data types will be added later):
+
+ - 'd': the digest of the event (i.e. the digest of a measured file),
+ calculated with the SHA1 or MD5 hash algorithm;
+ - 'n': the name of the event (i.e. the file name), with size up to 255 bytes;
+ - 'd-ng': the digest of the event, calculated with an arbitrary hash
+ algorithm (field format: [<hash algo>:]digest, where the digest
+ prefix is shown only if the hash algorithm is not SHA1 or MD5);
+ - 'n-ng': the name of the event, without size limitations.
+
+
+Below is the list of defined template descriptors:
+ - "ima": its format is 'd|n';
+ - "ima-ng" (default): its format is 'd-ng|n-ng'.
+
+
+
+==== USE ====
+
+To specify the template descriptor to be used to generate measurement entries,
+currently the following methods are supported:
+
+ - select a template descriptor among those supported in the kernel
+ configuration ('ima-ng' is the default choice);
+ - specify a template descriptor name from the kernel command line through
+ the 'ima_template=' parameter.
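+
+For example, booting with the following kernel command line fragment would
+select the original template descriptor:
+
+	ima_template=ima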
diff --git a/Documentation/security/keys.txt b/Documentation/security/keys.txt
index 7b4145d00452..a4c33f1a7c6d 100644
--- a/Documentation/security/keys.txt
+++ b/Documentation/security/keys.txt
@@ -865,15 +865,14 @@ encountered:
calling processes has a searchable link to the key from one of its
keyrings. There are three functions for dealing with these:
- key_ref_t make_key_ref(const struct key *key,
- unsigned long possession);
+ key_ref_t make_key_ref(const struct key *key, bool possession);
struct key *key_ref_to_ptr(const key_ref_t key_ref);
- unsigned long is_key_possessed(const key_ref_t key_ref);
+ bool is_key_possessed(const key_ref_t key_ref);
The first function constructs a key reference from a key pointer and
- possession information (which must be 0 or 1 and not any other value).
+ possession information (which must be true or false).
The second function retrieves the key pointer from a reference and the
third retrieves the possession flag.
@@ -961,14 +960,17 @@ payload contents" for more information.
the argument will not be parsed.
-(*) Extra references can be made to a key by calling the following function:
+(*) Extra references can be made to a key by calling one of the following
+ functions:
+ struct key *__key_get(struct key *key);
struct key *key_get(struct key *key);
- These need to be disposed of by calling key_put() when they've been
- finished with. The key pointer passed in will be returned. If the pointer
- is NULL or CONFIG_KEYS is not set then the key will not be dereferenced and
- no increment will take place.
+ Keys so referenced will need to be disposed of by calling key_put() when
+ they've been finished with. The key pointer passed in will be returned.
+
+ In the case of key_get(), if the pointer is NULL or CONFIG_KEYS is not set
+ then the key will not be dereferenced and no increment will take place.
(*) A key's serial number can be obtained by calling:
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
index 54d29c1320ed..230ce71f4d75 100755
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -440,15 +440,15 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
- buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
- buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
- buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
- buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
- buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
- buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
- buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
- buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
- buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
+ buf += " fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
+ buf += " fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;\n"
+ buf += " fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
+ buf += " fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;\n"
+ buf += " fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
+ buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
+ buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
+ buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
+ buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
diff --git a/Documentation/unaligned-memory-access.txt b/Documentation/unaligned-memory-access.txt
index f866c72291bf..a445da098bc6 100644
--- a/Documentation/unaligned-memory-access.txt
+++ b/Documentation/unaligned-memory-access.txt
@@ -137,24 +137,34 @@ Code that causes unaligned access
=================================
With the above in mind, let's move onto a real life example of a function
-that can cause an unaligned memory access. The following function adapted
+that can cause an unaligned memory access. The following function taken
from include/linux/etherdevice.h is an optimized routine to compare two
ethernet MAC addresses for equality.
-unsigned int compare_ether_addr(const u8 *addr1, const u8 *addr2)
+bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
{
- const u16 *a = (const u16 *) addr1;
- const u16 *b = (const u16 *) addr2;
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ u32 fold = ((*(const u32 *)addr1) ^ (*(const u32 *)addr2)) |
+ ((*(const u16 *)(addr1 + 4)) ^ (*(const u16 *)(addr2 + 4)));
+
+ return fold == 0;
+#else
+ const u16 *a = (const u16 *)addr1;
+ const u16 *b = (const u16 *)addr2;
return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
+#endif
}
-In the above function, the reference to a[0] causes 2 bytes (16 bits) to
-be read from memory starting at address addr1. Think about what would happen
-if addr1 was an odd address such as 0x10003. (Hint: it'd be an unaligned
-access.)
+In the above function, when the hardware has efficient unaligned access
+capability, there is no issue with this code. But when the hardware isn't
+able to access memory on arbitrary boundaries, the reference to a[0] causes
+2 bytes (16 bits) to be read from memory starting at address addr1.
+
+Think about what would happen if addr1 was an odd address such as 0x10003.
+(Hint: it'd be an unaligned access.)
Despite the potential unaligned access problems with the above function, it
-is included in the kernel anyway but is understood to only work on
+is included in the kernel anyway but is understood to only work normally on
16-bit-aligned addresses. It is up to the caller to ensure this alignment or
not use this function at all. This alignment-unsafe function is still useful
as it is a decent optimization for the cases when you can ensure alignment,
diff --git a/Documentation/vm/split_page_table_lock b/Documentation/vm/split_page_table_lock
index 7521d367f21d..6dea4fd5c961 100644
--- a/Documentation/vm/split_page_table_lock
+++ b/Documentation/vm/split_page_table_lock
@@ -63,9 +63,9 @@ levels.
PMD split lock enabling requires pgtable_pmd_page_ctor() call on PMD table
allocation and pgtable_pmd_page_dtor() on freeing.
-Allocation usually happens in pmd_alloc_one(), freeing in pmd_free(), but
-make sure you cover all PMD table allocation / freeing paths: i.e X86_PAE
-preallocate few PMDs on pgd_alloc().
+Allocation usually happens in pmd_alloc_one(), freeing in pmd_free() and
+pmd_free_tlb(), but make sure you cover all PMD table allocation / freeing
+paths: i.e. X86_PAE preallocates a few PMDs on pgd_alloc().
With everything in place you can set CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK.
diff --git a/MAINTAINERS b/MAINTAINERS
index f6e905216cc4..e11d4952bb26 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -783,7 +783,7 @@ F: arch/arm/boot/dts/sama*.dts
F: arch/arm/boot/dts/sama*.dtsi
ARM/CALXEDA HIGHBANK ARCHITECTURE
-M: Rob Herring <rob.herring@calxeda.com>
+M: Rob Herring <robh@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-highbank/
@@ -893,20 +893,15 @@ F: arch/arm/include/asm/hardware/dec21285.h
F: arch/arm/mach-footbridge/
ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
+M: Shawn Guo <shawn.guo@linaro.org>
M: Sascha Hauer <kernel@pengutronix.de>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-T: git git://git.pengutronix.de/git/imx/linux-2.6.git
+T: git git://git.linaro.org/people/shawnguo/linux-2.6.git
F: arch/arm/mach-imx/
+F: arch/arm/boot/dts/imx*
F: arch/arm/configs/imx*_defconfig
-ARM/FREESCALE IMX6
-M: Shawn Guo <shawn.guo@linaro.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-T: git git://git.linaro.org/people/shawnguo/linux-2.6.git
-F: arch/arm/mach-imx/*imx6*
-
ARM/FREESCALE MXS ARM ARCHITECTURE
M: Shawn Guo <shawn.guo@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1013,6 +1008,8 @@ M: Santosh Shilimkar <santosh.shilimkar@ti.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-keystone/
+F: drivers/clk/keystone/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
ARM/LOGICPD PXA270 MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
@@ -1923,7 +1920,8 @@ S: Maintained
F: drivers/gpio/gpio-bt8xx.c
BTRFS FILE SYSTEM
-M: Chris Mason <chris.mason@fusionio.com>
+M: Chris Mason <clm@fb.com>
+M: Josef Bacik <jbacik@fb.com>
L: linux-btrfs@vger.kernel.org
W: http://btrfs.wiki.kernel.org/
Q: http://patchwork.kernel.org/project/linux-btrfs/list/
@@ -2012,6 +2010,7 @@ L: linux-can@vger.kernel.org
W: http://gitorious.org/linux-can
T: git git://gitorious.org/linux-can/linux-can-next.git
S: Maintained
+F: Documentation/networking/can.txt
F: net/can/
F: include/linux/can/core.h
F: include/uapi/linux/can.h
@@ -2126,11 +2125,17 @@ S: Maintained
F: Documentation/zh_CN/
CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
-M: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+M: Peter Chen <Peter.Chen@freescale.com>
+T: git://github.com/hzpeterchen/linux-usb.git
L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/chipidea/
+CHROME HARDWARE PLATFORM SUPPORT
+M: Olof Johansson <olof@lixom.net>
+S: Maintained
+F: drivers/platform/chrome/
+
CISCO VIC ETHERNET NIC DRIVER
M: Christian Benvenuti <benve@cisco.com>
M: Sujith Sankar <ssujith@cisco.com>
@@ -3748,9 +3753,11 @@ F: include/uapi/linux/gigaset_dev.h
GPIO SUBSYSTEM
M: Linus Walleij <linus.walleij@linaro.org>
-S: Maintained
+M: Alexandre Courbot <gnurou@gmail.com>
L: linux-gpio@vger.kernel.org
-F: Documentation/gpio.txt
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
+S: Maintained
+F: Documentation/gpio/
F: drivers/gpio/
F: include/linux/gpio*
F: include/asm-generic/gpio.h
@@ -3818,6 +3825,12 @@ T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/usb/gspca/
+GUID PARTITION TABLE (GPT)
+M: Davidlohr Bueso <davidlohr@hp.com>
+L: linux-efi@vger.kernel.org
+S: Maintained
+F: block/partitions/efi.*
+
STK1160 USB VIDEO CAPTURE DRIVER
M: Ezequiel Garcia <elezegarcia@gmail.com>
L: linux-media@vger.kernel.org
@@ -4027,12 +4040,26 @@ W: http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi
S: Maintained
F: fs/hpfs/
+HSI SUBSYSTEM
+M: Sebastian Reichel <sre@debian.org>
+S: Maintained
+F: Documentation/ABI/testing/sysfs-bus-hsi
+F: drivers/hsi/
+F: include/linux/hsi/
+F: include/uapi/linux/hsi/
+
HSO 3G MODEM DRIVER
M: Jan Dumon <j.dumon@option.com>
W: http://www.pharscape.org
S: Maintained
F: drivers/net/usb/hso.c
+HSR NETWORK PROTOCOL
+M: Arvid Brodin <arvid.brodin@alten.se>
+L: netdev@vger.kernel.org
+S: Maintained
+F: net/hsr/
+
HTCPEN TOUCHSCREEN DRIVER
M: Pau Oliva Fora <pof@eslack.org>
L: linux-input@vger.kernel.org
@@ -4054,6 +4081,7 @@ F: arch/x86/include/uapi/asm/hyperv.h
F: arch/x86/kernel/cpu/mshyperv.c
F: drivers/hid/hid-hyperv.c
F: drivers/hv/
+F: drivers/input/serio/hyperv-keyboard.c
F: drivers/net/hyperv/
F: drivers/scsi/storvsc_drv.c
F: drivers/video/hyperv_fb.c
@@ -4431,17 +4459,16 @@ M: Deepak Saxena <dsaxena@plexity.net>
S: Maintained
F: drivers/char/hw_random/ixp4xx-rng.c
-INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e)
+INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf)
M: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
M: Jesse Brandeburg <jesse.brandeburg@intel.com>
M: Bruce Allan <bruce.w.allan@intel.com>
M: Carolyn Wyborny <carolyn.wyborny@intel.com>
M: Don Skidmore <donald.c.skidmore@intel.com>
M: Greg Rose <gregory.v.rose@intel.com>
-M: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
M: Alex Duyck <alexander.h.duyck@intel.com>
M: John Ronciak <john.ronciak@intel.com>
-M: Tushar Dave <tushar.n.dave@intel.com>
+M: Mitch Williams <mitch.a.williams@intel.com>
L: e1000-devel@lists.sourceforge.net
W: http://www.intel.com/support/feedback.htm
W: http://e1000.sourceforge.net/
@@ -4457,6 +4484,7 @@ F: Documentation/networking/ixgb.txt
F: Documentation/networking/ixgbe.txt
F: Documentation/networking/ixgbevf.txt
F: Documentation/networking/i40e.txt
+F: Documentation/networking/i40evf.txt
F: drivers/net/ethernet/intel/
INTEL-MID GPIO DRIVER
@@ -5244,7 +5272,7 @@ S: Maintained
F: Documentation/lockdep*.txt
F: Documentation/lockstat.txt
F: include/linux/lockdep.h
-F: kernel/lockdep*
+F: kernel/locking/
LOGICAL DISK MANAGER SUPPORT (LDM, Windows 2000/XP/Vista Dynamic Disks)
M: "Richard Russon (FlatCap)" <ldm@flatcap.org>
@@ -5885,12 +5913,21 @@ M: Steffen Klassert <steffen.klassert@secunet.com>
M: Herbert Xu <herbert@gondor.apana.org.au>
M: "David S. Miller" <davem@davemloft.net>
L: netdev@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git
S: Maintained
F: net/xfrm/
F: net/key/
F: net/ipv4/xfrm*
+F: net/ipv4/esp4.c
+F: net/ipv4/ah4.c
+F: net/ipv4/ipcomp.c
+F: net/ipv4/ip_vti.c
F: net/ipv6/xfrm*
+F: net/ipv6/esp6.c
+F: net/ipv6/ah6.c
+F: net/ipv6/ipcomp6.c
+F: net/ipv6/ip6_vti.c
F: include/uapi/linux/xfrm.h
F: include/net/xfrm.h
@@ -5956,10 +5993,10 @@ F: drivers/nfc/
F: include/linux/platform_data/pn544.h
NFS, SUNRPC, AND LOCKD CLIENTS
-M: Trond Myklebust <Trond.Myklebust@netapp.com>
+M: Trond Myklebust <trond.myklebust@primarydata.com>
L: linux-nfs@vger.kernel.org
W: http://client.linux-nfs.org
-T: git git://git.linux-nfs.org/pub/linux/nfs-2.6.git
+T: git git://git.linux-nfs.org/projects/trondmy/linux-nfs.git
S: Maintained
F: fs/lockd/
F: fs/nfs/
@@ -6211,7 +6248,7 @@ F: drivers/i2c/busses/i2c-ocores.c
OPEN FIRMWARE AND FLATTENED DEVICE TREE
M: Grant Likely <grant.likely@linaro.org>
-M: Rob Herring <rob.herring@calxeda.com>
+M: Rob Herring <robh+dt@kernel.org>
L: devicetree@vger.kernel.org
W: http://fdt.secretlab.ca
T: git git://git.secretlab.ca/git/linux-2.6.git
@@ -6223,11 +6260,11 @@ K: of_get_property
K: of_match_table
OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
-M: Rob Herring <rob.herring@calxeda.com>
+M: Rob Herring <robh+dt@kernel.org>
M: Pawel Moll <pawel.moll@arm.com>
M: Mark Rutland <mark.rutland@arm.com>
-M: Stephen Warren <swarren@wwwdotorg.org>
M: Ian Campbell <ijc+devicetree@hellion.org.uk>
+M: Kumar Gala <galak@codeaurora.org>
L: devicetree@vger.kernel.org
S: Maintained
F: Documentation/devicetree/
@@ -6437,19 +6474,52 @@ F: drivers/pci/
F: include/linux/pci*
F: arch/x86/pci/
+PCI DRIVER FOR IMX6
+M: Richard Zhu <r65037@freescale.com>
+M: Shawn Guo <shawn.guo@linaro.org>
+L: linux-pci@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: drivers/pci/host/*imx6*
+
+PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
+M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+M: Jason Cooper <jason@lakedaemon.net>
+L: linux-pci@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: drivers/pci/host/*mvebu*
+
PCI DRIVER FOR NVIDIA TEGRA
M: Thierry Reding <thierry.reding@gmail.com>
L: linux-tegra@vger.kernel.org
+L: linux-pci@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
F: drivers/pci/host/pci-tegra.c
+PCI DRIVER FOR RENESAS R-CAR
+M: Simon Horman <horms@verge.net.au>
+L: linux-pci@vger.kernel.org
+L: linux-sh@vger.kernel.org
+S: Maintained
+F: drivers/pci/host/*rcar*
+
PCI DRIVER FOR SAMSUNG EXYNOS
M: Jingoo Han <jg1.han@samsung.com>
L: linux-pci@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
S: Maintained
F: drivers/pci/host/pci-exynos.c
+PCI DRIVER FOR SYNOPSYS DESIGNWARE
+M: Mohit Kumar <mohit.kumar@st.com>
+M: Jingoo Han <jg1.han@samsung.com>
+L: linux-pci@vger.kernel.org
+S: Maintained
+F: drivers/pci/host/*designware*
+
PCMCIA SUBSYSTEM
P: Linux PCMCIA Team
L: linux-pcmcia@lists.infradead.org
@@ -7376,7 +7446,6 @@ S: Maintained
F: kernel/sched/
F: include/linux/sched.h
F: include/uapi/linux/sched.h
-F: kernel/wait.c
F: include/linux/wait.h
SCORE ARCHITECTURE
@@ -7512,9 +7581,10 @@ SELINUX SECURITY MODULE
M: Stephen Smalley <sds@tycho.nsa.gov>
M: James Morris <james.l.morris@oracle.com>
M: Eric Paris <eparis@parisplace.org>
+M: Paul Moore <paul@paul-moore.com>
L: selinux@tycho.nsa.gov (subscribers-only, general discussion)
W: http://selinuxproject.org
-T: git git://git.infradead.org/users/eparis/selinux.git
+T: git git://git.infradead.org/users/pcmoore/selinux
S: Supported
F: include/linux/selinux*
F: security/selinux/
@@ -8660,6 +8730,7 @@ F: drivers/media/usb/tm6000/
TPM DEVICE DRIVER
M: Leonidas Da Silva Barbosa <leosilva@linux.vnet.ibm.com>
M: Ashley Lai <ashley@ashleylai.com>
+M: Peter Huewe <peterhuewe@gmx.de>
M: Rajiv Andrade <mail@srajiv.net>
W: http://tpmdd.sourceforge.net
M: Marcel Selhorst <tpmdd@selhorst.net>
@@ -9518,8 +9589,8 @@ F: drivers/xen/*swiotlb*
XFS FILESYSTEM
P: Silicon Graphics Inc
+M: Dave Chinner <david@fromorbit.com>
M: Ben Myers <bpm@sgi.com>
-M: Alex Elder <elder@kernel.org>
M: xfs@oss.sgi.com
L: xfs@oss.sgi.com
W: http://oss.sgi.com/projects/xfs
diff --git a/Makefile b/Makefile
index 920ad07180c9..ab80be7a38bc 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
-PATCHLEVEL = 12
+PATCHLEVEL = 13
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc6
NAME = One Giant Leap for Frogkind
# *DOCUMENTATION*
@@ -732,19 +732,15 @@ export mod_strip_cmd
# Select initial ramdisk compression format, default is gzip(1).
# This shall be used by the dracut(8) tool while creating an initramfs image.
#
-INITRD_COMPRESS=gzip
-ifeq ($(CONFIG_RD_BZIP2), y)
- INITRD_COMPRESS=bzip2
-else ifeq ($(CONFIG_RD_LZMA), y)
- INITRD_COMPRESS=lzma
-else ifeq ($(CONFIG_RD_XZ), y)
- INITRD_COMPRESS=xz
-else ifeq ($(CONFIG_RD_LZO), y)
- INITRD_COMPRESS=lzo
-else ifeq ($(CONFIG_RD_LZ4), y)
- INITRD_COMPRESS=lz4
-endif
-export INITRD_COMPRESS
+INITRD_COMPRESS-y := gzip
+INITRD_COMPRESS-$(CONFIG_RD_BZIP2) := bzip2
+INITRD_COMPRESS-$(CONFIG_RD_LZMA) := lzma
+INITRD_COMPRESS-$(CONFIG_RD_XZ) := xz
+INITRD_COMPRESS-$(CONFIG_RD_LZO) := lzo
+INITRD_COMPRESS-$(CONFIG_RD_LZ4) := lz4
+# do not export INITRD_COMPRESS, since we didn't actually
+# choose a sane default compression above.
+# export INITRD_COMPRESS := $(INITRD_COMPRESS-y)
ifdef CONFIG_MODULE_SIG_ALL
MODSECKEY = ./signing_key.priv
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 135c674eaf9e..d39dc9b95a2c 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -16,8 +16,8 @@ config ALPHA
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+ select GENERIC_CLOCKEVENTS
select GENERIC_SMP_IDLE_THREAD
- select GENERIC_CMOS_UPDATE
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select HAVE_MOD_ARCH_SPECIFIC
@@ -488,6 +488,20 @@ config VGA_HOSE
which always have multiple hoses, and whose consoles support it.
+config ALPHA_QEMU
+ bool "Run under QEMU emulation"
+ depends on !ALPHA_GENERIC
+ ---help---
+ Assume the presence of special features supported by QEMU PALcode
+ that reduce the overhead of system emulation.
+
+ Generic kernels will auto-detect QEMU. But when building a
+ system-specific kernel, the assumption is that we want to
+ eliminate as many runtime tests as possible.
+
+ If unsure, say N.
+
config ALPHA_SRM
bool "Use SRM as bootloader" if ALPHA_CABRIOLET || ALPHA_AVANTI_CH || ALPHA_EB64P || ALPHA_PC164 || ALPHA_TAKARA || ALPHA_EB164 || ALPHA_ALCOR || ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_NAUTILUS || ALPHA_NONAME
depends on TTY
@@ -572,6 +586,30 @@ config NUMA
Access). This option is for configuring high-end multiprocessor
server machines. If in doubt, say N.
+config ALPHA_WTINT
+ bool "Use WTINT" if ALPHA_SRM || ALPHA_GENERIC
+ default y if ALPHA_QEMU
+ default n if ALPHA_EV5 || ALPHA_EV56 || (ALPHA_EV4 && !ALPHA_LCA)
+ default n if !ALPHA_SRM && !ALPHA_GENERIC
+ default y if SMP
+ ---help---
+ The Wait for Interrupt (WTINT) PALcall attempts to put the CPU
+ to sleep until the next interrupt. This may reduce the power
+ consumed, and the heat produced by the computer. However, it has
+ the side effect of making the cycle counter unreliable as a timing
+ device across the sleep.
+
+ For emulation under QEMU, definitely say Y here, as we have other
+ mechanisms for measuring time than the cycle counter.
+
+ For EV4 (but not LCA), EV5 and EV56 systems, or for systems running
+ MILO, sleep mode is not supported so you might as well say N here.
+
+ For SMP systems we cannot use the cycle counter for timing anyway,
+ so you might as well say Y here.
+
+ If unsure, say N.
+
config NODES_SHIFT
int
default "7"
@@ -613,9 +651,41 @@ config VERBOSE_MCHECK_ON
Take the default (1) unless you want more control or more info.
+choice
+ prompt "Timer interrupt frequency (HZ)?"
+ default HZ_128 if ALPHA_QEMU
+ default HZ_1200 if ALPHA_RAWHIDE
+ default HZ_1024
+ ---help---
+ The frequency at which timer interrupts occur. A high frequency
+ minimizes latency, whereas a low frequency minimizes the overhead
+ of process accounting. The latter effect is especially significant
+ when running under QEMU.
+
+ Note that some Alpha hardware cannot change the interrupt frequency
+ of the timer. If unsure, say 1024 (or 1200 for Rawhide).
+
+ config HZ_32
+ bool "32 Hz"
+ config HZ_64
+ bool "64 Hz"
+ config HZ_128
+ bool "128 Hz"
+ config HZ_256
+ bool "256 Hz"
+ config HZ_1024
+ bool "1024 Hz"
+ config HZ_1200
+ bool "1200 Hz"
+endchoice
+
config HZ
- int
- default 1200 if ALPHA_RAWHIDE
+ int
+ default 32 if HZ_32
+ default 64 if HZ_64
+ default 128 if HZ_128
+ default 256 if HZ_256
+ default 1200 if HZ_1200
default 1024
source "drivers/pci/Kconfig"
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index f01fb505ad52..a73a8e208a4a 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -4,3 +4,4 @@ generic-y += clkdev.h
generic-y += exec.h
generic-y += trace_clock.h
generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/alpha/include/asm/machvec.h b/arch/alpha/include/asm/machvec.h
index 72dbf2359270..75cb3641ed2f 100644
--- a/arch/alpha/include/asm/machvec.h
+++ b/arch/alpha/include/asm/machvec.h
@@ -33,6 +33,7 @@ struct alpha_machine_vector
int nr_irqs;
int rtc_port;
+ int rtc_boot_cpu_only;
unsigned int max_asn;
unsigned long max_isa_dma_address;
unsigned long irq_probe_mask;
@@ -95,9 +96,6 @@ struct alpha_machine_vector
struct _alpha_agp_info *(*agp_info)(void);
- unsigned int (*rtc_get_time)(struct rtc_time *);
- int (*rtc_set_time)(struct rtc_time *);
-
const char *vector_name;
/* NUMA information */
@@ -126,13 +124,19 @@ extern struct alpha_machine_vector alpha_mv;
#ifdef CONFIG_ALPHA_GENERIC
extern int alpha_using_srm;
+extern int alpha_using_qemu;
#else
-#ifdef CONFIG_ALPHA_SRM
-#define alpha_using_srm 1
-#else
-#define alpha_using_srm 0
-#endif
+# ifdef CONFIG_ALPHA_SRM
+# define alpha_using_srm 1
+# else
+# define alpha_using_srm 0
+# endif
+# ifdef CONFIG_ALPHA_QEMU
+# define alpha_using_qemu 1
+# else
+# define alpha_using_qemu 0
+# endif
#endif /* GENERIC */
-#endif
+#endif /* __KERNEL__ */
#endif /* __ALPHA_MACHVEC_H */
diff --git a/arch/alpha/include/asm/pal.h b/arch/alpha/include/asm/pal.h
index 6fcd2b5b08f0..5422a47646fc 100644
--- a/arch/alpha/include/asm/pal.h
+++ b/arch/alpha/include/asm/pal.h
@@ -89,6 +89,7 @@ __CALL_PAL_W1(wrmces, unsigned long);
__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long);
__CALL_PAL_W1(wrusp, unsigned long);
__CALL_PAL_W1(wrvptptr, unsigned long);
+__CALL_PAL_RW1(wtint, unsigned long, unsigned long);
/*
* TB routines..
@@ -111,5 +112,75 @@ __CALL_PAL_W1(wrvptptr, unsigned long);
#define tbiap() __tbi(-1, /* no second argument */)
#define tbia() __tbi(-2, /* no second argument */)
+/*
+ * QEMU Cserv routines..
+ */
+
+static inline unsigned long
+qemu_get_walltime(void)
+{
+ register unsigned long v0 __asm__("$0");
+ register unsigned long a0 __asm__("$16") = 3;
+
+ asm("call_pal %2 # cserve get_time"
+ : "=r"(v0), "+r"(a0)
+ : "i"(PAL_cserve)
+ : "$17", "$18", "$19", "$20", "$21");
+
+ return v0;
+}
+
+static inline unsigned long
+qemu_get_alarm(void)
+{
+ register unsigned long v0 __asm__("$0");
+ register unsigned long a0 __asm__("$16") = 4;
+
+ asm("call_pal %2 # cserve get_alarm"
+ : "=r"(v0), "+r"(a0)
+ : "i"(PAL_cserve)
+ : "$17", "$18", "$19", "$20", "$21");
+
+ return v0;
+}
+
+static inline void
+qemu_set_alarm_rel(unsigned long expire)
+{
+ register unsigned long a0 __asm__("$16") = 5;
+ register unsigned long a1 __asm__("$17") = expire;
+
+ asm volatile("call_pal %2 # cserve set_alarm_rel"
+ : "+r"(a0), "+r"(a1)
+ : "i"(PAL_cserve)
+ : "$0", "$18", "$19", "$20", "$21");
+}
+
+static inline void
+qemu_set_alarm_abs(unsigned long expire)
+{
+ register unsigned long a0 __asm__("$16") = 6;
+ register unsigned long a1 __asm__("$17") = expire;
+
+ asm volatile("call_pal %2 # cserve set_alarm_abs"
+ : "+r"(a0), "+r"(a1)
+ : "i"(PAL_cserve)
+ : "$0", "$18", "$19", "$20", "$21");
+}
+
+static inline unsigned long
+qemu_get_vmtime(void)
+{
+ register unsigned long v0 __asm__("$0");
+ register unsigned long a0 __asm__("$16") = 7;
+
+ asm("call_pal %2 # cserve get_time"
+ : "=r"(v0), "+r"(a0)
+ : "i"(PAL_cserve)
+ : "$17", "$18", "$19", "$20", "$21");
+
+ return v0;
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* __ALPHA_PAL_H */
diff --git a/arch/alpha/include/asm/rtc.h b/arch/alpha/include/asm/rtc.h
index d70408d36677..f71c3b0ed360 100644
--- a/arch/alpha/include/asm/rtc.h
+++ b/arch/alpha/include/asm/rtc.h
@@ -1,12 +1 @@
-#ifndef _ALPHA_RTC_H
-#define _ALPHA_RTC_H
-
-#if defined(CONFIG_ALPHA_MARVEL) && defined(CONFIG_SMP) \
- || defined(CONFIG_ALPHA_GENERIC)
-# define get_rtc_time alpha_mv.rtc_get_time
-# define set_rtc_time alpha_mv.rtc_set_time
-#endif
-
#include <asm-generic/rtc.h>
-
-#endif
diff --git a/arch/alpha/include/asm/string.h b/arch/alpha/include/asm/string.h
index b02b8a282940..c2911f591704 100644
--- a/arch/alpha/include/asm/string.h
+++ b/arch/alpha/include/asm/string.h
@@ -22,15 +22,27 @@ extern void * __memcpy(void *, const void *, size_t);
#define __HAVE_ARCH_MEMSET
extern void * __constant_c_memset(void *, unsigned long, size_t);
+extern void * ___memset(void *, int, size_t);
extern void * __memset(void *, int, size_t);
extern void * memset(void *, int, size_t);
-#define memset(s, c, n) \
-(__builtin_constant_p(c) \
- ? (__builtin_constant_p(n) && (c) == 0 \
- ? __builtin_memset((s),0,(n)) \
- : __constant_c_memset((s),0x0101010101010101UL*(unsigned char)(c),(n))) \
- : __memset((s),(c),(n)))
+/* For gcc 3.x, we cannot have the inline function named "memset" because
+ the __builtin_memset will attempt to resolve to the inline as well,
+ leading to a "sorry" about unimplemented recursive inlining. */
+extern inline void *__memset(void *s, int c, size_t n)
+{
+ if (__builtin_constant_p(c)) {
+ if (__builtin_constant_p(n)) {
+ return __builtin_memset(s, c, n);
+ } else {
+ unsigned long c8 = (c & 0xff) * 0x0101010101010101UL;
+ return __constant_c_memset(s, c8, n);
+ }
+ }
+ return ___memset(s, c, n);
+}
+
+#define memset __memset
#define __HAVE_ARCH_STRCPY
extern char * strcpy(char *,const char *);
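
The (c & 0xff) * 0x0101010101010101UL step in the new inline broadcasts the
fill byte into every byte lane of a 64-bit word before it is handed to
__constant_c_memset. A minimal user-space sketch of the same trick
(illustrative only, not kernel code):

```c
/* Broadcast a fill byte across a 64-bit word, as the __memset()
 * inline above does.  Standalone demo; ULL keeps it correct on ILP32. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	int c = 0xab;
	uint64_t c8 = (c & 0xff) * 0x0101010101010101ULL;

	printf("%#018" PRIx64 "\n", c8);	/* 0xabababababababab */
	return 0;
}
```
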
diff --git a/arch/alpha/include/uapi/asm/pal.h b/arch/alpha/include/uapi/asm/pal.h
index 3c0ce08e5f59..dfc8140b9088 100644
--- a/arch/alpha/include/uapi/asm/pal.h
+++ b/arch/alpha/include/uapi/asm/pal.h
@@ -46,6 +46,7 @@
#define PAL_rdusp 58
#define PAL_whami 60
#define PAL_retsys 61
+#define PAL_wtint 62
#define PAL_rti 63
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index 84ec46b38f7d..0d54650e78fc 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_PCI) += pci.o pci_iommu.o pci-sysfs.o
obj-$(CONFIG_SRM_ENV) += srm_env.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o
+obj-$(CONFIG_RTC_DRV_ALPHA) += rtc.o
ifdef CONFIG_ALPHA_GENERIC
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index 89566b346c0f..f4c7ab6f43b0 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -40,6 +40,7 @@ EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(___memset);
EXPORT_SYMBOL(__memsetw);
EXPORT_SYMBOL(__constant_c_memset);
EXPORT_SYMBOL(copy_page);
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 28e4429596f3..1c8625cb0e25 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -66,21 +66,7 @@ do_entInt(unsigned long type, unsigned long vector,
break;
case 1:
old_regs = set_irq_regs(regs);
-#ifdef CONFIG_SMP
- {
- long cpu;
-
- smp_percpu_timer_interrupt(regs);
- cpu = smp_processor_id();
- if (cpu != boot_cpuid) {
- kstat_incr_irqs_this_cpu(RTC_IRQ, irq_to_desc(RTC_IRQ));
- } else {
- handle_irq(RTC_IRQ);
- }
- }
-#else
handle_irq(RTC_IRQ);
-#endif
set_irq_regs(old_regs);
return;
case 2:
@@ -228,7 +214,7 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr,
*/
struct irqaction timer_irqaction = {
- .handler = timer_interrupt,
+ .handler = rtc_timer_interrupt,
.name = "timer",
};
diff --git a/arch/alpha/kernel/machvec_impl.h b/arch/alpha/kernel/machvec_impl.h
index 7fa62488bd16..f54bdf658cd0 100644
--- a/arch/alpha/kernel/machvec_impl.h
+++ b/arch/alpha/kernel/machvec_impl.h
@@ -43,10 +43,7 @@
#define CAT1(x,y) x##y
#define CAT(x,y) CAT1(x,y)
-#define DO_DEFAULT_RTC \
- .rtc_port = 0x70, \
- .rtc_get_time = common_get_rtc_time, \
- .rtc_set_time = common_set_rtc_time
+#define DO_DEFAULT_RTC .rtc_port = 0x70
#define DO_EV4_MMU \
.max_asn = EV4_MAX_ASN, \
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index d821b17047e0..c52e7f0ee5f6 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -83,6 +83,8 @@ struct alpha_pmu_t {
long pmc_left[3];
/* Subroutine for allocation of PMCs. Enforces constraints. */
int (*check_constraints)(struct perf_event **, unsigned long *, int);
+ /* Subroutine for checking validity of a raw event for this PMU. */
+ int (*raw_event_valid)(u64 config);
};
/*
@@ -203,6 +205,12 @@ success:
}
+static int ev67_raw_event_valid(u64 config)
+{
+ return config >= EV67_CYCLES && config < EV67_LAST_ET;
+}
+
static const struct alpha_pmu_t ev67_pmu = {
.event_map = ev67_perfmon_event_map,
.max_events = ARRAY_SIZE(ev67_perfmon_event_map),
@@ -211,7 +219,8 @@ static const struct alpha_pmu_t ev67_pmu = {
.pmc_count_mask = {EV67_PCTR_0_COUNT_MASK, EV67_PCTR_1_COUNT_MASK, 0},
.pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0},
.pmc_left = {16, 4, 0},
- .check_constraints = ev67_check_constraints
+ .check_constraints = ev67_check_constraints,
+ .raw_event_valid = ev67_raw_event_valid,
};
@@ -609,7 +618,9 @@ static int __hw_perf_event_init(struct perf_event *event)
} else if (attr->type == PERF_TYPE_HW_CACHE) {
return -EOPNOTSUPP;
} else if (attr->type == PERF_TYPE_RAW) {
- ev = attr->config & 0xff;
+ if (!alpha_pmu->raw_event_valid(attr->config))
+ return -EINVAL;
+ ev = attr->config;
} else {
return -EOPNOTSUPP;
}
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index f2360a74e5d5..1941a07b5811 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -46,6 +46,23 @@
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);
+#ifdef CONFIG_ALPHA_WTINT
+/*
+ * Sleep the CPU.
+ * EV6, LCA45 and QEMU know how to power down, skipping N timer interrupts.
+ */
+void arch_cpu_idle(void)
+{
+ wtint(0);
+ local_irq_enable();
+}
+
+void arch_cpu_idle_dead(void)
+{
+ wtint(INT_MAX);
+}
+#endif /* ALPHA_WTINT */
+
struct halt_info {
int mode;
char *restart_cmd;
diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h
index d3e52d3fd592..da2d6ec9c370 100644
--- a/arch/alpha/kernel/proto.h
+++ b/arch/alpha/kernel/proto.h
@@ -135,17 +135,15 @@ extern void unregister_srm_console(void);
/* smp.c */
extern void setup_smp(void);
extern void handle_ipi(struct pt_regs *);
-extern void smp_percpu_timer_interrupt(struct pt_regs *);
/* bios32.c */
/* extern void reset_for_srm(void); */
/* time.c */
-extern irqreturn_t timer_interrupt(int irq, void *dev);
+extern irqreturn_t rtc_timer_interrupt(int irq, void *dev);
+extern void init_clockevent(void);
extern void common_init_rtc(void);
extern unsigned long est_cycle_freq;
-extern unsigned int common_get_rtc_time(struct rtc_time *time);
-extern int common_set_rtc_time(struct rtc_time *time);
/* smc37c93x.c */
extern void SMC93x_Init(void);
diff --git a/arch/alpha/kernel/rtc.c b/arch/alpha/kernel/rtc.c
new file mode 100644
index 000000000000..c8d284d8521f
--- /dev/null
+++ b/arch/alpha/kernel/rtc.c
@@ -0,0 +1,323 @@
+/*
+ * linux/arch/alpha/kernel/rtc.c
+ *
+ * Copyright (C) 1991, 1992, 1995, 1999, 2000 Linus Torvalds
+ *
+ * This file contains date handling.
+ */
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mc146818rtc.h>
+#include <linux/bcd.h>
+#include <linux/rtc.h>
+#include <linux/platform_device.h>
+
+#include <asm/rtc.h>
+
+#include "proto.h"
+
+
+/*
+ * Support for the RTC device.
+ *
+ * We don't want to use the rtc-cmos driver, because we don't want to support
+ * alarms, as that would be indistinguishable from timer interrupts.
+ *
+ * Further, generic code is really, really tied to a 1900 epoch. This is
+ * true in __get_rtc_time as well as the users of struct rtc_time e.g.
+ * rtc_tm_to_time. Thankfully all of the other epochs in use are later
+ * than 1900, and so it's easy to adjust.
+ */
+
+static unsigned long rtc_epoch;
+
+static int __init
+specify_epoch(char *str)
+{
+ unsigned long epoch = simple_strtoul(str, NULL, 0);
+ if (epoch < 1900)
+ printk("Ignoring invalid user specified epoch %lu\n", epoch);
+ else
+ rtc_epoch = epoch;
+ return 1;
+}
+__setup("epoch=", specifiy_epoch);
+
+static void __init
+init_rtc_epoch(void)
+{
+ int epoch, year, ctrl;
+
+ if (rtc_epoch != 0) {
+ /* The epoch was specified on the command-line. */
+ return;
+ }
+
+ /* Detect the epoch in use on this computer. */
+ ctrl = CMOS_READ(RTC_CONTROL);
+ year = CMOS_READ(RTC_YEAR);
+ if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+ year = bcd2bin(year);
+
+ /* PC-like is standard; used for year >= 70 */
+ epoch = 1900;
+ if (year < 20) {
+ epoch = 2000;
+ } else if (year >= 20 && year < 48) {
+ /* NT epoch */
+ epoch = 1980;
+ } else if (year >= 48 && year < 70) {
+ /* Digital UNIX epoch */
+ epoch = 1952;
+ }
+ rtc_epoch = epoch;
+
+ printk(KERN_INFO "Using epoch %d for rtc year %d\n", epoch, year);
+}
+
+static int
+alpha_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ __get_rtc_time(tm);
+
+ /* Adjust for non-default epochs. It's easier to depend on the
+ generic __get_rtc_time and adjust the epoch here than create
+ a copy of __get_rtc_time with the edits we need. */
+ if (rtc_epoch != 1900) {
+ int year = tm->tm_year;
+ /* Undo the century adjustment made in __get_rtc_time. */
+ if (year >= 100)
+ year -= 100;
+ year += rtc_epoch - 1900;
+ /* Redo the century adjustment with the epoch in place. */
+ if (year <= 69)
+ year += 100;
+ tm->tm_year = year;
+ }
+
+ return rtc_valid_tm(tm);
+}
+
+static int
+alpha_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct rtc_time xtm;
+
+ if (rtc_epoch != 1900) {
+ xtm = *tm;
+ xtm.tm_year -= rtc_epoch - 1900;
+ tm = &xtm;
+ }
+
+ return __set_rtc_time(tm);
+}
+
+static int
+alpha_rtc_set_mmss(struct device *dev, unsigned long nowtime)
+{
+ int retval = 0;
+ int real_seconds, real_minutes, cmos_minutes;
+ unsigned char save_control, save_freq_select;
+
+ /* Note: This code only updates minutes and seconds. Comments
+ indicate this was to avoid messing with unknown time zones,
+ and with the epoch nonsense described above. In order for
+ this to work, the existing clock cannot be off by more than
+ 15 minutes.
+
+ ??? This choice may be out of date. The x86 port does
+ not have problems with timezones, and the epoch processing has
+ now been fixed in alpha_rtc_set_time.
+
+ In either case, one can always force a full rtc update with
+ the userland hwclock program, so surely 15 minute accuracy
+ is no real burden. */
+
+ /* In order to set the CMOS clock precisely, we have to be called
+ 500 ms after the second nowtime has started, because when
+ nowtime is written into the registers of the CMOS clock, it will
+ jump to the next second precisely 500 ms later. Check the Motorola
+ MC146818A or Dallas DS12887 data sheet for details. */
+
+ /* irq are locally disabled here */
+ spin_lock(&rtc_lock);
+ /* Tell the clock it's being set */
+ save_control = CMOS_READ(RTC_CONTROL);
+ CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
+
+ /* Stop and reset prescaler */
+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
+ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+
+ cmos_minutes = CMOS_READ(RTC_MINUTES);
+ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+ cmos_minutes = bcd2bin(cmos_minutes);
+
+ real_seconds = nowtime % 60;
+ real_minutes = nowtime / 60;
+ if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1) {
+ /* correct for half hour time zone */
+ real_minutes += 30;
+ }
+ real_minutes %= 60;
+
+ if (abs(real_minutes - cmos_minutes) < 30) {
+ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ real_seconds = bin2bcd(real_seconds);
+ real_minutes = bin2bcd(real_minutes);
+ }
+ CMOS_WRITE(real_seconds,RTC_SECONDS);
+ CMOS_WRITE(real_minutes,RTC_MINUTES);
+ } else {
+ printk_once(KERN_NOTICE
+ "set_rtc_mmss: can't update from %d to %d\n",
+ cmos_minutes, real_minutes);
+ retval = -1;
+ }
+
+ /* The following flags have to be released exactly in this order,
+ * otherwise the DS12887 (popular MC146818A clone with integrated
+ * battery and quartz) will not reset the oscillator and will not
+ * update precisely 500 ms later. You won't find this mentioned in
+ * the Dallas Semiconductor data sheets, but who believes data
+ * sheets anyway ... -- Markus Kuhn
+ */
+ CMOS_WRITE(save_control, RTC_CONTROL);
+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+ spin_unlock(&rtc_lock);
+
+ return retval;
+}
+
+static int
+alpha_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case RTC_EPOCH_READ:
+ return put_user(rtc_epoch, (unsigned long __user *)arg);
+ case RTC_EPOCH_SET:
+ if (arg < 1900)
+ return -EINVAL;
+ rtc_epoch = arg;
+ return 0;
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+static const struct rtc_class_ops alpha_rtc_ops = {
+ .read_time = alpha_rtc_read_time,
+ .set_time = alpha_rtc_set_time,
+ .set_mmss = alpha_rtc_set_mmss,
+ .ioctl = alpha_rtc_ioctl,
+};
+
+/*
+ * Similarly, except do the actual CMOS access on the boot cpu only.
+ * This requires marshalling the data across an interprocessor call.
+ */
+
+#if defined(CONFIG_SMP) && \
+ (defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_MARVEL))
+# define HAVE_REMOTE_RTC 1
+
+union remote_data {
+ struct rtc_time *tm;
+ unsigned long now;
+ long retval;
+};
+
+static void
+do_remote_read(void *data)
+{
+ union remote_data *x = data;
+ x->retval = alpha_rtc_read_time(NULL, x->tm);
+}
+
+static int
+remote_read_time(struct device *dev, struct rtc_time *tm)
+{
+ union remote_data x;
+ if (smp_processor_id() != boot_cpuid) {
+ x.tm = tm;
+ smp_call_function_single(boot_cpuid, do_remote_read, &x, 1);
+ return x.retval;
+ }
+ return alpha_rtc_read_time(NULL, tm);
+}
+
+static void
+do_remote_set(void *data)
+{
+ union remote_data *x = data;
+ x->retval = alpha_rtc_set_time(NULL, x->tm);
+}
+
+static int
+remote_set_time(struct device *dev, struct rtc_time *tm)
+{
+ union remote_data x;
+ if (smp_processor_id() != boot_cpuid) {
+ x.tm = tm;
+ smp_call_function_single(boot_cpuid, do_remote_set, &x, 1);
+ return x.retval;
+ }
+ return alpha_rtc_set_time(NULL, tm);
+}
+
+static void
+do_remote_mmss(void *data)
+{
+ union remote_data *x = data;
+ x->retval = alpha_rtc_set_mmss(NULL, x->now);
+}
+
+static int
+remote_set_mmss(struct device *dev, unsigned long now)
+{
+ union remote_data x;
+ if (smp_processor_id() != boot_cpuid) {
+ x.now = now;
+ smp_call_function_single(boot_cpuid, do_remote_mmss, &x, 1);
+ return x.retval;
+ }
+ return alpha_rtc_set_mmss(NULL, now);
+}
+
+static const struct rtc_class_ops remote_rtc_ops = {
+ .read_time = remote_read_time,
+ .set_time = remote_set_time,
+ .set_mmss = remote_set_mmss,
+ .ioctl = alpha_rtc_ioctl,
+};
+#endif
+
+static int __init
+alpha_rtc_init(void)
+{
+ const struct rtc_class_ops *ops;
+ struct platform_device *pdev;
+ struct rtc_device *rtc;
+ const char *name;
+
+ init_rtc_epoch();
+ name = "rtc-alpha";
+ ops = &alpha_rtc_ops;
+
+#ifdef HAVE_REMOTE_RTC
+ if (alpha_mv.rtc_boot_cpu_only)
+ ops = &remote_rtc_ops;
+#endif
+
+ pdev = platform_device_register_simple(name, -1, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+ rtc = devm_rtc_device_register(&pdev->dev, name, ops, THIS_MODULE);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+ platform_set_drvdata(pdev, rtc);
+ return 0;
+}
+device_initcall(alpha_rtc_init);
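
The half-hour correction in alpha_rtc_set_mmss() is easiest to follow with
concrete numbers. A standalone sketch with hypothetical values (the kernel
routine additionally handles BCD conversion and the RTC register protocol):

```c
/* Half-hour time zone correction: if wall-clock and CMOS minutes differ
 * by an odd number of half hours, shift by 30 so the "within 30 minutes"
 * update test can still pass. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int cmos_minutes = 50;	/* what the RTC currently holds */
	int real_minutes = 20;	/* wall-clock minutes to write back */

	if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
		real_minutes += 30;	/* 20 -> 50 */
	real_minutes %= 60;

	if (abs(real_minutes - cmos_minutes) < 30)
		printf("update RTC minutes to %d\n", real_minutes);
	return 0;
}
```
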
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 9e3107cc5ebb..b20af76f12c1 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -115,10 +115,17 @@ unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE;
#ifdef CONFIG_ALPHA_GENERIC
struct alpha_machine_vector alpha_mv;
+#endif
+
+#ifndef alpha_using_srm
int alpha_using_srm;
EXPORT_SYMBOL(alpha_using_srm);
#endif
+#ifndef alpha_using_qemu
+int alpha_using_qemu;
+#endif
+
static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long,
unsigned long);
static struct alpha_machine_vector *get_sysvec_byname(const char *);
@@ -529,11 +536,15 @@ setup_arch(char **cmdline_p)
atomic_notifier_chain_register(&panic_notifier_list,
&alpha_panic_block);
-#ifdef CONFIG_ALPHA_GENERIC
+#ifndef alpha_using_srm
/* Assume that we've booted from SRM if we haven't booted from MILO.
Detect the later by looking for "MILO" in the system serial nr. */
alpha_using_srm = strncmp((const char *)hwrpb->ssn, "MILO", 4) != 0;
#endif
+#ifndef alpha_using_qemu
+ /* Similarly, look for QEMU. */
+ alpha_using_qemu = strstr((const char *)hwrpb->ssn, "QEMU") != 0;
+#endif
/* If we are using SRM, we want to allow callbacks
as early as possible, so do this NOW, and then
@@ -1207,6 +1218,7 @@ show_cpuinfo(struct seq_file *f, void *slot)
char *systype_name;
char *sysvariation_name;
int nr_processors;
+ unsigned long timer_freq;
cpu_index = (unsigned) (cpu->type - 1);
cpu_name = "Unknown";
@@ -1218,6 +1230,12 @@ show_cpuinfo(struct seq_file *f, void *slot)
nr_processors = get_nr_processors(cpu, hwrpb->nr_processors);
+#if CONFIG_HZ == 1024 || CONFIG_HZ == 1200
+ timer_freq = (100UL * hwrpb->intr_freq) / 4096;
+#else
+ timer_freq = 100UL * CONFIG_HZ;
+#endif
+
seq_printf(f, "cpu\t\t\t: Alpha\n"
"cpu model\t\t: %s\n"
"cpu variation\t\t: %ld\n"
@@ -1243,8 +1261,7 @@ show_cpuinfo(struct seq_file *f, void *slot)
(char*)hwrpb->ssn,
est_cycle_freq ? : hwrpb->cycle_freq,
est_cycle_freq ? "est." : "",
- hwrpb->intr_freq / 4096,
- (100 * hwrpb->intr_freq / 4096) % 100,
+ timer_freq / 100, timer_freq % 100,
hwrpb->pagesize,
hwrpb->pa_bits,
hwrpb->max_asn,
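
timer_freq is carried in hundredths of a Hz so the seq_printf above can show
two decimal places; the HWRPB interrupt frequency is scaled by 4096, as the
old intr_freq / 4096 output implies. A standalone check with a hypothetical
HWRPB value:

```c
/* Sketch: 1024 Hz reported by the HWRPB as 1024 * 4096 = 4194304,
 * converted to centi-Hz for the "%lu.%02lu" /proc/cpuinfo format. */
#include <stdio.h>

int main(void)
{
	unsigned long intr_freq = 4194304UL;	/* hypothetical HWRPB field */
	unsigned long timer_freq = (100UL * intr_freq) / 4096;

	printf("%lu.%02lu Hz\n", timer_freq / 100, timer_freq % 100);
	return 0;	/* prints 1024.00 Hz */
}
```
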
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 9dbbcb3b9146..99ac36d5de4e 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -138,9 +138,11 @@ smp_callin(void)
/* Get our local ticker going. */
smp_setup_percpu_timer(cpuid);
+ init_clockevent();
/* Call platform-specific callin, if specified */
- if (alpha_mv.smp_callin) alpha_mv.smp_callin();
+ if (alpha_mv.smp_callin)
+ alpha_mv.smp_callin();
/* All kernel threads share the same mm context. */
atomic_inc(&init_mm.mm_count);
@@ -498,35 +500,6 @@ smp_cpus_done(unsigned int max_cpus)
((bogosum + 2500) / (5000/HZ)) % 100);
}
-
-void
-smp_percpu_timer_interrupt(struct pt_regs *regs)
-{
- struct pt_regs *old_regs;
- int cpu = smp_processor_id();
- unsigned long user = user_mode(regs);
- struct cpuinfo_alpha *data = &cpu_data[cpu];
-
- old_regs = set_irq_regs(regs);
-
- /* Record kernel PC. */
- profile_tick(CPU_PROFILING);
-
- if (!--data->prof_counter) {
- /* We need to make like a normal interrupt -- otherwise
- timer interrupts ignore the global interrupt lock,
- which would be a Bad Thing. */
- irq_enter();
-
- update_process_times(user);
-
- data->prof_counter = data->prof_multiplier;
-
- irq_exit();
- }
- set_irq_regs(old_regs);
-}
-
int
setup_profiling_timer(unsigned int multiplier)
{
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
index 5a0af11b3a61..608f2a7fa0a3 100644
--- a/arch/alpha/kernel/sys_jensen.c
+++ b/arch/alpha/kernel/sys_jensen.c
@@ -224,8 +224,6 @@ struct alpha_machine_vector jensen_mv __initmv = {
.machine_check = jensen_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.rtc_port = 0x170,
- .rtc_get_time = common_get_rtc_time,
- .rtc_set_time = common_set_rtc_time,
.nr_irqs = 16,
.device_interrupt = jensen_device_interrupt,
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index c92e389ff219..f21d61fab678 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -22,7 +22,6 @@
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
#include <asm/vga.h>
-#include <asm/rtc.h>
#include "proto.h"
#include "err_impl.h"
@@ -400,57 +399,6 @@ marvel_init_rtc(void)
init_rtc_irq();
}
-struct marvel_rtc_time {
- struct rtc_time *time;
- int retval;
-};
-
-#ifdef CONFIG_SMP
-static void
-smp_get_rtc_time(void *data)
-{
- struct marvel_rtc_time *mrt = data;
- mrt->retval = __get_rtc_time(mrt->time);
-}
-
-static void
-smp_set_rtc_time(void *data)
-{
- struct marvel_rtc_time *mrt = data;
- mrt->retval = __set_rtc_time(mrt->time);
-}
-#endif
-
-static unsigned int
-marvel_get_rtc_time(struct rtc_time *time)
-{
-#ifdef CONFIG_SMP
- struct marvel_rtc_time mrt;
-
- if (smp_processor_id() != boot_cpuid) {
- mrt.time = time;
- smp_call_function_single(boot_cpuid, smp_get_rtc_time, &mrt, 1);
- return mrt.retval;
- }
-#endif
- return __get_rtc_time(time);
-}
-
-static int
-marvel_set_rtc_time(struct rtc_time *time)
-{
-#ifdef CONFIG_SMP
- struct marvel_rtc_time mrt;
-
- if (smp_processor_id() != boot_cpuid) {
- mrt.time = time;
- smp_call_function_single(boot_cpuid, smp_set_rtc_time, &mrt, 1);
- return mrt.retval;
- }
-#endif
- return __set_rtc_time(time);
-}
-
static void
marvel_smp_callin(void)
{
@@ -492,8 +440,7 @@ struct alpha_machine_vector marvel_ev7_mv __initmv = {
.vector_name = "MARVEL/EV7",
DO_EV7_MMU,
.rtc_port = 0x70,
- .rtc_get_time = marvel_get_rtc_time,
- .rtc_set_time = marvel_set_rtc_time,
+ .rtc_boot_cpu_only = 1,
DO_MARVEL_IO,
.machine_check = marvel_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index ea3395036556..ee39cee8064c 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -3,13 +3,7 @@
*
* Copyright (C) 1991, 1992, 1995, 1999, 2000 Linus Torvalds
*
- * This file contains the PC-specific time handling details:
- * reading the RTC at bootup, etc..
- * 1994-07-02 Alan Modra
- * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
- * 1995-03-26 Markus Kuhn
- * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
- * precision CMOS clock update
+ * This file contains the clocksource time handling.
* 1997-09-10 Updated NTP code according to technical memorandum Jan '96
* "A Kernel Model for Precision Timekeeping" by Dave Mills
* 1997-01-09 Adrian Sun
@@ -21,9 +15,6 @@
* 1999-04-16 Thorsten Kranzkowski (dl8bcu@gmx.net)
* fixed algorithm in do_gettimeofday() for calculating the precise time
* from processor cycle counter (now taking lost_ticks into account)
- * 2000-08-13 Jan-Benedict Glaw <jbglaw@lug-owl.de>
- * Fixed time_init to be aware of epoches != 1900. This prevents
- * booting up in 2048 for me;) Code is stolen from rtc.c.
* 2003-06-03 R. Scott Bailey <scott.bailey@eds.com>
* Tighten sanity in time_init from 1% (10,000 PPM) to 250 PPM
*/
@@ -46,40 +37,19 @@
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/hwrpb.h>
-#include <asm/rtc.h>
#include <linux/mc146818rtc.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
+#include <linux/clockchips.h>
#include "proto.h"
#include "irq_impl.h"
-static int set_rtc_mmss(unsigned long);
-
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
-#define TICK_SIZE (tick_nsec / 1000)
-
-/*
- * Shift amount by which scaled_ticks_per_cycle is scaled. Shifting
- * by 48 gives us 16 bits for HZ while keeping the accuracy good even
- * for large CPU clock rates.
- */
-#define FIX_SHIFT 48
-
-/* lump static variables together for more efficient access: */
-static struct {
- /* cycle counter last time it got invoked */
- __u32 last_time;
- /* ticks/cycle * 2^48 */
- unsigned long scaled_ticks_per_cycle;
- /* partial unused tick */
- unsigned long partial_tick;
-} state;
-
unsigned long est_cycle_freq;
#ifdef CONFIG_IRQ_WORK
@@ -108,109 +78,156 @@ static inline __u32 rpcc(void)
return __builtin_alpha_rpcc();
}
-int update_persistent_clock(struct timespec now)
-{
- return set_rtc_mmss(now.tv_sec);
-}
-void read_persistent_clock(struct timespec *ts)
+
+/*
+ * The RTC as a clock_event_device primitive.
+ */
+
+static DEFINE_PER_CPU(struct clock_event_device, cpu_ce);
+
+irqreturn_t
+rtc_timer_interrupt(int irq, void *dev)
{
- unsigned int year, mon, day, hour, min, sec, epoch;
-
- sec = CMOS_READ(RTC_SECONDS);
- min = CMOS_READ(RTC_MINUTES);
- hour = CMOS_READ(RTC_HOURS);
- day = CMOS_READ(RTC_DAY_OF_MONTH);
- mon = CMOS_READ(RTC_MONTH);
- year = CMOS_READ(RTC_YEAR);
-
- if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
- sec = bcd2bin(sec);
- min = bcd2bin(min);
- hour = bcd2bin(hour);
- day = bcd2bin(day);
- mon = bcd2bin(mon);
- year = bcd2bin(year);
- }
+ int cpu = smp_processor_id();
+ struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
- /* PC-like is standard; used for year >= 70 */
- epoch = 1900;
- if (year < 20)
- epoch = 2000;
- else if (year >= 20 && year < 48)
- /* NT epoch */
- epoch = 1980;
- else if (year >= 48 && year < 70)
- /* Digital UNIX epoch */
- epoch = 1952;
+ /* Don't run the hook for UNUSED or SHUTDOWN. */
+ if (likely(ce->mode == CLOCK_EVT_MODE_PERIODIC))
+ ce->event_handler(ce);
- printk(KERN_INFO "Using epoch = %d\n", epoch);
+ if (test_irq_work_pending()) {
+ clear_irq_work_pending();
+ irq_work_run();
+ }
- if ((year += epoch) < 1970)
- year += 100;
+ return IRQ_HANDLED;
+}
- ts->tv_sec = mktime(year, mon, day, hour, min, sec);
- ts->tv_nsec = 0;
+static void
+rtc_ce_set_mode(enum clock_event_mode mode, struct clock_event_device *ce)
+{
+ /* The mode member of CE is updated in generic code.
+ Since we only support periodic events, nothing to do. */
+}
+
+static int
+rtc_ce_set_next_event(unsigned long evt, struct clock_event_device *ce)
+{
+ /* This hook is for oneshot mode, which we don't support. */
+ return -EINVAL;
}
+static void __init
+init_rtc_clockevent(void)
+{
+ int cpu = smp_processor_id();
+ struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
+
+ *ce = (struct clock_event_device){
+ .name = "rtc",
+ .features = CLOCK_EVT_FEAT_PERIODIC,
+ .rating = 100,
+ .cpumask = cpumask_of(cpu),
+ .set_mode = rtc_ce_set_mode,
+ .set_next_event = rtc_ce_set_next_event,
+ };
+ clockevents_config_and_register(ce, CONFIG_HZ, 0, 0);
+}
+
/*
- * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "xtime_update()" routine every clocktick
+ * The QEMU clock as a clocksource primitive.
*/
-irqreturn_t timer_interrupt(int irq, void *dev)
+
+static cycle_t
+qemu_cs_read(struct clocksource *cs)
{
- unsigned long delta;
- __u32 now;
- long nticks;
+ return qemu_get_vmtime();
+}
-#ifndef CONFIG_SMP
- /* Not SMP, do kernel PC profiling here. */
- profile_tick(CPU_PROFILING);
-#endif
+static struct clocksource qemu_cs = {
+ .name = "qemu",
+ .rating = 400,
+ .read = qemu_cs_read,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .max_idle_ns = LONG_MAX
+};
- /*
- * Calculate how many ticks have passed since the last update,
- * including any previous partial leftover. Save any resulting
- * fraction for the next pass.
- */
- now = rpcc();
- delta = now - state.last_time;
- state.last_time = now;
- delta = delta * state.scaled_ticks_per_cycle + state.partial_tick;
- state.partial_tick = delta & ((1UL << FIX_SHIFT) - 1);
- nticks = delta >> FIX_SHIFT;
- if (nticks)
- xtime_update(nticks);
+/*
+ * The QEMU alarm as a clock_event_device primitive.
+ */
- if (test_irq_work_pending()) {
- clear_irq_work_pending();
- irq_work_run();
- }
+static void
+qemu_ce_set_mode(enum clock_event_mode mode, struct clock_event_device *ce)
+{
+ /* The mode member of CE is updated for us in generic code.
+ Just make sure that the event is disabled. */
+ qemu_set_alarm_abs(0);
+}
-#ifndef CONFIG_SMP
- while (nticks--)
- update_process_times(user_mode(get_irq_regs()));
-#endif
+static int
+qemu_ce_set_next_event(unsigned long evt, struct clock_event_device *ce)
+{
+ qemu_set_alarm_rel(evt);
+ return 0;
+}
+static irqreturn_t
+qemu_timer_interrupt(int irq, void *dev)
+{
+ int cpu = smp_processor_id();
+ struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
+
+ ce->event_handler(ce);
return IRQ_HANDLED;
}
+static void __init
+init_qemu_clockevent(void)
+{
+ int cpu = smp_processor_id();
+ struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
+
+ *ce = (struct clock_event_device){
+ .name = "qemu",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 400,
+ .cpumask = cpumask_of(cpu),
+ .set_mode = qemu_ce_set_mode,
+ .set_next_event = qemu_ce_set_next_event,
+ };
+
+ clockevents_config_and_register(ce, NSEC_PER_SEC, 1000, LONG_MAX);
+}
+
+
void __init
common_init_rtc(void)
{
- unsigned char x;
+ unsigned char x, sel = 0;
/* Reset periodic interrupt frequency. */
- x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f;
- /* Test includes known working values on various platforms
- where 0x26 is wrong; we refuse to change those. */
- if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) {
- printk("Setting RTC_FREQ to 1024 Hz (%x)\n", x);
- CMOS_WRITE(0x26, RTC_FREQ_SELECT);
+#if CONFIG_HZ == 1024 || CONFIG_HZ == 1200
+ x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f;
+ /* Test includes known working values on various platforms
+ where 0x26 is wrong; we refuse to change those. */
+ if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) {
+ sel = RTC_REF_CLCK_32KHZ + 6;
}
+#elif CONFIG_HZ == 256 || CONFIG_HZ == 128 || CONFIG_HZ == 64 || CONFIG_HZ == 32
+ sel = RTC_REF_CLCK_32KHZ + __builtin_ffs(32768 / CONFIG_HZ);
+#else
+# error "Unknown HZ from arch/alpha/Kconfig"
+#endif
+ if (sel) {
+ printk(KERN_INFO "Setting RTC_FREQ to %d Hz (%x)\n",
+ CONFIG_HZ, sel);
+ CMOS_WRITE(sel, RTC_FREQ_SELECT);
+ }
/* Turn on periodic interrupts. */
x = CMOS_READ(RTC_CONTROL);
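
For the binary-friendly HZ values the divider follows from the MC146818
rate-select encoding: with the 32.768 kHz time base the periodic rate is
65536 >> RS, hence RS = ffs(32768 / HZ). A standalone check of the sel
computation above (RTC_REF_CLCK_32KHZ assumed to be 0x20, as in
<linux/mc146818rtc.h>):

```c
/* Reproduce the RTC_FREQ_SELECT value for a given HZ. */
#include <stdio.h>

#define RTC_REF_CLCK_32KHZ 0x20	/* time-base bits, assumed value */

int main(void)
{
	int hz = 128;	/* stand-in for CONFIG_HZ */
	int rs = __builtin_ffs(32768 / hz);
	int sel = RTC_REF_CLCK_32KHZ + rs;

	printf("HZ=%d -> sel=%#x (rate %d Hz)\n", hz, sel, 65536 >> rs);
	/* HZ=128 -> sel=0x29; HZ=1024 -> sel=0x26, the classic value. */
	return 0;
}
```
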
@@ -233,16 +250,37 @@ common_init_rtc(void)
init_rtc_irq();
}
-unsigned int common_get_rtc_time(struct rtc_time *time)
-{
- return __get_rtc_time(time);
-}
+
+#ifndef CONFIG_ALPHA_WTINT
+/*
+ * The RPCC as a clocksource primitive.
+ *
+ * While we have free-running timecounters running on all CPUs, and we make
+ * a half-hearted attempt in init_rtc_rpcc_info to sync the timecounter
+ * with the wall clock, that initialization isn't kept up-to-date across
+ * different time counters in SMP mode. Therefore we can only use this
+ * method when there's only one CPU enabled.
+ *
+ * When using the WTINT PALcall, the RPCC may shift to a lower frequency,
+ * or stop altogether, while waiting for the interrupt. Therefore we cannot
+ * use this method when WTINT is in use.
+ */
-int common_set_rtc_time(struct rtc_time *time)
+static cycle_t read_rpcc(struct clocksource *cs)
{
- return __set_rtc_time(time);
+ return rpcc();
}
+static struct clocksource clocksource_rpcc = {
+ .name = "rpcc",
+ .rating = 300,
+ .read = read_rpcc,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS
+};
+#endif /* ALPHA_WTINT */
+
+
/* Validate a computed cycle counter result against the known bounds for
the given processor core. There's too much brokenness in the way of
timing hardware for any one method to work everywhere. :-(
@@ -353,33 +391,6 @@ rpcc_after_update_in_progress(void)
return rpcc();
}
-#ifndef CONFIG_SMP
-/* Until and unless we figure out how to get cpu cycle counters
- in sync and keep them there, we can't use the rpcc. */
-static cycle_t read_rpcc(struct clocksource *cs)
-{
- cycle_t ret = (cycle_t)rpcc();
- return ret;
-}
-
-static struct clocksource clocksource_rpcc = {
- .name = "rpcc",
- .rating = 300,
- .read = read_rpcc,
- .mask = CLOCKSOURCE_MASK(32),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS
-};
-
-static inline void register_rpcc_clocksource(long cycle_freq)
-{
- clocksource_register_hz(&clocksource_rpcc, cycle_freq);
-}
-#else /* !CONFIG_SMP */
-static inline void register_rpcc_clocksource(long cycle_freq)
-{
-}
-#endif /* !CONFIG_SMP */
-
void __init
time_init(void)
{
@@ -387,6 +398,15 @@ time_init(void)
unsigned long cycle_freq, tolerance;
long diff;
+ if (alpha_using_qemu) {
+ clocksource_register_hz(&qemu_cs, NSEC_PER_SEC);
+ init_qemu_clockevent();
+
+ timer_irqaction.handler = qemu_timer_interrupt;
+ init_rtc_irq();
+ return;
+ }
+
/* Calibrate CPU clock -- attempt #1. */
if (!est_cycle_freq)
est_cycle_freq = validate_cc_value(calibrate_cc_with_pit());
@@ -421,100 +441,25 @@ time_init(void)
"and unable to estimate a proper value!\n");
}
- /* From John Bowman <bowman@math.ualberta.ca>: allow the values
- to settle, as the Update-In-Progress bit going low isn't good
- enough on some hardware. 2ms is our guess; we haven't found
- bogomips yet, but this is close on a 500Mhz box. */
- __delay(1000000);
-
-
- if (HZ > (1<<16)) {
- extern void __you_loose (void);
- __you_loose();
- }
-
- register_rpcc_clocksource(cycle_freq);
-
- state.last_time = cc1;
- state.scaled_ticks_per_cycle
- = ((unsigned long) HZ << FIX_SHIFT) / cycle_freq;
- state.partial_tick = 0L;
+ /* See above for restrictions on using clocksource_rpcc. */
+#ifndef CONFIG_ALPHA_WTINT
+ if (hwrpb->nr_processors == 1)
+ clocksource_register_hz(&clocksource_rpcc, cycle_freq);
+#endif
/* Startup the timer source. */
alpha_mv.init_rtc();
+ init_rtc_clockevent();
}
-/*
- * In order to set the CMOS clock precisely, set_rtc_mmss has to be
- * called 500 ms after the second nowtime has started, because when
- * nowtime is written into the registers of the CMOS clock, it will
- * jump to the next second precisely 500 ms later. Check the Motorola
- * MC146818A or Dallas DS12887 data sheet for details.
- *
- * BUG: This routine does not handle hour overflow properly; it just
- * sets the minutes. Usually you won't notice until after reboot!
- */
-
-
-static int
-set_rtc_mmss(unsigned long nowtime)
+/* Initialize the clock_event_device for secondary cpus. */
+#ifdef CONFIG_SMP
+void __init
+init_clockevent(void)
{
- int retval = 0;
- int real_seconds, real_minutes, cmos_minutes;
- unsigned char save_control, save_freq_select;
-
- /* irq are locally disabled here */
- spin_lock(&rtc_lock);
- /* Tell the clock it's being set */
- save_control = CMOS_READ(RTC_CONTROL);
- CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
-
- /* Stop and reset prescaler */
- save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
- CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
-
- cmos_minutes = CMOS_READ(RTC_MINUTES);
- if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
- cmos_minutes = bcd2bin(cmos_minutes);
-
- /*
- * since we're only adjusting minutes and seconds,
- * don't interfere with hour overflow. This avoids
- * messing with unknown time zones but requires your
- * RTC not to be off by more than 15 minutes
- */
- real_seconds = nowtime % 60;
- real_minutes = nowtime / 60;
- if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) {
- /* correct for half hour time zone */
- real_minutes += 30;
- }
- real_minutes %= 60;
-
- if (abs(real_minutes - cmos_minutes) < 30) {
- if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
- real_seconds = bin2bcd(real_seconds);
- real_minutes = bin2bcd(real_minutes);
- }
- CMOS_WRITE(real_seconds,RTC_SECONDS);
- CMOS_WRITE(real_minutes,RTC_MINUTES);
- } else {
- printk_once(KERN_NOTICE
- "set_rtc_mmss: can't update from %d to %d\n",
- cmos_minutes, real_minutes);
- retval = -1;
- }
-
- /* The following flags have to be released exactly in this order,
- * otherwise the DS12887 (popular MC146818A clone with integrated
- * battery and quartz) will not reset the oscillator and will not
- * update precisely 500 ms later. You won't find this mentioned in
- * the Dallas Semiconductor data sheets, but who believes data
- * sheets anyway ... -- Markus Kuhn
- */
- CMOS_WRITE(save_control, RTC_CONTROL);
- CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
- spin_unlock(&rtc_lock);
-
- return retval;
+ if (alpha_using_qemu)
+ init_qemu_clockevent();
+ else
+ init_rtc_clockevent();
}
+#endif
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index bd0665cdc840..9c4c189eb22f 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -241,6 +241,21 @@ do_entIF(unsigned long type, struct pt_regs *regs)
(const char *)(data[1] | (long)data[2] << 32),
data[0]);
}
+#ifdef CONFIG_ALPHA_WTINT
+ if (type == 4) {
+ /* If CALL_PAL WTINT is totally unsupported by the
+ PALcode, e.g. MILO, "emulate" it by overwriting
+ the insn. */
+ unsigned int *pinsn
+ = (unsigned int *) regs->pc - 1;
+ if (*pinsn == PAL_wtint) {
+ *pinsn = 0x47e01400; /* mov 0,$0 */
+ imb();
+ regs->r0 = 0;
+ return;
+ }
+ }
+#endif /* ALPHA_WTINT */
die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
regs, type, NULL);
}
diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
index ffb19b7da999..ff3c10721caf 100644
--- a/arch/alpha/lib/csum_partial_copy.c
+++ b/arch/alpha/lib/csum_partial_copy.c
@@ -130,7 +130,7 @@ csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst,
*dst = word | tmp;
checksum += carry;
}
- if (err) *errp = err;
+ if (err && errp) *errp = err;
return checksum;
}
@@ -185,7 +185,7 @@ csum_partial_cfu_dest_aligned(const unsigned long __user *src,
*dst = word | tmp;
checksum += carry;
}
- if (err) *errp = err;
+ if (err && errp) *errp = err;
return checksum;
}
@@ -242,7 +242,7 @@ csum_partial_cfu_src_aligned(const unsigned long __user *src,
stq_u(partial_dest | second_dest, dst);
out:
checksum += carry;
- if (err) *errp = err;
+ if (err && errp) *errp = err;
return checksum;
}
@@ -325,7 +325,7 @@ csum_partial_cfu_unaligned(const unsigned long __user * src,
stq_u(partial_dest | word | second_dest, dst);
checksum += carry;
}
- if (err) *errp = err;
+ if (err && errp) *errp = err;
return checksum;
}
@@ -339,7 +339,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst, int len,
if (len) {
if (!access_ok(VERIFY_READ, src, len)) {
- *errp = -EFAULT;
+ if (errp) *errp = -EFAULT;
memset(dst, 0, len);
return sum;
}
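
All four hunks apply the same defensive convention: the error out-parameter
may legitimately be NULL when the caller has no use for the fault code, so
every store through it must be guarded. A minimal sketch of the pattern
(hypothetical function, not the kernel API):

```c
/* An optional error out-parameter: callers pass NULL to opt out. */
#include <stddef.h>
#include <stdio.h>

static int do_work(int fail, int *errp)
{
	int err = fail ? -14 : 0;	/* stand-in for -EFAULT */

	if (err && errp)	/* same guard as the patch */
		*errp = err;
	return err ? 0 : 42;
}

int main(void)
{
	int err = 0;

	printf("%d\n", do_work(0, &err));	/* 42, err untouched */
	printf("%d\n", do_work(1, NULL));	/* 0, and no NULL dereference */
	return 0;
}
```
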
diff --git a/arch/alpha/lib/ev6-memset.S b/arch/alpha/lib/ev6-memset.S
index d8b94e1c7fca..356bb2fdd705 100644
--- a/arch/alpha/lib/ev6-memset.S
+++ b/arch/alpha/lib/ev6-memset.S
@@ -30,14 +30,15 @@
.set noat
.set noreorder
.text
+ .globl memset
.globl __memset
+ .globl ___memset
.globl __memsetw
.globl __constant_c_memset
- .globl memset
- .ent __memset
+ .ent ___memset
.align 5
-__memset:
+___memset:
.frame $30,0,$26,0
.prologue 0
@@ -227,7 +228,7 @@ end_b:
nop
nop
ret $31,($26),1 # L0 :
- .end __memset
+ .end ___memset
/*
* This is the original body of code, prior to replication and
@@ -594,4 +595,5 @@ end_w:
.end __memsetw
-memset = __memset
+memset = ___memset
+__memset = ___memset
diff --git a/arch/alpha/lib/memset.S b/arch/alpha/lib/memset.S
index 311b8cfc6914..76ccc6d1f364 100644
--- a/arch/alpha/lib/memset.S
+++ b/arch/alpha/lib/memset.S
@@ -19,11 +19,13 @@
.text
.globl memset
.globl __memset
+ .globl ___memset
.globl __memsetw
.globl __constant_c_memset
- .ent __memset
+
+ .ent ___memset
.align 5
-__memset:
+___memset:
.frame $30,0,$26,0
.prologue 0
@@ -103,7 +105,7 @@ within_one_quad:
end:
ret $31,($26),1 /* E1 */
- .end __memset
+ .end ___memset
.align 5
.ent __memsetw
@@ -121,4 +123,5 @@ __memsetw:
.end __memsetw
-memset = __memset
+memset = ___memset
+__memset = ___memset
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 2ee0c9bfd032..9063ae6553cc 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -8,6 +8,7 @@
config ARC
def_bool y
+ select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
# ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev
select DEVTMPFS if !INITRAMFS_SOURCE=""
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 5943f7f9d325..93e6ca919620 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -47,3 +47,4 @@ generic-y += user.h
generic-y += vga.h
generic-y += xor.h
generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index 6f30484f34b7..39e58d1cdf90 100644
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -8,6 +8,13 @@
/******** no-legacy-syscalls-ABI *******/
+/*
+ * Non-typical guard macro: it allows this header to be included twice
+ * in the arch sys.c, which is how the generic syscall wrapper
+ * generator works.
+ */
+#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL)
+#define _UAPI_ASM_ARC_UNISTD_H
+
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_VFORK
@@ -32,3 +39,7 @@ __SYSCALL(__NR_arc_gettls, sys_arc_gettls)
/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
#define __NR_sysfs (__NR_arch_specific_syscall + 3)
__SYSCALL(__NR_sysfs, sys_sysfs)
+
+#undef __SYSCALL
+
+#endif
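
The guard admits the header a second time whenever __SYSCALL is defined: the
syscall-table generator defines __SYSCALL to emit one table entry and then
re-includes the header. A self-contained user-space imitation of the idea,
using an X-macro in place of the re-included header (names are illustrative):

```c
/* The same list expanded twice: once for declarations, once for the table. */
#include <stdio.h>

#define SYSCALL_LIST(X)	\
	X(0, sys_hello)	\
	X(1, sys_bye)

#define DECLARE(nr, fn) static void fn(void);
SYSCALL_LIST(DECLARE)
#undef DECLARE

#define ENTRY(nr, fn) [nr] = fn,
static void (*const table[])(void) = { SYSCALL_LIST(ENTRY) };
#undef ENTRY

static void sys_hello(void) { puts("hello"); }
static void sys_bye(void)   { puts("bye"); }

int main(void)
{
	table[0]();
	table[1]();
	return 0;
}
```
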
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index e46d81f70979..63177e4cb66d 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -79,9 +79,9 @@ static int arc_pmu_cache_event(u64 config)
cache_result = (config >> 16) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
return -EINVAL;
- if (cache_type >= PERF_COUNT_HW_CACHE_OP_MAX)
+ if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
return -EINVAL;
- if (cache_type >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
ret = arc_pmu_cache_map[cache_type][cache_op][cache_result];
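
The fix replaces two copy-pasted cache_type comparisons with the intended
cache_op and cache_result bounds checks: each index into the 3-D map must be
validated against its own limit before the lookup. A tiny sketch of the
corrected pattern (illustrative limits and values):

```c
/* Validate every index of a 3-D lookup against its own bound --
 * the bug was checking cache_type against all three limits. */
#include <stdio.h>

enum { TYPE_MAX = 2, OP_MAX = 3, RESULT_MAX = 2 };

static const int map[TYPE_MAX][OP_MAX][RESULT_MAX] = {
	{ {1, 2}, {3, 4}, {5, 6} },
};

static int lookup(unsigned type, unsigned op, unsigned result)
{
	if (type >= TYPE_MAX)
		return -1;
	if (op >= OP_MAX)		/* was: type >= OP_MAX */
		return -1;
	if (result >= RESULT_MAX)	/* was: type >= RESULT_MAX */
		return -1;
	return map[type][op][result];
}

int main(void)
{
	printf("%d\n", lookup(0, 1, 1));	/* 4 */
	printf("%d\n", lookup(0, 3, 0));	/* -1: op out of range */
	return 0;
}
```
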
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 214b698cefea..c1f1a7eee953 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -25,7 +25,7 @@ config ARM
select HARDIRQS_SW_RESEND
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KGDB
- select HAVE_ARCH_SECCOMP_FILTER
+ select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
select HAVE_ARCH_TRACEHOOK
select HAVE_BPF_JIT
select HAVE_CONTEXT_TRACKING
@@ -1496,6 +1496,7 @@ config HAVE_ARM_ARCH_TIMER
bool "Architected timer support"
depends on CPU_V7
select ARM_ARCH_TIMER
+ select GENERIC_CLOCKEVENTS
help
This option enables support for the ARM architected timer
@@ -1719,7 +1720,6 @@ config AEABI
config OABI_COMPAT
bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)"
depends on AEABI && !THUMB2_KERNEL
- default y
help
This option preserves the old syscall interface along with the
new (ARM EABI) one. It also provides a compatibility layer to
@@ -1727,11 +1727,16 @@ config OABI_COMPAT
in memory differs between the legacy ABI and the new ARM EABI
(only for non "thumb" binaries). This option adds a tiny
overhead to all syscalls and produces a slightly larger kernel.
+
+ The seccomp filter system will not be available when this is
+ selected, since there is no way yet to sensibly distinguish
+ between calling conventions during filtering.
+
If you know you'll be using only pure EABI user space then you
can say N here. If this option is not selected and you attempt
to execute a legacy ABI binary then the result will be
UNPREDICTABLE (in fact it can be predicted that it won't work
- at all). If in doubt say Y.
+ at all). If in doubt say N.
config ARCH_HAS_HOLES_MEMORYMODEL
bool
diff --git a/arch/arm/boot/dts/am335x-base0033.dts b/arch/arm/boot/dts/am335x-base0033.dts
index b4f95c2bbf74..72a9b3fc4251 100644
--- a/arch/arm/boot/dts/am335x-base0033.dts
+++ b/arch/arm/boot/dts/am335x-base0033.dts
@@ -13,4 +13,83 @@
/ {
model = "IGEP COM AM335x on AQUILA Expansion";
compatible = "isee,am335x-base0033", "isee,am335x-igep0033", "ti,am33xx";
+
+ hdmi {
+ compatible = "ti,tilcdc,slave";
+ i2c = <&i2c0>;
+ pinctrl-names = "default", "off";
+ pinctrl-0 = <&nxp_hdmi_pins>;
+ pinctrl-1 = <&nxp_hdmi_off_pins>;
+ status = "okay";
+ };
+
+ leds_base {
+ pinctrl-names = "default";
+ pinctrl-0 = <&leds_base_pins>;
+
+ compatible = "gpio-leds";
+
+ led@0 {
+ label = "base:red:user";
+ gpios = <&gpio1 21 GPIO_ACTIVE_HIGH>; /* gpio1_21 */
+ default-state = "off";
+ };
+
+ led@1 {
+ label = "base:green:user";
+ gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>; /* gpio2_0 */
+ default-state = "off";
+ };
+ };
+};
+
+&am33xx_pinmux {
+ nxp_hdmi_pins: pinmux_nxp_hdmi_pins {
+ pinctrl-single,pins = <
+ 0x1b0 (PIN_OUTPUT | MUX_MODE3) /* xdma_event_intr0.clkout1 */
+ 0xa0 (PIN_OUTPUT | MUX_MODE0) /* lcd_data0 */
+ 0xa4 (PIN_OUTPUT | MUX_MODE0) /* lcd_data1 */
+ 0xa8 (PIN_OUTPUT | MUX_MODE0) /* lcd_data2 */
+ 0xac (PIN_OUTPUT | MUX_MODE0) /* lcd_data3 */
+ 0xb0 (PIN_OUTPUT | MUX_MODE0) /* lcd_data4 */
+ 0xb4 (PIN_OUTPUT | MUX_MODE0) /* lcd_data5 */
+ 0xb8 (PIN_OUTPUT | MUX_MODE0) /* lcd_data6 */
+ 0xbc (PIN_OUTPUT | MUX_MODE0) /* lcd_data7 */
+ 0xc0 (PIN_OUTPUT | MUX_MODE0) /* lcd_data8 */
+ 0xc4 (PIN_OUTPUT | MUX_MODE0) /* lcd_data9 */
+ 0xc8 (PIN_OUTPUT | MUX_MODE0) /* lcd_data10 */
+ 0xcc (PIN_OUTPUT | MUX_MODE0) /* lcd_data11 */
+ 0xd0 (PIN_OUTPUT | MUX_MODE0) /* lcd_data12 */
+ 0xd4 (PIN_OUTPUT | MUX_MODE0) /* lcd_data13 */
+ 0xd8 (PIN_OUTPUT | MUX_MODE0) /* lcd_data14 */
+ 0xdc (PIN_OUTPUT | MUX_MODE0) /* lcd_data15 */
+ 0xe0 (PIN_OUTPUT | MUX_MODE0) /* lcd_vsync */
+ 0xe4 (PIN_OUTPUT | MUX_MODE0) /* lcd_hsync */
+ 0xe8 (PIN_OUTPUT | MUX_MODE0) /* lcd_pclk */
+ 0xec (PIN_OUTPUT | MUX_MODE0) /* lcd_ac_bias_en */
+ >;
+ };
+ nxp_hdmi_off_pins: pinmux_nxp_hdmi_off_pins {
+ pinctrl-single,pins = <
+ 0x1b0 (PIN_OUTPUT | MUX_MODE3) /* xdma_event_intr0.clkout1 */
+ >;
+ };
+
+ leds_base_pins: pinmux_leds_base_pins {
+ pinctrl-single,pins = <
+ 0x54 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a5.gpio1_21 */
+ 0x88 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_csn3.gpio2_0 */
+ >;
+ };
+};
+
+&lcdc {
+ status = "okay";
+};
+
+&i2c0 {
+ eeprom: eeprom@50 {
+ compatible = "at,24c256";
+ reg = <0x50>;
+ };
};
diff --git a/arch/arm/boot/dts/am335x-igep0033.dtsi b/arch/arm/boot/dts/am335x-igep0033.dtsi
index 619624479311..7063311a58d9 100644
--- a/arch/arm/boot/dts/am335x-igep0033.dtsi
+++ b/arch/arm/boot/dts/am335x-igep0033.dtsi
@@ -199,6 +199,35 @@
pinctrl-0 = <&uart0_pins>;
};
+&usb {
+ status = "okay";
+
+ control@44e10000 {
+ status = "okay";
+ };
+
+ usb-phy@47401300 {
+ status = "okay";
+ };
+
+ usb-phy@47401b00 {
+ status = "okay";
+ };
+
+ usb@47401000 {
+ status = "okay";
+ };
+
+ usb@47401800 {
+ status = "okay";
+ dr_mode = "host";
+ };
+
+ dma-controller@07402000 {
+ status = "okay";
+ };
+};
+
#include "tps65910.dtsi"
&tps {
diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts
index e99dfaf70052..03fcbf0a88a8 100644
--- a/arch/arm/boot/dts/am3517-evm.dts
+++ b/arch/arm/boot/dts/am3517-evm.dts
@@ -7,11 +7,11 @@
*/
/dts-v1/;
-#include "omap34xx.dtsi"
+#include "am3517.dtsi"
/ {
- model = "TI AM3517 EVM (AM3517/05)";
- compatible = "ti,am3517-evm", "ti,omap3";
+ model = "TI AM3517 EVM (AM3517/05 TMDSEVM3517)";
+ compatible = "ti,am3517-evm", "ti,am3517", "ti,omap3";
memory {
device_type = "memory";
diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi
new file mode 100644
index 000000000000..2fbe02faa8b1
--- /dev/null
+++ b/arch/arm/boot/dts/am3517.dtsi
@@ -0,0 +1,63 @@
+/*
+ * Device Tree Source for am3517 SoC
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include "omap3.dtsi"
+
+/ {
+ aliases {
+ serial3 = &uart4;
+ };
+
+ ocp {
+ am35x_otg_hs: am35x_otg_hs@5c040000 {
+ compatible = "ti,omap3-musb";
+ ti,hwmods = "am35x_otg_hs";
+ status = "disabled";
+ reg = <0x5c040000 0x1000>;
+ interrupts = <71>;
+ interrupt-names = "mc";
+ };
+
+ davinci_emac: ethernet@0x5c000000 {
+ compatible = "ti,am3517-emac";
+ ti,hwmods = "davinci_emac";
+ status = "disabled";
+ reg = <0x5c000000 0x30000>;
+ interrupts = <67 68 69 70>;
+ ti,davinci-ctrl-reg-offset = <0x10000>;
+ ti,davinci-ctrl-mod-reg-offset = <0>;
+ ti,davinci-ctrl-ram-offset = <0x20000>;
+ ti,davinci-ctrl-ram-size = <0x2000>;
+ ti,davinci-rmii-en = /bits/ 8 <1>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+
+ davinci_mdio: ethernet@0x5c030000 {
+ compatible = "ti,davinci_mdio";
+ ti,hwmods = "davinci_mdio";
+ status = "disabled";
+ reg = <0x5c030000 0x1000>;
+ bus_freq = <1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ uart4: serial@4809e000 {
+ compatible = "ti,omap3-uart";
+ ti,hwmods = "uart4";
+ status = "disabled";
+ reg = <0x4809e000 0x400>;
+ interrupts = <84>;
+ dmas = <&sdma 55 &sdma 54>;
+ dma-names = "tx", "rx";
+ clock-frequency = <48000000>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/armada-370-db.dts b/arch/arm/boot/dts/armada-370-db.dts
index 90ce29dbe119..08a56bcfc724 100644
--- a/arch/arm/boot/dts/armada-370-db.dts
+++ b/arch/arm/boot/dts/armada-370-db.dts
@@ -99,22 +99,22 @@
spi-max-frequency = <50000000>;
};
};
+ };
- pcie-controller {
+ pcie-controller {
+ status = "okay";
+ /*
+ * The two PCIe units are accessible through
+ * both standard PCIe slots and mini-PCIe
+ * slots on the board.
+ */
+ pcie@1,0 {
+ /* Port 0, Lane 0 */
+ status = "okay";
+ };
+ pcie@2,0 {
+ /* Port 1, Lane 0 */
status = "okay";
- /*
- * The two PCIe units are accessible through
- * both standard PCIe slots and mini-PCIe
- * slots on the board.
- */
- pcie@1,0 {
- /* Port 0, Lane 0 */
- status = "okay";
- };
- pcie@2,0 {
- /* Port 1, Lane 0 */
- status = "okay";
- };
};
};
};
diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
index 00d6a798c705..7f10f627ae5b 100644
--- a/arch/arm/boot/dts/armada-370-xp.dtsi
+++ b/arch/arm/boot/dts/armada-370-xp.dtsi
@@ -118,7 +118,7 @@
coherency-fabric@20200 {
compatible = "marvell,coherency-fabric";
- reg = <0x20200 0xb0>, <0x21810 0x1c>;
+ reg = <0x20200 0xb0>, <0x21010 0x1c>;
};
serial@12000 {
diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
index 3f5e6121c730..98335fb34b7a 100644
--- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
@@ -47,7 +47,7 @@
/*
* MV78230 has 2 PCIe units Gen2.0: One unit can be
* configured as x4 or quad x1 lanes. One unit is
- * x4/x1.
+ * x1 only.
*/
pcie-controller {
compatible = "marvell,armada-xp-pcie";
@@ -62,10 +62,10 @@
ranges =
<0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000 /* Port 0.0 registers */
- 0x82000000 0 0x42000 MBUS_ID(0xf0, 0x01) 0x42000 0 0x00002000 /* Port 2.0 registers */
0x82000000 0 0x44000 MBUS_ID(0xf0, 0x01) 0x44000 0 0x00002000 /* Port 0.1 registers */
0x82000000 0 0x48000 MBUS_ID(0xf0, 0x01) 0x48000 0 0x00002000 /* Port 0.2 registers */
0x82000000 0 0x4c000 MBUS_ID(0xf0, 0x01) 0x4c000 0 0x00002000 /* Port 0.3 registers */
+ 0x82000000 0 0x80000 MBUS_ID(0xf0, 0x01) 0x80000 0 0x00002000 /* Port 1.0 registers */
0x82000000 0x1 0 MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 0.0 MEM */
0x81000000 0x1 0 MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 0.0 IO */
0x82000000 0x2 0 MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 0.1 MEM */
@@ -74,8 +74,8 @@
0x81000000 0x3 0 MBUS_ID(0x04, 0xb0) 0 1 0 /* Port 0.2 IO */
0x82000000 0x4 0 MBUS_ID(0x04, 0x78) 0 1 0 /* Port 0.3 MEM */
0x81000000 0x4 0 MBUS_ID(0x04, 0x70) 0 1 0 /* Port 0.3 IO */
- 0x82000000 0x9 0 MBUS_ID(0x04, 0xf8) 0 1 0 /* Port 2.0 MEM */
- 0x81000000 0x9 0 MBUS_ID(0x04, 0xf0) 0 1 0 /* Port 2.0 IO */>;
+ 0x82000000 0x5 0 MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 MEM */
+ 0x81000000 0x5 0 MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 1.0 IO */>;
pcie@1,0 {
device_type = "pci";
@@ -145,20 +145,20 @@
status = "disabled";
};
- pcie@9,0 {
+ pcie@5,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x42000 0 0x2000>;
- reg = <0x4800 0 0 0 0>;
+ assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
+ reg = <0x2800 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
#interrupt-cells = <1>;
- ranges = <0x82000000 0 0 0x82000000 0x9 0 1 0
- 0x81000000 0 0 0x81000000 0x9 0 1 0>;
+ ranges = <0x82000000 0 0 0x82000000 0x5 0 1 0
+ 0x81000000 0 0 0x81000000 0x5 0 1 0>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &mpic 99>;
- marvell,pcie-port = <2>;
+ interrupt-map = <0 0 0 0 &mpic 62>;
+ marvell,pcie-port = <1>;
marvell,pcie-lane = <0>;
- clocks = <&gateclk 26>;
+ clocks = <&gateclk 9>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
index 3e9fd1353f89..66609684d41b 100644
--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
@@ -48,7 +48,7 @@
/*
* MV78260 has 3 PCIe units Gen2.0: Two units can be
* configured as x4 or quad x1 lanes. One unit is
- * x4/x1.
+ * x4 only.
*/
pcie-controller {
compatible = "marvell,armada-xp-pcie";
@@ -68,7 +68,9 @@
0x82000000 0 0x48000 MBUS_ID(0xf0, 0x01) 0x48000 0 0x00002000 /* Port 0.2 registers */
0x82000000 0 0x4c000 MBUS_ID(0xf0, 0x01) 0x4c000 0 0x00002000 /* Port 0.3 registers */
0x82000000 0 0x80000 MBUS_ID(0xf0, 0x01) 0x80000 0 0x00002000 /* Port 1.0 registers */
- 0x82000000 0 0x82000 MBUS_ID(0xf0, 0x01) 0x82000 0 0x00002000 /* Port 3.0 registers */
+ 0x82000000 0 0x84000 MBUS_ID(0xf0, 0x01) 0x84000 0 0x00002000 /* Port 1.1 registers */
+ 0x82000000 0 0x88000 MBUS_ID(0xf0, 0x01) 0x88000 0 0x00002000 /* Port 1.2 registers */
+ 0x82000000 0 0x8c000 MBUS_ID(0xf0, 0x01) 0x8c000 0 0x00002000 /* Port 1.3 registers */
0x82000000 0x1 0 MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 0.0 MEM */
0x81000000 0x1 0 MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 0.0 IO */
0x82000000 0x2 0 MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 0.1 MEM */
@@ -77,10 +79,18 @@
0x81000000 0x3 0 MBUS_ID(0x04, 0xb0) 0 1 0 /* Port 0.2 IO */
0x82000000 0x4 0 MBUS_ID(0x04, 0x78) 0 1 0 /* Port 0.3 MEM */
0x81000000 0x4 0 MBUS_ID(0x04, 0x70) 0 1 0 /* Port 0.3 IO */
- 0x82000000 0x9 0 MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 MEM */
- 0x81000000 0x9 0 MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 1.0 IO */
- 0x82000000 0xa 0 MBUS_ID(0x08, 0xf8) 0 1 0 /* Port 3.0 MEM */
- 0x81000000 0xa 0 MBUS_ID(0x08, 0xf0) 0 1 0 /* Port 3.0 IO */>;
+
+ 0x82000000 0x5 0 MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 MEM */
+ 0x81000000 0x5 0 MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 1.0 IO */
+ 0x82000000 0x6 0 MBUS_ID(0x08, 0xd8) 0 1 0 /* Port 1.1 MEM */
+ 0x81000000 0x6 0 MBUS_ID(0x08, 0xd0) 0 1 0 /* Port 1.1 IO */
+ 0x82000000 0x7 0 MBUS_ID(0x08, 0xb8) 0 1 0 /* Port 1.2 MEM */
+ 0x81000000 0x7 0 MBUS_ID(0x08, 0xb0) 0 1 0 /* Port 1.2 IO */
+ 0x82000000 0x8 0 MBUS_ID(0x08, 0x78) 0 1 0 /* Port 1.3 MEM */
+ 0x81000000 0x8 0 MBUS_ID(0x08, 0x70) 0 1 0 /* Port 1.3 IO */
+
+ 0x82000000 0x9 0 MBUS_ID(0x04, 0xf8) 0 1 0 /* Port 2.0 MEM */
+ 0x81000000 0x9 0 MBUS_ID(0x04, 0xf0) 0 1 0 /* Port 2.0 IO */>;
pcie@1,0 {
device_type = "pci";
@@ -106,8 +116,8 @@
#address-cells = <3>;
#size-cells = <2>;
#interrupt-cells = <1>;
- ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0
- 0x81000000 0 0 0x81000000 0x2 0 1 0>;
+ ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0
+ 0x81000000 0 0 0x81000000 0x2 0 1 0>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &mpic 59>;
marvell,pcie-port = <0>;
@@ -150,37 +160,88 @@
status = "disabled";
};
- pcie@9,0 {
+ pcie@5,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x42000 0 0x2000>;
- reg = <0x4800 0 0 0 0>;
+ assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
+ reg = <0x2800 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
#interrupt-cells = <1>;
- ranges = <0x82000000 0 0 0x82000000 0x9 0 1 0
- 0x81000000 0 0 0x81000000 0x9 0 1 0>;
+ ranges = <0x82000000 0 0 0x82000000 0x5 0 1 0
+ 0x81000000 0 0 0x81000000 0x5 0 1 0>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &mpic 99>;
- marvell,pcie-port = <2>;
+ interrupt-map = <0 0 0 0 &mpic 62>;
+ marvell,pcie-port = <1>;
marvell,pcie-lane = <0>;
- clocks = <&gateclk 26>;
+ clocks = <&gateclk 9>;
status = "disabled";
};
- pcie@10,0 {
+ pcie@6,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x82000 0 0x2000>;
- reg = <0x5000 0 0 0 0>;
+ assigned-addresses = <0x82000800 0 0x84000 0 0x2000>;
+ reg = <0x3000 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
#interrupt-cells = <1>;
- ranges = <0x82000000 0 0 0x82000000 0xa 0 1 0
- 0x81000000 0 0 0x81000000 0xa 0 1 0>;
+ ranges = <0x82000000 0 0 0x82000000 0x6 0 1 0
+ 0x81000000 0 0 0x81000000 0x6 0 1 0>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &mpic 103>;
- marvell,pcie-port = <3>;
+ interrupt-map = <0 0 0 0 &mpic 63>;
+ marvell,pcie-port = <1>;
+ marvell,pcie-lane = <1>;
+ clocks = <&gateclk 10>;
+ status = "disabled";
+ };
+
+ pcie@7,0 {
+ device_type = "pci";
+ assigned-addresses = <0x82000800 0 0x88000 0 0x2000>;
+ reg = <0x3800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges = <0x82000000 0 0 0x82000000 0x7 0 1 0
+ 0x81000000 0 0 0x81000000 0x7 0 1 0>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &mpic 64>;
+ marvell,pcie-port = <1>;
+ marvell,pcie-lane = <2>;
+ clocks = <&gateclk 11>;
+ status = "disabled";
+ };
+
+ pcie@8,0 {
+ device_type = "pci";
+ assigned-addresses = <0x82000800 0 0x8c000 0 0x2000>;
+ reg = <0x4000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges = <0x82000000 0 0 0x82000000 0x8 0 1 0
+ 0x81000000 0 0 0x81000000 0x8 0 1 0>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &mpic 65>;
+ marvell,pcie-port = <1>;
+ marvell,pcie-lane = <3>;
+ clocks = <&gateclk 12>;
+ status = "disabled";
+ };
+
+ pcie@9,0 {
+ device_type = "pci";
+ assigned-addresses = <0x82000800 0 0x42000 0 0x2000>;
+ reg = <0x4800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges = <0x82000000 0 0 0x82000000 0x9 0 1 0
+ 0x81000000 0 0 0x81000000 0x9 0 1 0>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &mpic 99>;
+ marvell,pcie-port = <2>;
marvell,pcie-lane = <0>;
- clocks = <&gateclk 27>;
+ clocks = <&gateclk 26>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/at91sam9x5_usart3.dtsi b/arch/arm/boot/dts/at91sam9x5_usart3.dtsi
index 2347e9563cef..6801106fa1f8 100644
--- a/arch/arm/boot/dts/at91sam9x5_usart3.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5_usart3.dtsi
@@ -11,6 +11,10 @@
#include <dt-bindings/interrupt-controller/irq.h>
/ {
+ aliases {
+ serial4 = &usart3;
+ };
+
ahb {
apb {
pinctrl@fffff400 {
diff --git a/arch/arm/boot/dts/bcm2835.dtsi b/arch/arm/boot/dts/bcm2835.dtsi
index 1e12aeff403b..aa537ed13f0a 100644
--- a/arch/arm/boot/dts/bcm2835.dtsi
+++ b/arch/arm/boot/dts/bcm2835.dtsi
@@ -85,6 +85,8 @@
reg = <0x7e205000 0x1000>;
interrupts = <2 21>;
clocks = <&clk_i2c>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -93,6 +95,8 @@
reg = <0x7e804000 0x1000>;
interrupts = <2 21>;
clocks = <&clk_i2c>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/cros5250-common.dtsi b/arch/arm/boot/dts/cros5250-common.dtsi
index dc259e8b8a73..9b186ac06c8b 100644
--- a/arch/arm/boot/dts/cros5250-common.dtsi
+++ b/arch/arm/boot/dts/cros5250-common.dtsi
@@ -27,6 +27,13 @@
i2c2_bus: i2c2-bus {
samsung,pin-pud = <0>;
};
+
+ max77686_irq: max77686-irq {
+ samsung,pins = "gpx3-2";
+ samsung,pin-function = <0>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
};
i2c@12C60000 {
@@ -35,6 +42,11 @@
max77686@09 {
compatible = "maxim,max77686";
+ interrupt-parent = <&gpx3>;
+ interrupts = <2 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&max77686_irq>;
+ wakeup-source;
reg = <0x09>;
voltage-regulators {
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 59154dc15fe4..fb28b2ecb1db 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -161,7 +161,7 @@
clocks = <&clks 197>, <&clks 3>,
<&clks 197>, <&clks 107>,
<&clks 0>, <&clks 118>,
- <&clks 62>, <&clks 139>,
+ <&clks 0>, <&clks 139>,
<&clks 0>;
clock-names = "core", "rxtx0",
"rxtx1", "rxtx2",
diff --git a/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi b/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi
index 9c18adf788f7..f577b7df9a29 100644
--- a/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi
+++ b/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi
@@ -44,8 +44,8 @@
gpmc,wr-access-ns = <186>;
gpmc,cycle2cycle-samecsen;
gpmc,cycle2cycle-diffcsen;
- vmmc-supply = <&vddvario>;
- vmmc_aux-supply = <&vdd33a>;
+ vddvario-supply = <&vddvario>;
+ vdd33a-supply = <&vdd33a>;
reg-io-width = <4>;
smsc,save-mac-address;
};
diff --git a/arch/arm/boot/dts/omap-zoom-common.dtsi b/arch/arm/boot/dts/omap-zoom-common.dtsi
index b0ee342598f0..68221fab978d 100644
--- a/arch/arm/boot/dts/omap-zoom-common.dtsi
+++ b/arch/arm/boot/dts/omap-zoom-common.dtsi
@@ -13,7 +13,7 @@
* they probably share the same GPIO IRQ
* REVISIT: Add timing support from slls644g.pdf
*/
- 8250@3,0 {
+ uart@3,0 {
compatible = "ns16550a";
reg = <3 0 0x100>;
bank-width = <2>;
diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi
index a2bfcde858a6..d0c5b37e248c 100644
--- a/arch/arm/boot/dts/omap2.dtsi
+++ b/arch/arm/boot/dts/omap2.dtsi
@@ -9,6 +9,7 @@
*/
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/pinctrl/omap.h>
#include "skeleton.dtsi"
@@ -21,6 +22,8 @@
serial0 = &uart1;
serial1 = &uart2;
serial2 = &uart3;
+ i2c0 = &i2c1;
+ i2c1 = &i2c2;
};
cpus {
@@ -53,6 +56,28 @@
ranges;
ti,hwmods = "l3_main";
+ aes: aes@480a6000 {
+ compatible = "ti,omap2-aes";
+ ti,hwmods = "aes";
+ reg = <0x480a6000 0x50>;
+ dmas = <&sdma 9 &sdma 10>;
+ dma-names = "tx", "rx";
+ };
+
+ hdq1w: 1w@480b2000 {
+ compatible = "ti,omap2420-1w";
+ ti,hwmods = "hdq1w";
+ reg = <0x480b2000 0x1000>;
+ interrupts = <58>;
+ };
+
+ mailbox: mailbox@48094000 {
+ compatible = "ti,omap2-mailbox";
+ ti,hwmods = "mailbox";
+ reg = <0x48094000 0x200>;
+ interrupts = <26>;
+ };
+
intc: interrupt-controller@1 {
compatible = "ti,omap2-intc";
interrupt-controller;
@@ -63,6 +88,7 @@
sdma: dma-controller@48056000 {
compatible = "ti,omap2430-sdma", "ti,omap2420-sdma";
+ ti,hwmods = "dma";
reg = <0x48056000 0x1000>;
interrupts = <12>,
<13>,
@@ -73,21 +99,91 @@
#dma-requests = <64>;
};
+ i2c1: i2c@48070000 {
+ compatible = "ti,omap2-i2c";
+ ti,hwmods = "i2c1";
+ reg = <0x48070000 0x80>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <56>;
+ dmas = <&sdma 27 &sdma 28>;
+ dma-names = "tx", "rx";
+ };
+
+ i2c2: i2c@48072000 {
+ compatible = "ti,omap2-i2c";
+ ti,hwmods = "i2c2";
+ reg = <0x48072000 0x80>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <57>;
+ dmas = <&sdma 29 &sdma 30>;
+ dma-names = "tx", "rx";
+ };
+
+ mcspi1: mcspi@48098000 {
+ compatible = "ti,omap2-mcspi";
+ ti,hwmods = "mcspi1";
+ reg = <0x48098000 0x100>;
+ interrupts = <65>;
+ dmas = <&sdma 35 &sdma 36 &sdma 37 &sdma 38
+ &sdma 39 &sdma 40 &sdma 41 &sdma 42>;
+ dma-names = "tx0", "rx0", "tx1", "rx1",
+ "tx2", "rx2", "tx3", "rx3";
+ };
+
+ mcspi2: mcspi@4809a000 {
+ compatible = "ti,omap2-mcspi";
+ ti,hwmods = "mcspi2";
+ reg = <0x4809a000 0x100>;
+ interrupts = <66>;
+ dmas = <&sdma 43 &sdma 44 &sdma 45 &sdma 46>;
+ dma-names = "tx0", "rx0", "tx1", "rx1";
+ };
+
+ rng: rng@480a0000 {
+ compatible = "ti,omap2-rng";
+ ti,hwmods = "rng";
+ reg = <0x480a0000 0x50>;
+ interrupts = <36>;
+ };
+
+ sham: sham@480a4000 {
+ compatible = "ti,omap2-sham";
+ ti,hwmods = "sham";
+ reg = <0x480a4000 0x64>;
+ interrupts = <51>;
+ dmas = <&sdma 13>;
+ dma-names = "rx";
+ };
+
uart1: serial@4806a000 {
compatible = "ti,omap2-uart";
ti,hwmods = "uart1";
+ reg = <0x4806a000 0x2000>;
+ interrupts = <72>;
+ dmas = <&sdma 49 &sdma 50>;
+ dma-names = "tx", "rx";
clock-frequency = <48000000>;
};
uart2: serial@4806c000 {
compatible = "ti,omap2-uart";
ti,hwmods = "uart2";
+ reg = <0x4806c000 0x400>;
+ interrupts = <73>;
+ dmas = <&sdma 51 &sdma 52>;
+ dma-names = "tx", "rx";
clock-frequency = <48000000>;
};
uart3: serial@4806e000 {
compatible = "ti,omap2-uart";
ti,hwmods = "uart3";
+ reg = <0x4806e000 0x400>;
+ interrupts = <74>;
+ dmas = <&sdma 53 &sdma 54>;
+ dma-names = "tx", "rx";
clock-frequency = <48000000>;
};
diff --git a/arch/arm/boot/dts/omap2420.dtsi b/arch/arm/boot/dts/omap2420.dtsi
index c8f9c55169ea..60c605de22dd 100644
--- a/arch/arm/boot/dts/omap2420.dtsi
+++ b/arch/arm/boot/dts/omap2420.dtsi
@@ -114,6 +114,15 @@
dma-names = "tx", "rx";
};
+ msdi1: mmc@4809c000 {
+ compatible = "ti,omap2420-mmc";
+ ti,hwmods = "msdi1";
+ reg = <0x4809c000 0x80>;
+ interrupts = <83>;
+ dmas = <&sdma 61 &sdma 62>;
+ dma-names = "tx", "rx";
+ };
+
timer1: timer@48028000 {
compatible = "ti,omap2420-timer";
reg = <0x48028000 0x400>;
@@ -121,5 +130,19 @@
ti,hwmods = "timer1";
ti,timer-alwon;
};
+
+ wd_timer2: wdt@48022000 {
+ compatible = "ti,omap2-wdt";
+ ti,hwmods = "wd_timer2";
+ reg = <0x48022000 0x80>;
+ };
};
};
+
+&i2c1 {
+ compatible = "ti,omap2420-i2c";
+};
+
+&i2c2 {
+ compatible = "ti,omap2420-i2c";
+};
diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi
index c535a5a2b27f..d624345666f5 100644
--- a/arch/arm/boot/dts/omap2430.dtsi
+++ b/arch/arm/boot/dts/omap2430.dtsi
@@ -175,6 +175,25 @@
dma-names = "tx", "rx";
};
+ mmc1: mmc@4809c000 {
+ compatible = "ti,omap2-hsmmc";
+ reg = <0x4809c000 0x200>;
+ interrupts = <83>;
+ ti,hwmods = "mmc1";
+ ti,dual-volt;
+ dmas = <&sdma 61>, <&sdma 62>;
+ dma-names = "tx", "rx";
+ };
+
+ mmc2: mmc@480b4000 {
+ compatible = "ti,omap2-hsmmc";
+ reg = <0x480b4000 0x200>;
+ interrupts = <86>;
+ ti,hwmods = "mmc2";
+ dmas = <&sdma 47>, <&sdma 48>;
+ dma-names = "tx", "rx";
+ };
+
timer1: timer@49018000 {
compatible = "ti,omap2420-timer";
reg = <0x49018000 0x400>;
@@ -182,5 +201,35 @@
ti,hwmods = "timer1";
ti,timer-alwon;
};
+
+ mcspi3: mcspi@480b8000 {
+ compatible = "ti,omap2-mcspi";
+ ti,hwmods = "mcspi3";
+ reg = <0x480b8000 0x100>;
+ interrupts = <91>;
+ dmas = <&sdma 15 &sdma 16 &sdma 23 &sdma 24>;
+ dma-names = "tx0", "rx0", "tx1", "rx1";
+ };
+
+ usb_otg_hs: usb_otg_hs@480ac000 {
+ compatible = "ti,omap2-musb";
+ ti,hwmods = "usb_otg_hs";
+ reg = <0x480ac000 0x1000>;
+ interrupts = <93>;
+ };
+
+ wd_timer2: wdt@49016000 {
+ compatible = "ti,omap2-wdt";
+ ti,hwmods = "wd_timer2";
+ reg = <0x49016000 0x80>;
+ };
};
};
+
+&i2c1 {
+ compatible = "ti,omap2430-i2c";
+};
+
+&i2c2 {
+ compatible = "ti,omap2430-i2c";
+};
diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts
index 31a632f7effb..df33a50bc070 100644
--- a/arch/arm/boot/dts/omap3-beagle-xm.dts
+++ b/arch/arm/boot/dts/omap3-beagle-xm.dts
@@ -215,3 +215,10 @@
&usbhsehci {
phys = <0 &hsusb2_phy>;
};
+
+&vaux2 {
+ regulator-name = "usb_1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+};
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index fa532aaacc68..3ba4a625ea5b 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -61,6 +61,14 @@
vcc-supply = <&hsusb2_power>;
};
+ sound {
+ compatible = "ti,omap-twl4030";
+ ti,model = "omap3beagle";
+
+ ti,mcbsp = <&mcbsp2>;
+ ti,codec = <&twl_audio>;
+ };
+
gpio_keys {
compatible = "gpio-keys";
@@ -120,6 +128,12 @@
reg = <0x48>;
interrupts = <7>; /* SYS_NIRQ cascaded to intc */
interrupt-parent = <&intc>;
+
+ twl_audio: audio {
+ compatible = "ti,twl4030-audio";
+ codec {
+ };
+ };
};
};
@@ -178,3 +192,10 @@
mode = <3>;
power = <50>;
};
+
+&vaux2 {
+ regulator-name = "vdd_ehci";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+};
diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi
index ba1e58b7b7e3..165aaf7591ba 100644
--- a/arch/arm/boot/dts/omap3-igep.dtsi
+++ b/arch/arm/boot/dts/omap3-igep.dtsi
@@ -1,5 +1,5 @@
/*
- * Device Tree Source for IGEP Technology devices
+ * Common device tree for IGEP boards based on AM/DM37x
*
* Copyright (C) 2012 Javier Martinez Canillas <javier@collabora.co.uk>
* Copyright (C) 2012 Enric Balletbo i Serra <eballetbo@gmail.com>
@@ -10,7 +10,7 @@
*/
/dts-v1/;
-#include "omap34xx.dtsi"
+#include "omap36xx.dtsi"
/ {
memory {
@@ -24,6 +24,25 @@
ti,mcbsp = <&mcbsp2>;
ti,codec = <&twl_audio>;
};
+
+ vdd33: regulator-vdd33 {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd33";
+ regulator-always-on;
+ };
+
+ lbee1usjyc_vmmc: lbee1usjyc_vmmc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&lbee1usjyc_pins>;
+ compatible = "regulator-fixed";
+ regulator-name = "regulator-lbee1usjyc";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio5 10 GPIO_ACTIVE_HIGH>; /* gpio_138 WIFI_PDN */
+ startup-delay-us = <10000>;
+ enable-active-high;
+ vin-supply = <&vdd33>;
+ };
};
&omap3_pmx_core {
@@ -48,6 +67,15 @@
>;
};
+ /* WiFi/BT combo */
+ lbee1usjyc_pins: pinmux_lbee1usjyc_pins {
+ pinctrl-single,pins = <
+ 0x136 (PIN_OUTPUT | MUX_MODE4) /* sdmmc2_dat5.gpio_137 */
+ 0x138 (PIN_OUTPUT | MUX_MODE4) /* sdmmc2_dat6.gpio_138 */
+ 0x13a (PIN_OUTPUT | MUX_MODE4) /* sdmmc2_dat7.gpio_139 */
+ >;
+ };
+
mcbsp2_pins: pinmux_mcbsp2_pins {
pinctrl-single,pins = <
0x10c (PIN_INPUT | MUX_MODE0) /* mcbsp2_fsx.mcbsp2_fsx */
@@ -65,10 +93,17 @@
0x11a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat1.sdmmc1_dat1 */
0x11c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat2.sdmmc1_dat2 */
0x11e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_dat3.sdmmc1_dat3 */
- 0x120 (PIN_INPUT | MUX_MODE0) /* sdmmc1_dat4.sdmmc1_dat4 */
- 0x122 (PIN_INPUT | MUX_MODE0) /* sdmmc1_dat5.sdmmc1_dat5 */
- 0x124 (PIN_INPUT | MUX_MODE0) /* sdmmc1_dat6.sdmmc1_dat6 */
- 0x126 (PIN_INPUT | MUX_MODE0) /* sdmmc1_dat7.sdmmc1_dat7 */
+ >;
+ };
+
+ mmc2_pins: pinmux_mmc2_pins {
+ pinctrl-single,pins = <
+ 0x128 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_clk.sdmmc2_clk */
+ 0x12a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_cmd.sdmmc2_cmd */
+ 0x12c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat0.sdmmc2_dat0 */
+ 0x12e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */
+ 0x130 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat2.sdmmc2_dat2 */
+ 0x132 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3.sdmmc2_dat3 */
>;
};
@@ -78,10 +113,33 @@
>;
};
+ i2c1_pins: pinmux_i2c1_pins {
+ pinctrl-single,pins = <
+ 0x18a (PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
+ 0x18c (PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
+ >;
+ };
+
+ i2c2_pins: pinmux_i2c2_pins {
+ pinctrl-single,pins = <
+ 0x18e (PIN_INPUT | MUX_MODE0) /* i2c2_scl.i2c2_scl */
+ 0x190 (PIN_INPUT | MUX_MODE0) /* i2c2_sda.i2c2_sda */
+ >;
+ };
+
+ i2c3_pins: pinmux_i2c3_pins {
+ pinctrl-single,pins = <
+ 0x192 (PIN_INPUT | MUX_MODE0) /* i2c3_scl.i2c3_scl */
+ 0x194 (PIN_INPUT | MUX_MODE0) /* i2c3_sda.i2c3_sda */
+ >;
+ };
+
leds_pins: pinmux_leds_pins { };
};
&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
clock-frequency = <2600000>;
twl: twl@48 {
@@ -101,9 +159,16 @@
#include "twl4030_omap3.dtsi"
&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins>;
clock-frequency = <400000>;
};
+&i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_pins>;
+};
+
&mcbsp2 {
pinctrl-names = "default";
pinctrl-0 = <&mcbsp2_pins>;
@@ -114,11 +179,15 @@
pinctrl-0 = <&mmc1_pins>;
vmmc-supply = <&vmmc1>;
vmmc_aux-supply = <&vsim>;
- bus-width = <8>;
+ bus-width = <4>;
};
&mmc2 {
- status = "disabled";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_pins>;
+ vmmc-supply = <&lbee1usjyc_vmmc>;
+ bus-width = <4>;
+ non-removable;
};
&mmc3 {
diff --git a/arch/arm/boot/dts/omap3-igep0020.dts b/arch/arm/boot/dts/omap3-igep0020.dts
index d5cc79267250..1c7e74d2d2bc 100644
--- a/arch/arm/boot/dts/omap3-igep0020.dts
+++ b/arch/arm/boot/dts/omap3-igep0020.dts
@@ -1,5 +1,5 @@
/*
- * Device Tree Source for IGEPv2 board
+ * Device Tree Source for IGEPv2 Rev. (TI OMAP AM/DM37x)
*
* Copyright (C) 2012 Javier Martinez Canillas <javier@collabora.co.uk>
* Copyright (C) 2012 Enric Balletbo i Serra <eballetbo@gmail.com>
@@ -13,7 +13,7 @@
#include "omap-gpmc-smsc911x.dtsi"
/ {
- model = "IGEPv2";
+ model = "IGEPv2 (TI OMAP AM/DM37x)";
compatible = "isee,omap3-igep0020", "ti,omap3";
leds {
@@ -67,6 +67,8 @@
pinctrl-names = "default";
pinctrl-0 = <
&hsusbb1_pins
+ &tfp410_pins
+ &dss_pins
>;
hsusbb1_pins: pinmux_hsusbb1_pins {
@@ -85,6 +87,45 @@
0x5ba (PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d7.hsusb1_data3 */
>;
};
+
+ tfp410_pins: tfp410_dvi_pins {
+ pinctrl-single,pins = <
+ 0x196 (PIN_OUTPUT | MUX_MODE4) /* hdq_sio.gpio_170 */
+ >;
+ };
+
+ dss_pins: pinmux_dss_dvi_pins {
+ pinctrl-single,pins = <
+ 0x0a4 (PIN_OUTPUT | MUX_MODE0) /* dss_pclk.dss_pclk */
+ 0x0a6 (PIN_OUTPUT | MUX_MODE0) /* dss_hsync.dss_hsync */
+ 0x0a8 (PIN_OUTPUT | MUX_MODE0) /* dss_vsync.dss_vsync */
+ 0x0aa (PIN_OUTPUT | MUX_MODE0) /* dss_acbias.dss_acbias */
+ 0x0ac (PIN_OUTPUT | MUX_MODE0) /* dss_data0.dss_data0 */
+ 0x0ae (PIN_OUTPUT | MUX_MODE0) /* dss_data1.dss_data1 */
+ 0x0b0 (PIN_OUTPUT | MUX_MODE0) /* dss_data2.dss_data2 */
+ 0x0b2 (PIN_OUTPUT | MUX_MODE0) /* dss_data3.dss_data3 */
+ 0x0b4 (PIN_OUTPUT | MUX_MODE0) /* dss_data4.dss_data4 */
+ 0x0b6 (PIN_OUTPUT | MUX_MODE0) /* dss_data5.dss_data5 */
+ 0x0b8 (PIN_OUTPUT | MUX_MODE0) /* dss_data6.dss_data6 */
+ 0x0ba (PIN_OUTPUT | MUX_MODE0) /* dss_data7.dss_data7 */
+ 0x0bc (PIN_OUTPUT | MUX_MODE0) /* dss_data8.dss_data8 */
+ 0x0be (PIN_OUTPUT | MUX_MODE0) /* dss_data9.dss_data9 */
+ 0x0c0 (PIN_OUTPUT | MUX_MODE0) /* dss_data10.dss_data10 */
+ 0x0c2 (PIN_OUTPUT | MUX_MODE0) /* dss_data11.dss_data11 */
+ 0x0c4 (PIN_OUTPUT | MUX_MODE0) /* dss_data12.dss_data12 */
+ 0x0c6 (PIN_OUTPUT | MUX_MODE0) /* dss_data13.dss_data13 */
+ 0x0c8 (PIN_OUTPUT | MUX_MODE0) /* dss_data14.dss_data14 */
+ 0x0ca (PIN_OUTPUT | MUX_MODE0) /* dss_data15.dss_data15 */
+ 0x0cc (PIN_OUTPUT | MUX_MODE0) /* dss_data16.dss_data16 */
+ 0x0ce (PIN_OUTPUT | MUX_MODE0) /* dss_data17.dss_data17 */
+ 0x0d0 (PIN_OUTPUT | MUX_MODE0) /* dss_data18.dss_data18 */
+ 0x0d2 (PIN_OUTPUT | MUX_MODE0) /* dss_data19.dss_data19 */
+ 0x0d4 (PIN_OUTPUT | MUX_MODE0) /* dss_data20.dss_data20 */
+ 0x0d6 (PIN_OUTPUT | MUX_MODE0) /* dss_data21.dss_data21 */
+ 0x0d8 (PIN_OUTPUT | MUX_MODE0) /* dss_data22.dss_data22 */
+ 0x0da (PIN_OUTPUT | MUX_MODE0) /* dss_data23.dss_data23 */
+ >;
+ };
};
&leds_pins {
@@ -174,3 +215,8 @@
&usbhsehci {
phys = <&hsusb1_phy>;
};
+
+&vpll2 {
+ /* Needed for DSS */
+ regulator-name = "vdds_dsi";
+};
diff --git a/arch/arm/boot/dts/omap3-igep0030.dts b/arch/arm/boot/dts/omap3-igep0030.dts
index 525e6d9b0978..02a23f8a3384 100644
--- a/arch/arm/boot/dts/omap3-igep0030.dts
+++ b/arch/arm/boot/dts/omap3-igep0030.dts
@@ -1,5 +1,5 @@
/*
- * Device Tree Source for IGEP COM Module
+ * Device Tree Source for IGEP COM MODULE (TI OMAP AM/DM37x)
*
* Copyright (C) 2012 Javier Martinez Canillas <javier@collabora.co.uk>
* Copyright (C) 2012 Enric Balletbo i Serra <eballetbo@gmail.com>
@@ -12,7 +12,7 @@
#include "omap3-igep.dtsi"
/ {
- model = "IGEP COM Module";
+ model = "IGEP COM MODULE (TI OMAP AM/DM37x)";
compatible = "isee,omap3-igep0030", "ti,omap3";
leds {
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index c4f20bfe4cce..6fc85f963530 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -9,7 +9,7 @@
/dts-v1/;
-#include "omap34xx.dtsi"
+#include "omap34xx-hs.dtsi"
/ {
model = "Nokia N900";
@@ -125,6 +125,21 @@
>;
};
+ mmc2_pins: pinmux_mmc2_pins {
+ pinctrl-single,pins = <
+ 0x128 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_clk */
+ 0x12a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_cmd */
+ 0x12c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat0 */
+ 0x12e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1 */
+ 0x130 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat2 */
+ 0x132 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3 */
+ 0x134 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat4 */
+ 0x136 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat5 */
+ 0x138 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat6 */
+ 0x13a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat7 */
+ >;
+ };
+
display_pins: pinmux_display_pins {
pinctrl-single,pins = <
0x0d4 (PIN_OUTPUT | MUX_MODE4) /* RX51_LCD_RESET_GPIO */
@@ -358,8 +373,14 @@
cd-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>; /* 160 */
};
+/* most boards use vaux3, only some old versions use vmmc2 instead */
&mmc2 {
- status = "disabled";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_pins>;
+ vmmc-supply = <&vaux3>;
+ vmmc_aux-supply = <&vsim>;
+ bus-width = <8>;
+ non-removable;
};
&mmc3 {
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index 94eb77d3b9dd..5c26c184f2c1 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -8,7 +8,7 @@
* published by the Free Software Foundation.
*/
-#include "omap36xx.dtsi"
+#include "omap36xx-hs.dtsi"
/ {
cpus {
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index f3a0c26ed0c2..daabf99d402a 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -82,6 +82,13 @@
ranges;
ti,hwmods = "l3_main";
+ aes: aes@480c5000 {
+ compatible = "ti,omap3-aes";
+ ti,hwmods = "aes";
+ reg = <0x480c5000 0x50>;
+ interrupts = <0>;
+ };
+
counter32k: counter@48320000 {
compatible = "ti,omap-counter32k";
reg = <0x48320000 0x20>;
@@ -260,6 +267,13 @@
ti,hwmods = "i2c3";
};
+ mailbox: mailbox@48094000 {
+ compatible = "ti,omap3-mailbox";
+ ti,hwmods = "mailbox";
+ reg = <0x48094000 0x200>;
+ interrupts = <26>;
+ };
+
mcspi1: spi@48098000 {
compatible = "ti,omap2-mcspi";
reg = <0x48098000 0x100>;
@@ -357,6 +371,13 @@
dma-names = "tx", "rx";
};
+ mmu_isp: mmu@480bd400 {
+ compatible = "ti,omap3-mmu-isp";
+ ti,hwmods = "mmu_isp";
+ reg = <0x480bd400 0x80>;
+ interrupts = <8>;
+ };
+
wdt2: wdt@48314000 {
compatible = "ti,omap3-wdt";
reg = <0x48314000 0x80>;
@@ -442,6 +463,27 @@
dma-names = "tx", "rx";
};
+ sham: sham@480c3000 {
+ compatible = "ti,omap3-sham";
+ ti,hwmods = "sham";
+ reg = <0x480c3000 0x64>;
+ interrupts = <49>;
+ };
+
+ smartreflex_core: smartreflex@480cb000 {
+ compatible = "ti,omap3-smartreflex-core";
+ ti,hwmods = "smartreflex_core";
+ reg = <0x480cb000 0x400>;
+ interrupts = <19>;
+ };
+
+ smartreflex_mpu_iva: smartreflex@480c9000 {
+ compatible = "ti,omap3-smartreflex-iva";
+ ti,hwmods = "smartreflex_mpu_iva";
+ reg = <0x480c9000 0x400>;
+ interrupts = <18>;
+ };
+
timer1: timer@48318000 {
compatible = "ti,omap3430-timer";
reg = <0x48318000 0x400>;
diff --git a/arch/arm/boot/dts/omap34xx-hs.dtsi b/arch/arm/boot/dts/omap34xx-hs.dtsi
new file mode 100644
index 000000000000..1ff626489546
--- /dev/null
+++ b/arch/arm/boot/dts/omap34xx-hs.dtsi
@@ -0,0 +1,16 @@
+/* Disabled modules for secure omaps */
+
+#include "omap34xx.dtsi"
+
+/* Secure omaps have some devices inaccessible depending on the firmware */
+&aes {
+ status = "disabled";
+};
+
+&sham {
+ status = "disabled";
+};
+
+&timer12 {
+ status = "disabled";
+};
diff --git a/arch/arm/boot/dts/omap36xx-hs.dtsi b/arch/arm/boot/dts/omap36xx-hs.dtsi
new file mode 100644
index 000000000000..2c7febb0e016
--- /dev/null
+++ b/arch/arm/boot/dts/omap36xx-hs.dtsi
@@ -0,0 +1,16 @@
+/* Disabled modules for secure omaps */
+
+#include "omap36xx.dtsi"
+
+/* Secure omaps have some devices inaccessible depending on the firmware */
+&aes {
+ status = "disabled";
+};
+
+&sham {
+ status = "disabled";
+};
+
+&timer12 {
+ status = "disabled";
+};
diff --git a/arch/arm/boot/dts/omap4-panda-common.dtsi b/arch/arm/boot/dts/omap4-panda-common.dtsi
index 298e85020e1b..88c6a05cab41 100644
--- a/arch/arm/boot/dts/omap4-panda-common.dtsi
+++ b/arch/arm/boot/dts/omap4-panda-common.dtsi
@@ -246,15 +246,6 @@
0xf0 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c4_sda */
>;
};
-};
-
-&omap4_pmx_wkup {
- led_wkgpio_pins: pinmux_leds_wkpins {
- pinctrl-single,pins = <
- 0x1a (PIN_OUTPUT | MUX_MODE3) /* gpio_wk7 */
- 0x1c (PIN_OUTPUT | MUX_MODE3) /* gpio_wk8 */
- >;
- };
/*
* wl12xx GPIO outputs for WLAN_EN, BT_EN, FM_EN, BT_WAKEUP
@@ -274,7 +265,7 @@
pinctrl-single,pins = <
0x38 (PIN_INPUT | MUX_MODE3) /* gpmc_ncs2.gpio_52 */
0x3a (PIN_INPUT | MUX_MODE3) /* gpmc_ncs3.gpio_53 */
- 0x108 (PIN_OUTPUT | MUX_MODE0) /* sdmmc5_clk.sdmmc5_clk */
+ 0x108 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_clk.sdmmc5_clk */
0x10a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_cmd.sdmmc5_cmd */
0x10c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat0.sdmmc5_dat0 */
0x10e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat1.sdmmc5_dat1 */
@@ -284,6 +275,15 @@
};
};
+&omap4_pmx_wkup {
+ led_wkgpio_pins: pinmux_leds_wkpins {
+ pinctrl-single,pins = <
+ 0x1a (PIN_OUTPUT | MUX_MODE3) /* gpio_wk7 */
+ 0x1c (PIN_OUTPUT | MUX_MODE3) /* gpio_wk8 */
+ >;
+ };
+};
+
&i2c1 {
pinctrl-names = "default";
pinctrl-0 = <&i2c1_pins>;
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index 5fc3f43c5a81..dbc81fb6ef03 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -300,12 +300,12 @@
wl12xx_pins: pinmux_wl12xx_pins {
pinctrl-single,pins = <
0x3a (PIN_INPUT | MUX_MODE3) /* gpmc_ncs3.gpio_53 */
- 0x108 (PIN_OUTPUT | MUX_MODE3) /* sdmmc5_clk.sdmmc5_clk */
- 0x10a (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_cmd.sdmmc5_cmd */
- 0x10c (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat0.sdmmc5_dat0 */
- 0x10e (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat1.sdmmc5_dat1 */
- 0x110 (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat2.sdmmc5_dat2 */
- 0x112 (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat3.sdmmc5_dat3 */
+ 0x108 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_clk.sdmmc5_clk */
+ 0x10a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_cmd.sdmmc5_cmd */
+ 0x10c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat0.sdmmc5_dat0 */
+ 0x10e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat1.sdmmc5_dat1 */
+ 0x110 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat2.sdmmc5_dat2 */
+ 0x112 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat3.sdmmc5_dat3 */
>;
};
};
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index ee845fad939b..9987dd0e9c59 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -87,9 +87,9 @@
interrupts = <1 9 0xf04>;
};
- gpio0: gpio@ffc40000 {
+ gpio0: gpio@e6050000 {
compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
- reg = <0 0xffc40000 0 0x2c>;
+ reg = <0 0xe6050000 0 0x50>;
interrupt-parent = <&gic>;
interrupts = <0 4 0x4>;
#gpio-cells = <2>;
@@ -99,9 +99,9 @@
interrupt-controller;
};
- gpio1: gpio@ffc41000 {
+ gpio1: gpio@e6051000 {
compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
- reg = <0 0xffc41000 0 0x2c>;
+ reg = <0 0xe6051000 0 0x50>;
interrupt-parent = <&gic>;
interrupts = <0 5 0x4>;
#gpio-cells = <2>;
@@ -111,9 +111,9 @@
interrupt-controller;
};
- gpio2: gpio@ffc42000 {
+ gpio2: gpio@e6052000 {
compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
- reg = <0 0xffc42000 0 0x2c>;
+ reg = <0 0xe6052000 0 0x50>;
interrupt-parent = <&gic>;
interrupts = <0 6 0x4>;
#gpio-cells = <2>;
@@ -123,9 +123,9 @@
interrupt-controller;
};
- gpio3: gpio@ffc43000 {
+ gpio3: gpio@e6053000 {
compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
- reg = <0 0xffc43000 0 0x2c>;
+ reg = <0 0xe6053000 0 0x50>;
interrupt-parent = <&gic>;
interrupts = <0 7 0x4>;
#gpio-cells = <2>;
@@ -135,9 +135,9 @@
interrupt-controller;
};
- gpio4: gpio@ffc44000 {
+ gpio4: gpio@e6054000 {
compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
- reg = <0 0xffc44000 0 0x2c>;
+ reg = <0 0xe6054000 0 0x50>;
interrupt-parent = <&gic>;
interrupts = <0 8 0x4>;
#gpio-cells = <2>;
@@ -147,9 +147,9 @@
interrupt-controller;
};
- gpio5: gpio@ffc45000 {
+ gpio5: gpio@e6055000 {
compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
- reg = <0 0xffc45000 0 0x2c>;
+ reg = <0 0xe6055000 0 0x50>;
interrupt-parent = <&gic>;
interrupts = <0 9 0x4>;
#gpio-cells = <2>;
@@ -241,7 +241,7 @@
sdhi0: sdhi@ee100000 {
compatible = "renesas,sdhi-r8a7790";
- reg = <0 0xee100000 0 0x100>;
+ reg = <0 0xee100000 0 0x200>;
interrupt-parent = <&gic>;
interrupts = <0 165 4>;
cap-sd-highspeed;
@@ -250,7 +250,7 @@
sdhi1: sdhi@ee120000 {
compatible = "renesas,sdhi-r8a7790";
- reg = <0 0xee120000 0 0x100>;
+ reg = <0 0xee120000 0 0x200>;
interrupt-parent = <&gic>;
interrupts = <0 166 4>;
cap-sd-highspeed;
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 6d09b8d42fdd..f936476c2753 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -245,14 +245,14 @@
mpu_periph_clk: mpu_periph_clk {
#clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
+ compatible = "altr,socfpga-perip-clk";
clocks = <&mpuclk>;
fixed-divider = <4>;
};
mpu_l2_ram_clk: mpu_l2_ram_clk {
#clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
+ compatible = "altr,socfpga-perip-clk";
clocks = <&mpuclk>;
fixed-divider = <2>;
};
@@ -266,8 +266,9 @@
l3_main_clk: l3_main_clk {
#clock-cells = <0>;
- compatible = "altr,socfpga-gate-clk";
+ compatible = "altr,socfpga-perip-clk";
clocks = <&mainclk>;
+ fixed-divider = <1>;
};
l3_mp_clk: l3_mp_clk {
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index c1751a64889a..7f5878c2784a 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -193,7 +193,10 @@
pio: pinctrl@01c20800 {
compatible = "allwinner,sun6i-a31-pinctrl";
reg = <0x01c20800 0x400>;
- interrupts = <0 11 1>, <0 15 1>, <0 16 1>, <0 17 1>;
+ interrupts = <0 11 4>,
+ <0 15 4>,
+ <0 16 4>,
+ <0 17 4>;
clocks = <&apb1_gates 5>;
gpio-controller;
interrupt-controller;
@@ -212,11 +215,11 @@
timer@01c20c00 {
compatible = "allwinner,sun4i-timer";
reg = <0x01c20c00 0xa0>;
- interrupts = <0 18 1>,
- <0 19 1>,
- <0 20 1>,
- <0 21 1>,
- <0 22 1>;
+ interrupts = <0 18 4>,
+ <0 19 4>,
+ <0 20 4>,
+ <0 21 4>,
+ <0 22 4>;
clocks = <&osc24M>;
};
@@ -228,7 +231,7 @@
uart0: serial@01c28000 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28000 0x400>;
- interrupts = <0 0 1>;
+ interrupts = <0 0 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb2_gates 16>;
@@ -238,7 +241,7 @@
uart1: serial@01c28400 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28400 0x400>;
- interrupts = <0 1 1>;
+ interrupts = <0 1 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb2_gates 17>;
@@ -248,7 +251,7 @@
uart2: serial@01c28800 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28800 0x400>;
- interrupts = <0 2 1>;
+ interrupts = <0 2 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb2_gates 18>;
@@ -258,7 +261,7 @@
uart3: serial@01c28c00 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28c00 0x400>;
- interrupts = <0 3 1>;
+ interrupts = <0 3 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb2_gates 19>;
@@ -268,7 +271,7 @@
uart4: serial@01c29000 {
compatible = "snps,dw-apb-uart";
reg = <0x01c29000 0x400>;
- interrupts = <0 4 1>;
+ interrupts = <0 4 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb2_gates 20>;
@@ -278,7 +281,7 @@
uart5: serial@01c29400 {
compatible = "snps,dw-apb-uart";
reg = <0x01c29400 0x400>;
- interrupts = <0 5 1>;
+ interrupts = <0 5 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb2_gates 21>;
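All of the 1 -> 4 edits in the trailing interrupt cell (here and in the sun7i-a20 file below) switch the GIC trigger from edge-rising to level-high. The cell values are the standard trigger flags from include/dt-bindings/interrupt-controller/irq.h:

/* include/dt-bindings/interrupt-controller/irq.h */
#define IRQ_TYPE_NONE		0
#define IRQ_TYPE_EDGE_RISING	1
#define IRQ_TYPE_EDGE_FALLING	2
#define IRQ_TYPE_EDGE_BOTH	(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)
#define IRQ_TYPE_LEVEL_HIGH	4
#define IRQ_TYPE_LEVEL_LOW	8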
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index e46cfedde74c..367611a0730b 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -170,7 +170,7 @@
emac: ethernet@01c0b000 {
compatible = "allwinner,sun4i-emac";
reg = <0x01c0b000 0x1000>;
- interrupts = <0 55 1>;
+ interrupts = <0 55 4>;
clocks = <&ahb_gates 17>;
status = "disabled";
};
@@ -186,7 +186,7 @@
pio: pinctrl@01c20800 {
compatible = "allwinner,sun7i-a20-pinctrl";
reg = <0x01c20800 0x400>;
- interrupts = <0 28 1>;
+ interrupts = <0 28 4>;
clocks = <&apb0_gates 5>;
gpio-controller;
interrupt-controller;
@@ -251,12 +251,12 @@
timer@01c20c00 {
compatible = "allwinner,sun4i-timer";
reg = <0x01c20c00 0x90>;
- interrupts = <0 22 1>,
- <0 23 1>,
- <0 24 1>,
- <0 25 1>,
- <0 67 1>,
- <0 68 1>;
+ interrupts = <0 22 4>,
+ <0 23 4>,
+ <0 24 4>,
+ <0 25 4>,
+ <0 67 4>,
+ <0 68 4>;
clocks = <&osc24M>;
};
@@ -273,7 +273,7 @@
uart0: serial@01c28000 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28000 0x400>;
- interrupts = <0 1 1>;
+ interrupts = <0 1 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb1_gates 16>;
@@ -283,7 +283,7 @@
uart1: serial@01c28400 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28400 0x400>;
- interrupts = <0 2 1>;
+ interrupts = <0 2 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb1_gates 17>;
@@ -293,7 +293,7 @@
uart2: serial@01c28800 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28800 0x400>;
- interrupts = <0 3 1>;
+ interrupts = <0 3 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb1_gates 18>;
@@ -303,7 +303,7 @@
uart3: serial@01c28c00 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28c00 0x400>;
- interrupts = <0 4 1>;
+ interrupts = <0 4 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb1_gates 19>;
@@ -313,7 +313,7 @@
uart4: serial@01c29000 {
compatible = "snps,dw-apb-uart";
reg = <0x01c29000 0x400>;
- interrupts = <0 17 1>;
+ interrupts = <0 17 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb1_gates 20>;
@@ -323,7 +323,7 @@
uart5: serial@01c29400 {
compatible = "snps,dw-apb-uart";
reg = <0x01c29400 0x400>;
- interrupts = <0 18 1>;
+ interrupts = <0 18 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb1_gates 21>;
@@ -333,7 +333,7 @@
uart6: serial@01c29800 {
compatible = "snps,dw-apb-uart";
reg = <0x01c29800 0x400>;
- interrupts = <0 19 1>;
+ interrupts = <0 19 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb1_gates 22>;
@@ -343,7 +343,7 @@
uart7: serial@01c29c00 {
compatible = "snps,dw-apb-uart";
reg = <0x01c29c00 0x400>;
- interrupts = <0 20 1>;
+ interrupts = <0 20 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb1_gates 23>;
@@ -353,7 +353,7 @@
i2c0: i2c@01c2ac00 {
compatible = "allwinner,sun4i-i2c";
reg = <0x01c2ac00 0x400>;
- interrupts = <0 7 1>;
+ interrupts = <0 7 4>;
clocks = <&apb1_gates 0>;
clock-frequency = <100000>;
status = "disabled";
@@ -362,7 +362,7 @@
i2c1: i2c@01c2b000 {
compatible = "allwinner,sun4i-i2c";
reg = <0x01c2b000 0x400>;
- interrupts = <0 8 1>;
+ interrupts = <0 8 4>;
clocks = <&apb1_gates 1>;
clock-frequency = <100000>;
status = "disabled";
@@ -371,7 +371,7 @@
i2c2: i2c@01c2b400 {
compatible = "allwinner,sun4i-i2c";
reg = <0x01c2b400 0x400>;
- interrupts = <0 9 1>;
+ interrupts = <0 9 4>;
clocks = <&apb1_gates 2>;
clock-frequency = <100000>;
status = "disabled";
@@ -380,7 +380,7 @@
i2c3: i2c@01c2b800 {
compatible = "allwinner,sun4i-i2c";
reg = <0x01c2b800 0x400>;
- interrupts = <0 88 1>;
+ interrupts = <0 88 4>;
clocks = <&apb1_gates 3>;
clock-frequency = <100000>;
status = "disabled";
@@ -389,7 +389,7 @@
i2c4: i2c@01c2bc00 {
compatible = "allwinner,sun4i-i2c";
reg = <0x01c2bc00 0x400>;
- interrupts = <0 89 1>;
+ interrupts = <0 89 4>;
clocks = <&apb1_gates 15>;
clock-frequency = <100000>;
status = "disabled";
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index 8e1a0245907f..41bca32409fc 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -404,7 +404,7 @@ static irqreturn_t dma_irq_handler(int irq, void *data)
BIT(slot));
if (edma_cc[ctlr]->intr_data[channel].callback)
edma_cc[ctlr]->intr_data[channel].callback(
- channel, DMA_COMPLETE,
+ channel, EDMA_DMA_COMPLETE,
edma_cc[ctlr]->intr_data[channel].data);
}
} while (sh_ipr);
@@ -459,7 +459,7 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
callback) {
edma_cc[ctlr]->intr_data[k].
callback(k,
- DMA_CC_ERROR,
+ EDMA_DMA_CC_ERROR,
edma_cc[ctlr]->intr_data
[k].data);
}
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 4a5903e04827..c1df4e9db140 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -69,6 +69,7 @@ CONFIG_KS8851=y
CONFIG_SMSC911X=y
CONFIG_STMMAC_ETH=y
CONFIG_MDIO_SUN4I=y
+CONFIG_TI_CPSW=y
CONFIG_KEYBOARD_SPEAR=y
CONFIG_SERIO_AMBAKMI=y
CONFIG_SERIAL_8250=y
@@ -133,12 +134,14 @@ CONFIG_USB_GPIO_VBUS=y
CONFIG_USB_ISP1301=y
CONFIG_USB_MXS_PHY=y
CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=16
CONFIG_MMC_ARMMMCI=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_ESDHC_IMX=y
CONFIG_MMC_SDHCI_TEGRA=y
CONFIG_MMC_SDHCI_SPEAR=y
+CONFIG_MMC_SDHCI_BCM_KONA=y
CONFIG_MMC_OMAP=y
CONFIG_MMC_OMAP_HS=y
CONFIG_EDAC=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 98a50c309b90..bfa80a11e8c7 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -173,6 +173,7 @@ CONFIG_MFD_PALMAS=y
CONFIG_MFD_TPS65217=y
CONFIG_MFD_TPS65910=y
CONFIG_TWL6040_CORE=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_PALMAS=y
CONFIG_REGULATOR_TPS65023=y
CONFIG_REGULATOR_TPS6507X=y
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index d57a85badb5e..3e2259b60236 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -12,6 +12,9 @@ CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
@@ -58,4 +61,8 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
CONFIG_COMMON_CLK_DEBUG=y
# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
CONFIG_NLS=y
+CONFIG_PRINTK_TIME=y
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index ac632cc38f24..c6ebc184bf68 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -22,6 +22,7 @@ CONFIG_CMDLINE="root=/dev/ram0 console=ttyAMA2,115200n8"
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_IDLE=y
+CONFIG_ARM_U8500_CPUIDLE=y
CONFIG_VFP=y
CONFIG_NEON=y
CONFIG_PM_RUNTIME=y
@@ -109,6 +110,8 @@ CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS=y
CONFIG_VFAT_FS=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
# CONFIG_MISC_FILESYSTEMS is not set
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index c38b58c80202..3278afe2c3ab 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -34,3 +34,4 @@ generic-y += timex.h
generic-y += trace_clock.h
generic-y += unaligned.h
generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h
index 9b28f1243bdc..240b29ef17db 100644
--- a/arch/arm/include/asm/hardware/iop3xx-adma.h
+++ b/arch/arm/include/asm/hardware/iop3xx-adma.h
@@ -393,36 +393,6 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
return slot_cnt;
}
-static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc)
-{
- return 0;
-}
-
-static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
- struct iop_adma_chan *chan)
-{
- union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
-
- switch (chan->device->id) {
- case DMA0_ID:
- case DMA1_ID:
- return hw_desc.dma->dest_addr;
- case AAU_ID:
- return hw_desc.aau->dest_addr;
- default:
- BUG();
- }
- return 0;
-}
-
-
-static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc,
- struct iop_adma_chan *chan)
-{
- BUG();
- return 0;
-}
-
static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
{
diff --git a/arch/arm/include/asm/hardware/iop_adma.h b/arch/arm/include/asm/hardware/iop_adma.h
index 122f86d8c991..250760e08103 100644
--- a/arch/arm/include/asm/hardware/iop_adma.h
+++ b/arch/arm/include/asm/hardware/iop_adma.h
@@ -82,8 +82,6 @@ struct iop_adma_chan {
* @slot_cnt: total slots used in an transaction (group of operations)
* @slots_per_op: number of slots per operation
* @idx: pool index
- * @unmap_src_cnt: number of xor sources
- * @unmap_len: transaction bytecount
* @tx_list: list of descriptors that are associated with one operation
* @async_tx: support for the async_tx api
* @group_list: list of slots that make up a multi-descriptor transaction
@@ -99,8 +97,6 @@ struct iop_adma_desc_slot {
u16 slot_cnt;
u16 slots_per_op;
u16 idx;
- u16 unmap_src_cnt;
- size_t unmap_len;
struct list_head tx_list;
struct dma_async_tx_descriptor async_tx;
union {
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 4dd21457ef9d..6976b03e5213 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -100,23 +100,19 @@
#define TASK_UNMAPPED_BASE UL(0x00000000)
#endif
-#ifndef PHYS_OFFSET
-#define PHYS_OFFSET UL(CONFIG_DRAM_BASE)
-#endif
-
#ifndef END_MEM
#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
#endif
#ifndef PAGE_OFFSET
-#define PAGE_OFFSET (PHYS_OFFSET)
+#define PAGE_OFFSET PLAT_PHYS_OFFSET
#endif
/*
* The module can be at any place in ram in nommu mode.
*/
#define MODULES_END (END_MEM)
-#define MODULES_VADDR (PHYS_OFFSET)
+#define MODULES_VADDR PAGE_OFFSET
#define XIP_VIRT_ADDR(physaddr) (physaddr)
@@ -157,6 +153,16 @@
#endif
#define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1)
+/*
+ * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
+ * memory. This is used for XIP and NoMMU kernels, or by kernels which
+ * have their own mach/memory.h. Assembly code must always use
+ * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
+ */
+#ifndef PLAT_PHYS_OFFSET
+#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
+#endif
+
#ifndef __ASSEMBLY__
/*
@@ -226,12 +232,21 @@ static inline phys_addr_t __virt_to_phys(unsigned long x)
static inline unsigned long __phys_to_virt(phys_addr_t x)
{
unsigned long t;
- __pv_stub(x, t, "sub", __PV_BITS_31_24);
+
+ /*
+	 * The 'unsigned long' cast discards the upper word when
+	 * phys_addr_t is 64-bit, and makes sure that the inline
+	 * assembler expression receives a 32-bit argument in the
+	 * place where a 32-bit 'r' operand is expected.
+ */
+ __pv_stub((unsigned long) x, t, "sub", __PV_BITS_31_24);
return t;
}
#else
+#define PHYS_OFFSET PLAT_PHYS_OFFSET
+
static inline phys_addr_t __virt_to_phys(unsigned long x)
{
return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
@@ -244,17 +259,6 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
#endif
#endif
-#endif /* __ASSEMBLY__ */
-
-#ifndef PHYS_OFFSET
-#ifdef PLAT_PHYS_OFFSET
-#define PHYS_OFFSET PLAT_PHYS_OFFSET
-#else
-#define PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
-#endif
-#endif
-
-#ifndef __ASSEMBLY__
/*
* PFNs are used to describe any physical page; this means
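In the non-patching fallback above, translation is plain constant-offset arithmetic. A simplified model, assuming the macros defined earlier in this header (a sketch, not the kernel's exact code):

/* VA and PA differ by a fixed delta; a 64-bit phys_addr_t is truncated
 * to 32 bits first, mirroring the (unsigned long) cast in
 * __phys_to_virt() above.
 */
static inline unsigned long model_phys_to_virt(unsigned long long pa)
{
	return (unsigned long)pa - PHYS_OFFSET + PAGE_OFFSET;
}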
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index be956dbf6bae..1571d126e9dd 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -61,7 +61,7 @@ extern void __pgd_error(const char *file, int line, pgd_t);
* mapping to be mapped at. This is particularly important for
* non-high vector CPUs.
*/
-#define FIRST_USER_ADDRESS PAGE_SIZE
+#define FIRST_USER_ADDRESS (PAGE_SIZE * 2)
/*
* Use TASK_SIZE as the ceiling argument for free_pgtables() and
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 14235ba64a90..716249cc2ee1 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -68,7 +68,7 @@ ENTRY(stext)
#ifdef CONFIG_ARM_MPU
/* Calculate the size of a region covering just the kernel */
- ldr r5, =PHYS_OFFSET @ Region start: PHYS_OFFSET
+ ldr r5, =PLAT_PHYS_OFFSET @ Region start: PHYS_OFFSET
ldr r6, =(_end) @ Cover whole kernel
sub r6, r6, r5 @ Minimum size of region to map
clz r6, r6 @ Region size must be 2^N...
@@ -213,7 +213,7 @@ ENTRY(__setup_mpu)
set_region_nr r0, #MPU_RAM_REGION
isb
/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
- ldr r0, =PHYS_OFFSET @ RAM starts at PHYS_OFFSET
+ ldr r0, =PLAT_PHYS_OFFSET @ RAM starts at PHYS_OFFSET
ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL)
setup_region r0, r5, r6, MPU_DATA_SIDE @ PHYS_OFFSET, shared, enabled
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 7801866e626a..32f317e5828a 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -110,7 +110,7 @@ ENTRY(stext)
sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET)
add r8, r8, r4 @ PHYS_OFFSET
#else
- ldr r8, =PHYS_OFFSET @ always constant in this case
+ ldr r8, =PLAT_PHYS_OFFSET @ always constant in this case
#endif
/*
@@ -508,6 +508,7 @@ __fixup_smp:
teq r0, #0x0 @ '0' on actual UP A9 hardware
beq __fixup_smp_on_up @ So its an A9 UP
ldr r0, [r0, #4] @ read SCU Config
+ARM_BE8(rev r0, r0) @ byteswap if big endian
and r0, r0, #0x3 @ number of CPUs
teq r0, #0x0 @ is 1?
movne pc, lr
@@ -644,7 +645,11 @@ ARM_BE8(rev16 ip, ip)
bcc 1b
bx lr
#else
+#ifdef CONFIG_CPU_ENDIAN_BE8
+ moveq r0, #0x00004000 @ set bit 22, mov to mvn instruction
+#else
moveq r0, #0x400000 @ set bit 22, mov to mvn instruction
+#endif
b 2f
1: ldr ip, [r7, r3]
#ifdef CONFIG_CPU_ENDIAN_BE8
@@ -653,7 +658,7 @@ ARM_BE8(rev16 ip, ip)
tst ip, #0x000f0000 @ check the rotation field
orrne ip, ip, r6, lsl #24 @ mask in offset bits 31-24
biceq ip, ip, #0x00004000 @ clear bit 22
- orreq ip, ip, r0, lsl #24 @ mask in offset bits 7-0
+ orreq ip, ip, r0 @ mask in offset bits 7-0
#else
bic ip, ip, #0x000000ff
tst ip, #0xf00 @ check the rotation field
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 57221e349a7c..f0d180d8b29f 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -14,11 +14,12 @@
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
+#include <asm/fncpy.h>
#include <asm/mach-types.h>
#include <asm/smp_plat.h>
#include <asm/system_misc.h>
-extern const unsigned char relocate_new_kernel[];
+extern void relocate_new_kernel(void);
extern const unsigned int relocate_new_kernel_size;
extern unsigned long kexec_start_address;
@@ -142,6 +143,8 @@ void machine_kexec(struct kimage *image)
{
unsigned long page_list;
unsigned long reboot_code_buffer_phys;
+ unsigned long reboot_entry = (unsigned long)relocate_new_kernel;
+ unsigned long reboot_entry_phys;
void *reboot_code_buffer;
/*
@@ -168,16 +171,16 @@ void machine_kexec(struct kimage *image)
/* copy our kernel relocation code to the control code page */
- memcpy(reboot_code_buffer,
- relocate_new_kernel, relocate_new_kernel_size);
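+	/* fncpy() preserves bit 0 of the address, i.e. the ARM/Thumb state */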
+ reboot_entry = fncpy(reboot_code_buffer,
+ reboot_entry,
+ relocate_new_kernel_size);
+ reboot_entry_phys = (unsigned long)reboot_entry +
+ (reboot_code_buffer_phys - (unsigned long)reboot_code_buffer);
-
- flush_icache_range((unsigned long) reboot_code_buffer,
- (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
printk(KERN_INFO "Bye!\n");
if (kexec_reinit)
kexec_reinit();
- soft_restart(reboot_code_buffer_phys);
+ soft_restart(reboot_entry_phys);
}
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 94f6b05f9e24..92f7b15dd221 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -404,6 +404,7 @@ EXPORT_SYMBOL(dump_fpu);
unsigned long get_wchan(struct task_struct *p)
{
struct stackframe frame;
+ unsigned long stack_page;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
@@ -412,9 +413,11 @@ unsigned long get_wchan(struct task_struct *p)
frame.sp = thread_saved_sp(p);
frame.lr = 0; /* recovered from the stack */
frame.pc = thread_saved_pc(p);
+ stack_page = (unsigned long)task_stack_page(p);
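+	/* stop unwinding as soon as the saved sp leaves p's stack */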
do {
- int ret = unwind_frame(&frame);
- if (ret < 0)
+ if (frame.sp < stack_page ||
+ frame.sp >= stack_page + THREAD_SIZE ||
+ unwind_frame(&frame) < 0)
return 0;
if (!in_sched_functions(frame.pc))
return frame.pc;
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index d0cdedf4864d..95858966d84e 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -2,10 +2,12 @@
* relocate_kernel.S - put the kernel image in place to boot
*/
+#include <linux/linkage.h>
#include <asm/kexec.h>
- .globl relocate_new_kernel
-relocate_new_kernel:
+ .align 3 /* not needed for this code, but keeps fncpy() happy */
+
+ENTRY(relocate_new_kernel)
ldr r0,kexec_indirection_page
ldr r1,kexec_start_address
@@ -79,6 +81,8 @@ kexec_mach_type:
kexec_boot_atags:
.long 0x0
+ENDPROC(relocate_new_kernel)
+
relocate_new_kernel_end:
.globl relocate_new_kernel_size
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 6a1b8a81b1ae..987a7f5bce5f 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -873,8 +873,6 @@ void __init setup_arch(char **cmdline_p)
machine_desc = mdesc;
machine_name = mdesc->name;
- setup_dma_zone(mdesc);
-
if (mdesc->reboot_mode != REBOOT_HARD)
reboot_mode = mdesc->reboot_mode;
@@ -892,6 +890,7 @@ void __init setup_arch(char **cmdline_p)
sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
+ setup_dma_zone(mdesc);
sanity_check_meminfo();
arm_memblock_init(&meminfo, mdesc);
diff --git a/arch/arm/kernel/sigreturn_codes.S b/arch/arm/kernel/sigreturn_codes.S
index 3c5d0f2170fd..b84d0cb13682 100644
--- a/arch/arm/kernel/sigreturn_codes.S
+++ b/arch/arm/kernel/sigreturn_codes.S
@@ -30,6 +30,27 @@
* snippets.
*/
+/*
+ * In the CPU_THUMBONLY case, ARM opcodes are not allowed in the kernel.
+ * Note that in this case the code skips those instructions, but uses the
+ * .org directive to keep the correct layout of the sigreturn_codes array.
+ */
+#ifndef CONFIG_CPU_THUMBONLY
+#define ARM_OK(code...) code
+#else
+#define ARM_OK(code...)
+#endif
+
+ .macro arm_slot n
+ .org sigreturn_codes + 12 * (\n)
+ARM_OK( .arm )
+ .endm
+
+ .macro thumb_slot n
+ .org sigreturn_codes + 12 * (\n) + 8
+ .thumb
+ .endm
+
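
The .org directives above pin each snippet pair into a fixed 12-byte slot:
the ARM variant at offset 12 * n, the Thumb variant at 12 * n + 8. A
stand-alone sketch of that layout (the slot size is read off the macros,
nothing else is assumed):

	#include <stdio.h>

	#define SLOT_SIZE	12	/* from the .org expressions above */

	int main(void)
	{
		int n;

		for (n = 0; n < 3; n++)
			printf("slot %d: ARM at +%2d, Thumb at +%2d\n",
			       n, SLOT_SIZE * n, SLOT_SIZE * n + 8);
		return 0;
	}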
#if __LINUX_ARM_ARCH__ <= 4
/*
* Note we manually set minimally required arch that supports
@@ -45,26 +66,27 @@
.global sigreturn_codes
.type sigreturn_codes, #object
- .arm
+ .align
sigreturn_codes:
/* ARM sigreturn syscall code snippet */
- mov r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
- swi #(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)
+ arm_slot 0
+ARM_OK( mov r7, #(__NR_sigreturn - __NR_SYSCALL_BASE) )
+ARM_OK( swi #(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE) )
/* Thumb sigreturn syscall code snippet */
- .thumb
+ thumb_slot 0
movs r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
swi #0
/* ARM sigreturn_rt syscall code snippet */
- .arm
- mov r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
- swi #(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)
+ arm_slot 1
+ARM_OK( mov r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE) )
+ARM_OK( swi #(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE) )
/* Thumb sigreturn_rt syscall code snippet */
- .thumb
+ thumb_slot 1
movs r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
swi #0
@@ -74,7 +96,7 @@ sigreturn_codes:
* it is thumb case or not, so we need additional
* word after real last entry.
*/
- .arm
+ arm_slot 2
.space 4
.size sigreturn_codes, . - sigreturn_codes
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 00f79e59985b..af4e8c8a5422 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -31,7 +31,7 @@ int notrace unwind_frame(struct stackframe *frame)
high = ALIGN(low, THREAD_SIZE);
/* check current frame pointer is within bounds */
- if (fp < (low + 12) || fp + 4 >= high)
+ if (fp < low + 12 || fp > high - 4)
return -EINVAL;
/* restore the registers from the stack frame */
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 6125f259b7b5..7940241f0576 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -509,9 +509,10 @@ static inline int
__do_cache_op(unsigned long start, unsigned long end)
{
int ret;
- unsigned long chunk = PAGE_SIZE;
do {
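+		/* at most one page per pass, and never past 'end' */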
+ unsigned long chunk = min(PAGE_SIZE, end - start);
+
if (signal_pending(current)) {
struct thread_info *ti = current_thread_info();
@@ -856,7 +857,7 @@ static void __init kuser_init(void *vectors)
memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
}
#else
-static void __init kuser_init(void *vectors)
+static inline void __init kuser_init(void *vectors)
{
}
#endif
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 371958370de4..580906989db1 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -334,6 +334,17 @@ out:
return err;
}
+static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
+{
+ if (!is_vmalloc_addr(kaddr)) {
+ BUG_ON(!virt_addr_valid(kaddr));
+ return __pa(kaddr);
+ } else {
+ return page_to_phys(vmalloc_to_page(kaddr)) +
+ offset_in_page(kaddr);
+ }
+}
+
/**
* create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
* @from: The virtual kernel start address of the range
@@ -345,16 +356,27 @@ out:
*/
int create_hyp_mappings(void *from, void *to)
{
- unsigned long phys_addr = virt_to_phys(from);
+ phys_addr_t phys_addr;
+ unsigned long virt_addr;
unsigned long start = KERN_TO_HYP((unsigned long)from);
unsigned long end = KERN_TO_HYP((unsigned long)to);
- /* Check for a valid kernel memory mapping */
- if (!virt_addr_valid(from) || !virt_addr_valid(to - 1))
- return -EINVAL;
+ start = start & PAGE_MASK;
+ end = PAGE_ALIGN(end);
- return __create_hyp_mappings(hyp_pgd, start, end,
- __phys_to_pfn(phys_addr), PAGE_HYP);
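+	/* a vmalloc'd range is not physically contiguous, so map it page by page */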
+ for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
+ int err;
+
+ phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
+ err = __create_hyp_mappings(hyp_pgd, virt_addr,
+ virt_addr + PAGE_SIZE,
+ __phys_to_pfn(phys_addr),
+ PAGE_HYP);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
/**
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index e0c68d5bb7dc..52886b89706c 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -10,7 +10,7 @@ UNWIND( .fnstart )
and r3, r0, #31 @ Get bit offset
mov r0, r0, lsr #5
add r1, r1, r0, lsl #2 @ Get word offset
-#if __LINUX_ARM_ARCH__ >= 7
+#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
.arch_extension mp
ALT_SMP(W(pldw) [r1])
ALT_UP(W(nop))
diff --git a/arch/arm/lib/delay-loop.S b/arch/arm/lib/delay-loop.S
index 36b668d8e121..bc1033b897b4 100644
--- a/arch/arm/lib/delay-loop.S
+++ b/arch/arm/lib/delay-loop.S
@@ -40,6 +40,7 @@ ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0x7fffff06
/*
* loops = r0 * HZ * loops_per_jiffy / 1000000
*/
+ .align 3
@ Delay routine
ENTRY(__loop_delay)
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index f607deb40f4d..bc7b363a3083 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -174,7 +174,6 @@ clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev)
static struct clock_event_device clkevt = {
.name = "at91_tick",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .shift = 32,
.rating = 150,
.set_next_event = clkevt32k_next_event,
.set_mode = clkevt32k_mode,
@@ -265,11 +264,9 @@ void __init at91rm9200_timer_init(void)
at91_st_write(AT91_ST_RTMR, 1);
/* Setup timer clockevent, with minimum of two ticks (important!!) */
- clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift);
- clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt);
- clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1;
clkevt.cpumask = cpumask_of(0);
- clockevents_register_device(&clkevt);
+ clockevents_config_and_register(&clkevt, AT91_SLOW_CLOCK,
+ 2, AT91_ST_ALMV);
/* register clocksource */
clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK);
diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h
index 3ed190ce062b..c5101dcb4fb0 100644
--- a/arch/arm/mach-at91/pm.h
+++ b/arch/arm/mach-at91/pm.h
@@ -16,7 +16,11 @@
#include <mach/at91_ramc.h>
#include <mach/at91rm9200_sdramc.h>
+#ifdef CONFIG_PM
extern void at91_pm_set_standby(void (*at91_standby)(void));
+#else
+static inline void at91_pm_set_standby(void (*at91_standby)(void)) { }
+#endif
/*
* The AT91RM9200 goes into self-refresh mode with this command, and will
diff --git a/arch/arm/mach-at91/sama5d3.c b/arch/arm/mach-at91/sama5d3.c
index 3ea86428ee09..a28873fe3049 100644
--- a/arch/arm/mach-at91/sama5d3.c
+++ b/arch/arm/mach-at91/sama5d3.c
@@ -95,19 +95,19 @@ static struct clk twi0_clk = {
.name = "twi0_clk",
.pid = SAMA5D3_ID_TWI0,
.type = CLK_TYPE_PERIPHERAL,
- .div = AT91_PMC_PCR_DIV2,
+ .div = AT91_PMC_PCR_DIV8,
};
static struct clk twi1_clk = {
.name = "twi1_clk",
.pid = SAMA5D3_ID_TWI1,
.type = CLK_TYPE_PERIPHERAL,
- .div = AT91_PMC_PCR_DIV2,
+ .div = AT91_PMC_PCR_DIV8,
};
static struct clk twi2_clk = {
.name = "twi2_clk",
.pid = SAMA5D3_ID_TWI2,
.type = CLK_TYPE_PERIPHERAL,
- .div = AT91_PMC_PCR_DIV2,
+ .div = AT91_PMC_PCR_DIV8,
};
static struct clk mmc0_clk = {
.name = "mci0_clk",
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index c46eccbbd512..78829c513fdc 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -487,7 +487,7 @@ int __init da8xx_register_emac(void)
static struct resource da830_mcasp1_resources[] = {
{
- .name = "mcasp1",
+ .name = "mpu",
.start = DAVINCI_DA830_MCASP1_REG_BASE,
.end = DAVINCI_DA830_MCASP1_REG_BASE + (SZ_1K * 12) - 1,
.flags = IORESOURCE_MEM,
@@ -515,7 +515,7 @@ static struct platform_device da830_mcasp1_device = {
static struct resource da850_mcasp_resources[] = {
{
- .name = "mcasp",
+ .name = "mpu",
.start = DAVINCI_DA8XX_MCASP0_REG_BASE,
.end = DAVINCI_DA8XX_MCASP0_REG_BASE + (SZ_1K * 12) - 1,
.flags = IORESOURCE_MEM,
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index ef9ff1fb6f52..6117fc644188 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -641,6 +641,7 @@ static struct platform_device dm355_edma_device = {
static struct resource dm355_asp1_resources[] = {
{
+ .name = "mpu",
.start = DAVINCI_ASP1_BASE,
.end = DAVINCI_ASP1_BASE + SZ_8K - 1,
.flags = IORESOURCE_MEM,
@@ -906,7 +907,7 @@ static struct davinci_gpio_platform_data dm355_gpio_platform_data = {
int __init dm355_gpio_register(void)
{
return davinci_gpio_register(dm355_gpio_resources,
- sizeof(dm355_gpio_resources),
+ ARRAY_SIZE(dm355_gpio_resources),
&dm355_gpio_platform_data);
}
/*----------------------------------------------------------------------*/
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 1511a0680f9a..d7c6f85d3fc9 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -720,7 +720,7 @@ static struct davinci_gpio_platform_data dm365_gpio_platform_data = {
int __init dm365_gpio_register(void)
{
return davinci_gpio_register(dm365_gpio_resources,
- sizeof(dm365_gpio_resources),
+ ARRAY_SIZE(dm365_gpio_resources),
&dm365_gpio_platform_data);
}
@@ -942,6 +942,7 @@ static struct platform_device dm365_edma_device = {
static struct resource dm365_asp_resources[] = {
{
+ .name = "mpu",
.start = DAVINCI_DM365_ASP0_BASE,
.end = DAVINCI_DM365_ASP0_BASE + SZ_8K - 1,
.flags = IORESOURCE_MEM,
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 143a3217e8ef..3ce47997bb46 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -572,6 +572,7 @@ static struct platform_device dm644x_edma_device = {
/* DM6446 EVM uses ASP0; line-out is a pair of RCA jacks */
static struct resource dm644x_asp_resources[] = {
{
+ .name = "mpu",
.start = DAVINCI_ASP0_BASE,
.end = DAVINCI_ASP0_BASE + SZ_8K - 1,
.flags = IORESOURCE_MEM,
@@ -792,7 +793,7 @@ static struct davinci_gpio_platform_data dm644_gpio_platform_data = {
int __init dm644x_gpio_register(void)
{
return davinci_gpio_register(dm644_gpio_resources,
- sizeof(dm644_gpio_resources),
+ ARRAY_SIZE(dm644_gpio_resources),
&dm644_gpio_platform_data);
}
/*----------------------------------------------------------------------*/
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 2a73f299c1d0..0e81fea65e7f 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -621,7 +621,7 @@ static struct platform_device dm646x_edma_device = {
static struct resource dm646x_mcasp0_resources[] = {
{
- .name = "mcasp0",
+ .name = "mpu",
.start = DAVINCI_DM646X_MCASP0_REG_BASE,
.end = DAVINCI_DM646X_MCASP0_REG_BASE + (SZ_1K << 1) - 1,
.flags = IORESOURCE_MEM,
@@ -641,7 +641,7 @@ static struct resource dm646x_mcasp0_resources[] = {
static struct resource dm646x_mcasp1_resources[] = {
{
- .name = "mcasp1",
+ .name = "mpu",
.start = DAVINCI_DM646X_MCASP1_REG_BASE,
.end = DAVINCI_DM646X_MCASP1_REG_BASE + (SZ_1K << 1) - 1,
.flags = IORESOURCE_MEM,
@@ -769,7 +769,7 @@ static struct davinci_gpio_platform_data dm646x_gpio_platform_data = {
int __init dm646x_gpio_register(void)
{
return davinci_gpio_register(dm646x_gpio_resources,
- sizeof(dm646x_gpio_resources),
+ ARRAY_SIZE(dm646x_gpio_resources),
&dm646x_gpio_platform_data);
}
/*----------------------------------------------------------------------*/
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
index 2739ca2c1334..e0091685fd48 100644
--- a/arch/arm/mach-footbridge/common.c
+++ b/arch/arm/mach-footbridge/common.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/spinlock.h>
+#include <video/vga.h>
#include <asm/pgtable.h>
#include <asm/page.h>
@@ -196,6 +197,8 @@ void __init footbridge_map_io(void)
iotable_init(ebsa285_host_io_desc, ARRAY_SIZE(ebsa285_host_io_desc));
pci_map_io_early(__phys_to_pfn(DC21285_PCI_IO));
}
+
+ vga_base = PCIMEM_BASE;
}
void footbridge_restart(enum reboot_mode mode, const char *cmd)
diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
index 3490a24f969e..7c2fdae9a38b 100644
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -18,7 +18,6 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/spinlock.h>
-#include <video/vga.h>
#include <asm/irq.h>
#include <asm/mach/pci.h>
@@ -291,7 +290,6 @@ void __init dc21285_preinit(void)
int cfn_mode;
pcibios_min_mem = 0x81000000;
- vga_base = PCIMEM_BASE;
mem_size = (unsigned int)high_memory - PAGE_OFFSET;
for (mem_mask = 0x00100000; mem_mask < 0x10000000; mem_mask <<= 1)
diff --git a/arch/arm/mach-footbridge/ebsa285.c b/arch/arm/mach-footbridge/ebsa285.c
index b08243500e2e..1a7235fb52ac 100644
--- a/arch/arm/mach-footbridge/ebsa285.c
+++ b/arch/arm/mach-footbridge/ebsa285.c
@@ -30,21 +30,24 @@ static const struct {
const char *name;
const char *trigger;
} ebsa285_leds[] = {
- { "ebsa285:amber", "heartbeat", },
- { "ebsa285:green", "cpu0", },
+ { "ebsa285:amber", "cpu0", },
+ { "ebsa285:green", "heartbeat", },
{ "ebsa285:red",},
};
+static unsigned char hw_led_state;
+
static void ebsa285_led_set(struct led_classdev *cdev,
enum led_brightness b)
{
struct ebsa285_led *led = container_of(cdev,
struct ebsa285_led, cdev);
- if (b != LED_OFF)
- *XBUS_LEDS |= led->mask;
+ if (b == LED_OFF)
+ hw_led_state |= led->mask;
else
- *XBUS_LEDS &= ~led->mask;
+ hw_led_state &= ~led->mask;
+ *XBUS_LEDS = hw_led_state;
}
static enum led_brightness ebsa285_led_get(struct led_classdev *cdev)
@@ -52,18 +55,19 @@ static enum led_brightness ebsa285_led_get(struct led_classdev *cdev)
struct ebsa285_led *led = container_of(cdev,
struct ebsa285_led, cdev);
- return (*XBUS_LEDS & led->mask) ? LED_FULL : LED_OFF;
+ return hw_led_state & led->mask ? LED_OFF : LED_FULL;
}
static int __init ebsa285_leds_init(void)
{
int i;
- if (machine_is_ebsa285())
+ if (!machine_is_ebsa285())
return -ENODEV;
- /* 3 LEDS All ON */
- *XBUS_LEDS |= XBUS_LED_AMBER | XBUS_LED_GREEN | XBUS_LED_RED;
+	/* 3 LEDs all off */
+ hw_led_state = XBUS_LED_AMBER | XBUS_LED_GREEN | XBUS_LED_RED;
+ *XBUS_LEDS = hw_led_state;
for (i = 0; i < ARRAY_SIZE(ebsa285_leds); i++) {
struct ebsa285_led *led;
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index b3d7e5634b83..bd3bf66ce344 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -17,12 +17,15 @@
#include <linux/clkdev.h>
#include <linux/clocksource.h>
#include <linux/dma-mapping.h>
+#include <linux/input.h>
#include <linux/io.h>
#include <linux/irqchip.h>
+#include <linux/mailbox.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
+#include <linux/reboot.h>
#include <linux/amba/bus.h>
#include <linux/platform_device.h>
@@ -130,6 +133,24 @@ static struct platform_device highbank_cpuidle_device = {
.name = "cpuidle-calxeda",
};
+static int hb_keys_notifier(struct notifier_block *nb, unsigned long event, void *data)
+{
+ u32 key = *(u32 *)data;
+
+ if (event != 0x1000)
+ return 0;
+
+ if (key == KEY_POWER)
+ orderly_poweroff(false);
+ else if (key == 0xffff)
+ ctrl_alt_del();
+
+ return 0;
+}
+static struct notifier_block hb_keys_nb = {
+ .notifier_call = hb_keys_notifier,
+};
+
static void __init highbank_init(void)
{
struct device_node *np;
@@ -145,6 +166,8 @@ static void __init highbank_init(void)
bus_register_notifier(&platform_bus_type, &highbank_platform_nb);
bus_register_notifier(&amba_bustype, &highbank_amba_nb);
+ pl320_ipc_register_notifier(&hb_keys_nb);
+
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
if (psci_ops.cpu_suspend)
diff --git a/arch/arm/mach-iop13xx/include/mach/adma.h b/arch/arm/mach-iop13xx/include/mach/adma.h
index 6d3782d85a9f..a86fd0ed7757 100644
--- a/arch/arm/mach-iop13xx/include/mach/adma.h
+++ b/arch/arm/mach-iop13xx/include/mach/adma.h
@@ -218,20 +218,6 @@ iop_chan_xor_slot_count(size_t len, int src_cnt, int *slots_per_op)
#define iop_chan_pq_slot_count iop_chan_xor_slot_count
#define iop_chan_pq_zero_sum_slot_count iop_chan_xor_slot_count
-static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
- struct iop_adma_chan *chan)
-{
- struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
- return hw_desc->dest_addr;
-}
-
-static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc,
- struct iop_adma_chan *chan)
-{
- struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
- return hw_desc->q_dest_addr;
-}
-
static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
{
@@ -350,18 +336,6 @@ iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
hw_desc->desc_ctrl = u_desc_ctrl.value;
}
-static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc)
-{
- struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
- union {
- u32 value;
- struct iop13xx_adma_desc_ctrl field;
- } u_desc_ctrl;
-
- u_desc_ctrl.value = hw_desc->desc_ctrl;
- return u_desc_ctrl.field.pq_xfer_en;
-}
-
static inline void
iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
unsigned long flags)
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 1f25f3e99c05..adcef406ff0a 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -19,11 +19,11 @@ secure-common = omap-smc.o omap-secure.o
obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common)
obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) $(secure-common)
-obj-$(CONFIG_ARCH_OMAP4) += prm44xx.o $(hwmod-common) $(secure-common)
+obj-$(CONFIG_ARCH_OMAP4) += $(hwmod-common) $(secure-common)
obj-$(CONFIG_SOC_AM33XX) += irq.o $(hwmod-common)
-obj-$(CONFIG_SOC_OMAP5) += prm44xx.o $(hwmod-common) $(secure-common)
+obj-$(CONFIG_SOC_OMAP5) += $(hwmod-common) $(secure-common)
obj-$(CONFIG_SOC_AM43XX) += $(hwmod-common) $(secure-common)
-obj-$(CONFIG_SOC_DRA7XX) += prm44xx.o $(hwmod-common) $(secure-common)
+obj-$(CONFIG_SOC_DRA7XX) += $(hwmod-common) $(secure-common)
ifneq ($(CONFIG_SND_OMAP_SOC_MCBSP),)
obj-y += mcbsp.o
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 19f1652e94cf..8d972ff18c56 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -131,6 +131,24 @@ DT_MACHINE_START(OMAP3_GP_DT, "Generic OMAP3-GP (Flattened Device Tree)")
.dt_compat = omap3_gp_boards_compat,
.restart = omap3xxx_restart,
MACHINE_END
+
+static const char *am3517_boards_compat[] __initdata = {
+ "ti,am3517",
+ NULL,
+};
+
+DT_MACHINE_START(AM3517_DT, "Generic AM3517 (Flattened Device Tree)")
+ .reserve = omap_reserve,
+ .map_io = omap3_map_io,
+ .init_early = am35xx_init_early,
+ .init_irq = omap_intc_of_init,
+ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = omap_generic_init,
+ .init_late = omap3_init_late,
+ .init_time = omap3_gptimer_timer_init,
+ .dt_compat = am3517_boards_compat,
+ .restart = omap3xxx_restart,
+MACHINE_END
#endif
#ifdef CONFIG_SOC_AM33XX
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index 4ec8d82b0492..44a59c3abfb0 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -242,12 +242,18 @@ static void __init ldp_display_init(void)
static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio)
{
+ int res;
+
/* LCD enable GPIO */
ldp_lcd_pdata.enable_gpio = gpio + 7;
/* Backlight enable GPIO */
ldp_lcd_pdata.backlight_gpio = gpio + 15;
+ res = platform_device_register(&ldp_lcd_device);
+ if (res)
+ pr_err("Unable to register LCD: %d\n", res);
+
return 0;
}
@@ -346,7 +352,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
static struct platform_device *ldp_devices[] __initdata = {
&ldp_gpio_keys_device,
- &ldp_lcd_device,
};
#ifdef CONFIG_OMAP_MUX
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index f7644febee81..e30ef6797c63 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -299,7 +299,6 @@ struct omap_sdrc_params;
extern void omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
struct omap_sdrc_params *sdrc_cs1);
struct omap2_hsmmc_info;
-extern int omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers);
extern void omap_reserve(void);
struct omap_hwmod;
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index a4e536b11ec9..4cf165502b35 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -32,7 +32,6 @@
#include "soc.h"
#include "iomap.h"
-#include "mux.h"
#include "control.h"
#include "display.h"
#include "prm.h"
@@ -102,35 +101,6 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initconst = {
{ "dss_hdmi", "omapdss_hdmi", -1 },
};
-static void __init omap4_tpd12s015_mux_pads(void)
-{
- omap_mux_init_signal("hdmi_cec",
- OMAP_PIN_INPUT_PULLUP);
- omap_mux_init_signal("hdmi_ddc_scl",
- OMAP_PIN_INPUT_PULLUP);
- omap_mux_init_signal("hdmi_ddc_sda",
- OMAP_PIN_INPUT_PULLUP);
-}
-
-static void __init omap4_hdmi_mux_pads(enum omap_hdmi_flags flags)
-{
- u32 reg;
- u16 control_i2c_1;
-
- /*
- * CONTROL_I2C_1: HDMI_DDC_SDA_PULLUPRESX (bit 28) and
- * HDMI_DDC_SCL_PULLUPRESX (bit 24) are set to disable
- * internal pull up resistor.
- */
- if (flags & OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP) {
- control_i2c_1 = OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_I2C_1;
- reg = omap4_ctrl_pad_readl(control_i2c_1);
- reg |= (OMAP4_HDMI_DDC_SDA_PULLUPRESX_MASK |
- OMAP4_HDMI_DDC_SCL_PULLUPRESX_MASK);
- omap4_ctrl_pad_writel(reg, control_i2c_1);
- }
-}
-
static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
{
u32 enable_mask, enable_shift;
@@ -164,16 +134,6 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
return 0;
}
-int __init omap_hdmi_init(enum omap_hdmi_flags flags)
-{
- if (cpu_is_omap44xx()) {
- omap4_hdmi_mux_pads(flags);
- omap4_tpd12s015_mux_pads();
- }
-
- return 0;
-}
-
static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask)
{
if (cpu_is_omap44xx())
diff --git a/arch/arm/mach-omap2/dss-common.c b/arch/arm/mach-omap2/dss-common.c
index 365bfd3d9c68..dadccc91488c 100644
--- a/arch/arm/mach-omap2/dss-common.c
+++ b/arch/arm/mach-omap2/dss-common.c
@@ -223,7 +223,7 @@ void __init omap_4430sdp_display_init_of(void)
static struct connector_dvi_platform_data omap3_igep2_dvi_connector_pdata = {
.name = "dvi",
.source = "tfp410.0",
- .i2c_bus_num = 3,
+ .i2c_bus_num = 2,
};
static struct platform_device omap3_igep2_dvi_connector_device = {
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 81de56251955..d24926e6340f 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -1502,6 +1502,22 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
}
/*
+ * For some GPMC devices we still need to rely on the bootloader
+ * timings because the devices can be connected via FPGA. So far
+ * the list is smc91x on the omap2 SDP boards, and 8250 on zooms.
+ * REVISIT: Add timing support from slls644g.pdf and from the
+ * lan91c96 manual.
+ */
+ if (of_device_is_compatible(child, "ns16550a") ||
+ of_device_is_compatible(child, "smsc,lan91c94") ||
+ of_device_is_compatible(child, "smsc,lan91c111")) {
+ dev_warn(&pdev->dev,
+ "%s using bootloader timings on CS%d\n",
+ child->name, cs);
+ goto no_timings;
+ }
+
+ /*
* FIXME: gpmc_cs_request() will map the CS to an arbitary
* location in the gpmc address space. When booting with
* device-tree we want the NOR flash to be mapped to the
@@ -1529,6 +1545,7 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
gpmc_read_timings_dt(child, &gpmc_t);
gpmc_cs_set_timings(cs, &gpmc_t);
+no_timings:
if (of_platform_device_create(child, NULL, &pdev->dev))
return 0;
@@ -1541,42 +1558,6 @@ err:
return ret;
}
-/*
- * REVISIT: Add timing support from slls644g.pdf
- */
-static int gpmc_probe_8250(struct platform_device *pdev,
- struct device_node *child)
-{
- struct resource res;
- unsigned long base;
- int ret, cs;
-
- if (of_property_read_u32(child, "reg", &cs) < 0) {
- dev_err(&pdev->dev, "%s has no 'reg' property\n",
- child->full_name);
- return -ENODEV;
- }
-
- if (of_address_to_resource(child, 0, &res) < 0) {
- dev_err(&pdev->dev, "%s has malformed 'reg' property\n",
- child->full_name);
- return -ENODEV;
- }
-
- ret = gpmc_cs_request(cs, resource_size(&res), &base);
- if (ret < 0) {
- dev_err(&pdev->dev, "cannot request GPMC CS %d\n", cs);
- return ret;
- }
-
- if (of_platform_device_create(child, NULL, &pdev->dev))
- return 0;
-
- dev_err(&pdev->dev, "failed to create gpmc child %s\n", child->name);
-
- return -ENODEV;
-}
-
static int gpmc_probe_dt(struct platform_device *pdev)
{
int ret;
@@ -1618,10 +1599,9 @@ static int gpmc_probe_dt(struct platform_device *pdev)
else if (of_node_cmp(child->name, "onenand") == 0)
ret = gpmc_probe_onenand_child(pdev, child);
else if (of_node_cmp(child->name, "ethernet") == 0 ||
- of_node_cmp(child->name, "nor") == 0)
+ of_node_cmp(child->name, "nor") == 0 ||
+ of_node_cmp(child->name, "uart") == 0)
ret = gpmc_probe_generic_child(pdev, child);
- else if (of_node_cmp(child->name, "8250") == 0)
- ret = gpmc_probe_8250(pdev, child);
if (WARN(ret < 0, "%s: probing gpmc child %s failed\n",
__func__, child->full_name))
diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h
index 8cc7d331437d..3e97c6c8ecf1 100644
--- a/arch/arm/mach-omap2/omap-secure.h
+++ b/arch/arm/mach-omap2/omap-secure.h
@@ -76,6 +76,13 @@ static inline void omap_barrier_reserve_memblock(void)
{ }
#endif
+#ifdef CONFIG_SOC_HAS_REALTIME_COUNTER
void set_cntfreq(void);
+#else
+static inline void set_cntfreq(void)
+{
+}
+#endif
+
#endif /* __ASSEMBLER__ */
#endif /* OMAP_ARCH_OMAP_SECURE_H */
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 57911430324e..b39efd46abf9 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -35,7 +35,6 @@
#include "iomap.h"
#include "common.h"
#include "mmc.h"
-#include "hsmmc.h"
#include "prminst44xx.h"
#include "prcm_mpu44xx.h"
#include "omap4-sar-layout.h"
@@ -284,59 +283,3 @@ skip_errata_init:
omap_wakeupgen_init();
irqchip_init();
}
-
-#if defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
-static int omap4_twl6030_hsmmc_late_init(struct device *dev)
-{
- int irq = 0;
- struct platform_device *pdev = container_of(dev,
- struct platform_device, dev);
- struct omap_mmc_platform_data *pdata = dev->platform_data;
-
- /* Setting MMC1 Card detect Irq */
- if (pdev->id == 0) {
- irq = twl6030_mmc_card_detect_config();
- if (irq < 0) {
- dev_err(dev, "%s: Error card detect config(%d)\n",
- __func__, irq);
- return irq;
- }
- pdata->slots[0].card_detect_irq = irq;
- pdata->slots[0].card_detect = twl6030_mmc_card_detect;
- }
- return 0;
-}
-
-static __init void omap4_twl6030_hsmmc_set_late_init(struct device *dev)
-{
- struct omap_mmc_platform_data *pdata;
-
- /* dev can be null if CONFIG_MMC_OMAP_HS is not set */
- if (!dev) {
- pr_err("Failed %s\n", __func__);
- return;
- }
- pdata = dev->platform_data;
- pdata->init = omap4_twl6030_hsmmc_late_init;
-}
-
-int __init omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers)
-{
- struct omap2_hsmmc_info *c;
-
- omap_hsmmc_init(controllers);
- for (c = controllers; c->mmc; c++) {
- /* pdev can be null if CONFIG_MMC_OMAP_HS is not set */
- if (!c->pdev)
- continue;
- omap4_twl6030_hsmmc_set_late_init(&c->pdev->dev);
- }
-
- return 0;
-}
-#else
-int __init omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers)
-{
- return 0;
-}
-#endif
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 53f0735817bb..e0a398cf28d8 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -183,6 +183,10 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
odbfd_exit1:
kfree(hwmods);
odbfd_exit:
+	/* if the data (or we) are at fault, load up a fail handler */
+ if (ret)
+ pdev->dev.pm_domain = &omap_device_fail_pm_domain;
+
return ret;
}
@@ -604,6 +608,19 @@ static int _od_runtime_resume(struct device *dev)
return pm_generic_runtime_resume(dev);
}
+
+static int _od_fail_runtime_suspend(struct device *dev)
+{
+ dev_warn(dev, "%s: FIXME: missing hwmod/omap_dev info\n", __func__);
+ return -ENODEV;
+}
+
+static int _od_fail_runtime_resume(struct device *dev)
+{
+ dev_warn(dev, "%s: FIXME: missing hwmod/omap_dev info\n", __func__);
+ return -ENODEV;
+}
+
#endif
#ifdef CONFIG_SUSPEND
@@ -657,6 +674,13 @@ static int _od_resume_noirq(struct device *dev)
#define _od_resume_noirq NULL
#endif
+struct dev_pm_domain omap_device_fail_pm_domain = {
+ .ops = {
+ SET_RUNTIME_PM_OPS(_od_fail_runtime_suspend,
+ _od_fail_runtime_resume, NULL)
+ }
+};
+
struct dev_pm_domain omap_device_pm_domain = {
.ops = {
SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume,
diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
index 17ca1aec2710..78c02b355179 100644
--- a/arch/arm/mach-omap2/omap_device.h
+++ b/arch/arm/mach-omap2/omap_device.h
@@ -29,6 +29,7 @@
#include "omap_hwmod.h"
extern struct dev_pm_domain omap_device_pm_domain;
+extern struct dev_pm_domain omap_device_fail_pm_domain;
/* omap_device._state values */
#define OMAP_DEVICE_STATE_UNKNOWN 0
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index e3f0ecaf87dd..8a1b5e0bad40 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -399,7 +399,7 @@ static int _set_clockactivity(struct omap_hwmod *oh, u8 clockact, u32 *v)
}
/**
- * _set_softreset: set OCP_SYSCONFIG.CLOCKACTIVITY bits in @v
+ * _set_softreset: set OCP_SYSCONFIG.SOFTRESET bit in @v
* @oh: struct omap_hwmod *
* @v: pointer to register contents to modify
*
@@ -427,6 +427,36 @@ static int _set_softreset(struct omap_hwmod *oh, u32 *v)
}
/**
+ * _clear_softreset: clear OCP_SYSCONFIG.SOFTRESET bit in @v
+ * @oh: struct omap_hwmod *
+ * @v: pointer to register contents to modify
+ *
+ * Clear the SOFTRESET bit in @v for hwmod @oh. Returns -EINVAL upon
+ * error or 0 upon success.
+ */
+static int _clear_softreset(struct omap_hwmod *oh, u32 *v)
+{
+ u32 softrst_mask;
+
+ if (!oh->class->sysc ||
+ !(oh->class->sysc->sysc_flags & SYSC_HAS_SOFTRESET))
+ return -EINVAL;
+
+ if (!oh->class->sysc->sysc_fields) {
+ WARN(1,
+ "omap_hwmod: %s: sysc_fields absent for sysconfig class\n",
+ oh->name);
+ return -EINVAL;
+ }
+
+ softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift);
+
+ *v &= ~softrst_mask;
+
+ return 0;
+}
+
+/**
* _wait_softreset_complete - wait for an OCP softreset to complete
* @oh: struct omap_hwmod * to wait on
*
@@ -785,6 +815,7 @@ static int _init_interface_clks(struct omap_hwmod *oh)
pr_warning("omap_hwmod: %s: cannot clk_get interface_clk %s\n",
oh->name, os->clk);
ret = -EINVAL;
+ continue;
}
os->_clk = c;
/*
@@ -821,6 +852,7 @@ static int _init_opt_clks(struct omap_hwmod *oh)
pr_warning("omap_hwmod: %s: cannot clk_get opt_clk %s\n",
oh->name, oc->clk);
ret = -EINVAL;
+ continue;
}
oc->_clk = c;
/*
@@ -1911,6 +1943,12 @@ static int _ocp_softreset(struct omap_hwmod *oh)
ret = _set_softreset(oh, &v);
if (ret)
goto dis_opt_clks;
+
+ _write_sysconfig(v, oh);
+ ret = _clear_softreset(oh, &v);
+ if (ret)
+ goto dis_opt_clks;
+
_write_sysconfig(v, oh);
if (oh->class->sysc->srst_udelay)
@@ -2326,38 +2364,80 @@ static int _shutdown(struct omap_hwmod *oh)
return 0;
}
+static int of_dev_find_hwmod(struct device_node *np,
+ struct omap_hwmod *oh)
+{
+ int count, i, res;
+ const char *p;
+
+ count = of_property_count_strings(np, "ti,hwmods");
+ if (count < 1)
+ return -ENODEV;
+
+ for (i = 0; i < count; i++) {
+ res = of_property_read_string_index(np, "ti,hwmods",
+ i, &p);
+ if (res)
+ continue;
+ if (!strcmp(p, oh->name)) {
+ pr_debug("omap_hwmod: dt %s[%i] uses hwmod %s\n",
+ np->name, i, oh->name);
+ return i;
+ }
+ }
+
+ return -ENODEV;
+}
+
/**
* of_dev_hwmod_lookup - look up needed hwmod from dt blob
* @np: struct device_node *
* @oh: struct omap_hwmod *
+ * @index: index of the entry found
+ * @found: struct device_node * found or NULL
*
 * Parse the dt blob and find the needed hwmod. A recursive function is
 * implemented to take care of hierarchical dt blob parsing.
- * Return: The device node on success or NULL on failure.
+ * Return: Returns 0 on success, -ENODEV when not found.
*/
-static struct device_node *of_dev_hwmod_lookup(struct device_node *np,
- struct omap_hwmod *oh)
+static int of_dev_hwmod_lookup(struct device_node *np,
+ struct omap_hwmod *oh,
+ int *index,
+ struct device_node **found)
{
- struct device_node *np0 = NULL, *np1 = NULL;
- const char *p;
+ struct device_node *np0 = NULL;
+ int res;
+
+ res = of_dev_find_hwmod(np, oh);
+ if (res >= 0) {
+ *found = np;
+ *index = res;
+ return 0;
+ }
for_each_child_of_node(np, np0) {
- if (of_find_property(np0, "ti,hwmods", NULL)) {
- p = of_get_property(np0, "ti,hwmods", NULL);
- if (!strcmp(p, oh->name))
- return np0;
- np1 = of_dev_hwmod_lookup(np0, oh);
- if (np1)
- return np1;
+ struct device_node *fc;
+ int i;
+
+ res = of_dev_hwmod_lookup(np0, oh, &i, &fc);
+ if (res == 0) {
+ *found = fc;
+ *index = i;
+ return 0;
}
}
- return NULL;
+
+ *found = NULL;
+ *index = 0;
+
+ return -ENODEV;
}
/**
* _init_mpu_rt_base - populate the virtual address for a hwmod
* @oh: struct omap_hwmod * to locate the virtual address
* @data: (unused, caller should pass NULL)
+ * @index: index of the reg entry iospace in device tree
* @np: struct device_node * of the IP block's device node in the DT data
*
* Cache the virtual address used by the MPU to access this IP block's
@@ -2368,7 +2448,7 @@ static struct device_node *of_dev_hwmod_lookup(struct device_node *np,
* -ENXIO on absent or invalid register target address space.
*/
static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
- struct device_node *np)
+ int index, struct device_node *np)
{
struct omap_hwmod_addr_space *mem;
void __iomem *va_start = NULL;
@@ -2390,13 +2470,17 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
if (!np)
return -ENXIO;
- va_start = of_iomap(np, oh->mpu_rt_idx);
+ va_start = of_iomap(np, index + oh->mpu_rt_idx);
} else {
va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start);
}
if (!va_start) {
- pr_err("omap_hwmod: %s: Could not ioremap\n", oh->name);
+ if (mem)
+ pr_err("omap_hwmod: %s: Could not ioremap\n", oh->name);
+ else
+ pr_err("omap_hwmod: %s: Missing dt reg%i for %s\n",
+ oh->name, index, np->full_name);
return -ENXIO;
}
@@ -2422,17 +2506,29 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
*/
static int __init _init(struct omap_hwmod *oh, void *data)
{
- int r;
+ int r, index;
struct device_node *np = NULL;
if (oh->_state != _HWMOD_STATE_REGISTERED)
return 0;
- if (of_have_populated_dt())
- np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh);
+ if (of_have_populated_dt()) {
+ struct device_node *bus;
+
+ bus = of_find_node_by_name(NULL, "ocp");
+ if (!bus)
+ return -ENODEV;
+
+ r = of_dev_hwmod_lookup(bus, oh, &index, &np);
+ if (r)
+ pr_debug("omap_hwmod: %s missing dt data\n", oh->name);
+ else if (np && index)
+ pr_warn("omap_hwmod: %s using broken dt data from %s\n",
+ oh->name, np->name);
+ }
if (oh->class->sysc) {
- r = _init_mpu_rt_base(oh, NULL, np);
+ r = _init_mpu_rt_base(oh, NULL, index, np);
if (r < 0) {
WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
oh->name);
@@ -3169,6 +3265,11 @@ int omap_hwmod_softreset(struct omap_hwmod *oh)
goto error;
_write_sysconfig(v, oh);
+ ret = _clear_softreset(oh, &v);
+ if (ret)
+ goto error;
+ _write_sysconfig(v, oh);
+
error:
return ret;
}
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index 56cebb05509e..d23c77fadb31 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -796,7 +796,7 @@ struct omap_hwmod omap2xxx_counter_32k_hwmod = {
/* gpmc */
static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = {
- { .irq = 20 },
+ { .irq = 20 + OMAP_INTC_START, },
{ .irq = -1 }
};
@@ -841,7 +841,7 @@ static struct omap_hwmod_class omap2_rng_hwmod_class = {
};
static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = {
- { .irq = 52 },
+ { .irq = 52 + OMAP_INTC_START, },
{ .irq = -1 }
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 9e56fabd7fa3..4c3b1e6df508 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -1943,7 +1943,8 @@ static struct omap_hwmod_class_sysconfig omap3xxx_usb_host_hs_sysc = {
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP |
- SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
+ SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
@@ -2021,15 +2022,7 @@ static struct omap_hwmod omap3xxx_usb_host_hs_hwmod = {
* hence HWMOD_SWSUP_MSTANDBY
*/
- /*
- * During system boot; If the hwmod framework resets the module
- * the module will have smart idle settings; which can lead to deadlock
- * (above Errata Id:i660); so, dont reset the module during boot;
- * Use HWMOD_INIT_NO_RESET.
- */
-
- .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
- HWMOD_INIT_NO_RESET,
+ .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
};
/*
@@ -2172,7 +2165,7 @@ static struct omap_hwmod_class omap3xxx_gpmc_hwmod_class = {
};
static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = {
- { .irq = 20 },
+ { .irq = 20 + OMAP_INTC_START, },
{ .irq = -1 }
};
@@ -3006,7 +2999,7 @@ static struct omap_mmu_dev_attr mmu_isp_dev_attr = {
static struct omap_hwmod omap3xxx_mmu_isp_hwmod;
static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = {
- { .irq = 24 },
+ { .irq = 24 + OMAP_INTC_START, },
{ .irq = -1 }
};
@@ -3048,7 +3041,7 @@ static struct omap_mmu_dev_attr mmu_iva_dev_attr = {
static struct omap_hwmod omap3xxx_mmu_iva_hwmod;
static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = {
- { .irq = 28 },
+ { .irq = 28 + OMAP_INTC_START, },
{ .irq = -1 }
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 1e5b12cb8246..3318cae96e7d 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -2937,7 +2937,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_usb_host_hs_sysc = {
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET),
+ SYSC_HAS_SOFTRESET | SYSC_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
@@ -3001,15 +3001,7 @@ static struct omap_hwmod omap44xx_usb_host_hs_hwmod = {
* hence HWMOD_SWSUP_MSTANDBY
*/
- /*
- * During system boot; If the hwmod framework resets the module
- * the module will have smart idle settings; which can lead to deadlock
- * (above Errata Id:i660); so, dont reset the module during boot;
- * Use HWMOD_INIT_NO_RESET.
- */
-
- .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
- HWMOD_INIT_NO_RESET,
+ .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
};
/*
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index 9e08d6994a0b..e297d6231c3a 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -1544,7 +1544,8 @@ static struct omap_hwmod_class_sysconfig omap54xx_usb_host_hs_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS |
- SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
+ SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
@@ -1598,15 +1599,7 @@ static struct omap_hwmod omap54xx_usb_host_hs_hwmod = {
* hence HWMOD_SWSUP_MSTANDBY
*/
- /*
- * During system boot; If the hwmod framework resets the module
- * the module will have smart idle settings; which can lead to deadlock
- * (above Errata Id:i660); so, dont reset the module during boot;
- * Use HWMOD_INIT_NO_RESET.
- */
-
- .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
- HWMOD_INIT_NO_RESET,
+ .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
.main_clk = "l3init_60m_fclk",
.prcm = {
.omap4 = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index db32d5380b11..18f333c440db 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1637,7 +1637,7 @@ static struct omap_hwmod dra7xx_uart1_hwmod = {
.class = &dra7xx_uart_hwmod_class,
.clkdm_name = "l4per_clkdm",
.main_clk = "uart1_gfclk_mux",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP2UART1_FLAGS,
.prcm = {
.omap4 = {
.clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET,
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 10c71450cf63..39f020c982e8 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -139,6 +139,7 @@ struct of_dev_auxdata omap_auxdata_lookup[] __initdata = {
static struct pdata_init pdata_quirks[] __initdata = {
#ifdef CONFIG_ARCH_OMAP3
+ { "nokia,omap3-n900", hsmmc2_internal_input_clk, },
{ "nokia,omap3-n9", hsmmc2_internal_input_clk, },
{ "nokia,omap3-n950", hsmmc2_internal_input_clk, },
{ "isee,omap3-igep0020", omap3_igep0020_legacy_init, },
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 93b80e5da8d4..1f3770a8a728 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -120,7 +120,7 @@ static void omap3_save_secure_ram_context(void)
* will hang the system.
*/
pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
- ret = _omap_save_secure_sram((u32 *)
+ ret = _omap_save_secure_sram((u32 *)(unsigned long)
__pa(omap3_secure_ram_storage));
pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
/* Following is for error tracking, it should not happen */
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index e233dfcbc186..93a2a6e4260f 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -128,7 +128,8 @@ skip_voltdm:
for (i = 0; i < pwrdm->banks; i++)
pwrdm->ret_mem_off_counter[i] = 0;
- arch_pwrdm->pwrdm_wait_transition(pwrdm);
+ if (arch_pwrdm && arch_pwrdm->pwrdm_wait_transition)
+ arch_pwrdm->pwrdm_wait_transition(pwrdm);
pwrdm->state = pwrdm_read_pwrst(pwrdm);
pwrdm->state_counter[pwrdm->state] = 1;
diff --git a/arch/arm/mach-omap2/prm44xx_54xx.h b/arch/arm/mach-omap2/prm44xx_54xx.h
index 7a976065e138..8d95aa543ef5 100644
--- a/arch/arm/mach-omap2/prm44xx_54xx.h
+++ b/arch/arm/mach-omap2/prm44xx_54xx.h
@@ -43,7 +43,7 @@ extern void omap4_prm_vcvp_write(u32 val, u8 offset);
extern u32 omap4_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset);
#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
- defined(CONFIG_SOC_DRA7XX)
+ defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM43XX)
void omap44xx_prm_reconfigure_io_chain(void);
#else
static inline void omap44xx_prm_reconfigure_io_chain(void)
diff --git a/arch/arm/mach-pxa/include/mach/lubbock.h b/arch/arm/mach-pxa/include/mach/lubbock.h
index 2a086e8373eb..958cd6af9384 100644
--- a/arch/arm/mach-pxa/include/mach/lubbock.h
+++ b/arch/arm/mach-pxa/include/mach/lubbock.h
@@ -10,6 +10,8 @@
* published by the Free Software Foundation.
*/
+#include <mach/irqs.h>
+
#define LUBBOCK_ETH_PHYS PXA_CS3_PHYS
#define LUBBOCK_FPGA_PHYS PXA_CS2_PHYS
diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c
index 0d5dd646f61f..263b15249b5b 100644
--- a/arch/arm/mach-pxa/reset.c
+++ b/arch/arm/mach-pxa/reset.c
@@ -13,6 +13,7 @@
#include <mach/regs-ost.h>
#include <mach/reset.h>
+#include <mach/smemc.h>
unsigned int reset_status;
EXPORT_SYMBOL(reset_status);
@@ -81,6 +82,12 @@ static void do_hw_reset(void)
writel_relaxed(OSSR_M3, OSSR);
/* ... in 100 ms */
writel_relaxed(readl_relaxed(OSCR) + 368640, OSMR3);
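+	/* 368640 ticks = 100 ms, assuming the usual 3.6864 MHz OS timer clock */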
+	/*
+	 * SDRAM hangs on watchdog reset on Marvell PXA270 (erratum 71);
+	 * put SDRAM into self-refresh to prevent that.
+	 */
+ while (1)
+ writel_relaxed(MDREFR_SLFRSH, MDREFR);
}
void pxa_restart(enum reboot_mode mode, const char *cmd)
@@ -104,4 +111,3 @@ void pxa_restart(enum reboot_mode mode, const char *cmd)
break;
}
}
-
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index 0206b915a6f6..ef5557b807ed 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -425,57 +425,57 @@ static struct platform_device tosa_power_device = {
* Tosa Keyboard
*/
static const uint32_t tosakbd_keymap[] = {
- KEY(0, 2, KEY_W),
- KEY(0, 6, KEY_K),
- KEY(0, 7, KEY_BACKSPACE),
- KEY(0, 8, KEY_P),
- KEY(1, 1, KEY_Q),
- KEY(1, 2, KEY_E),
- KEY(1, 3, KEY_T),
- KEY(1, 4, KEY_Y),
- KEY(1, 6, KEY_O),
- KEY(1, 7, KEY_I),
- KEY(1, 8, KEY_COMMA),
- KEY(2, 1, KEY_A),
- KEY(2, 2, KEY_D),
- KEY(2, 3, KEY_G),
- KEY(2, 4, KEY_U),
- KEY(2, 6, KEY_L),
- KEY(2, 7, KEY_ENTER),
- KEY(2, 8, KEY_DOT),
- KEY(3, 1, KEY_Z),
- KEY(3, 2, KEY_C),
- KEY(3, 3, KEY_V),
- KEY(3, 4, KEY_J),
- KEY(3, 5, TOSA_KEY_ADDRESSBOOK),
- KEY(3, 6, TOSA_KEY_CANCEL),
- KEY(3, 7, TOSA_KEY_CENTER),
- KEY(3, 8, TOSA_KEY_OK),
- KEY(3, 9, KEY_LEFTSHIFT),
- KEY(4, 1, KEY_S),
- KEY(4, 2, KEY_R),
- KEY(4, 3, KEY_B),
- KEY(4, 4, KEY_N),
- KEY(4, 5, TOSA_KEY_CALENDAR),
- KEY(4, 6, TOSA_KEY_HOMEPAGE),
- KEY(4, 7, KEY_LEFTCTRL),
- KEY(4, 8, TOSA_KEY_LIGHT),
- KEY(4, 10, KEY_RIGHTSHIFT),
- KEY(5, 1, KEY_TAB),
- KEY(5, 2, KEY_SLASH),
- KEY(5, 3, KEY_H),
- KEY(5, 4, KEY_M),
- KEY(5, 5, TOSA_KEY_MENU),
- KEY(5, 7, KEY_UP),
- KEY(5, 11, TOSA_KEY_FN),
- KEY(6, 1, KEY_X),
- KEY(6, 2, KEY_F),
- KEY(6, 3, KEY_SPACE),
- KEY(6, 4, KEY_APOSTROPHE),
- KEY(6, 5, TOSA_KEY_MAIL),
- KEY(6, 6, KEY_LEFT),
- KEY(6, 7, KEY_DOWN),
- KEY(6, 8, KEY_RIGHT),
+ KEY(0, 1, KEY_W),
+ KEY(0, 5, KEY_K),
+ KEY(0, 6, KEY_BACKSPACE),
+ KEY(0, 7, KEY_P),
+ KEY(1, 0, KEY_Q),
+ KEY(1, 1, KEY_E),
+ KEY(1, 2, KEY_T),
+ KEY(1, 3, KEY_Y),
+ KEY(1, 5, KEY_O),
+ KEY(1, 6, KEY_I),
+ KEY(1, 7, KEY_COMMA),
+ KEY(2, 0, KEY_A),
+ KEY(2, 1, KEY_D),
+ KEY(2, 2, KEY_G),
+ KEY(2, 3, KEY_U),
+ KEY(2, 5, KEY_L),
+ KEY(2, 6, KEY_ENTER),
+ KEY(2, 7, KEY_DOT),
+ KEY(3, 0, KEY_Z),
+ KEY(3, 1, KEY_C),
+ KEY(3, 2, KEY_V),
+ KEY(3, 3, KEY_J),
+ KEY(3, 4, TOSA_KEY_ADDRESSBOOK),
+ KEY(3, 5, TOSA_KEY_CANCEL),
+ KEY(3, 6, TOSA_KEY_CENTER),
+ KEY(3, 7, TOSA_KEY_OK),
+ KEY(3, 8, KEY_LEFTSHIFT),
+ KEY(4, 0, KEY_S),
+ KEY(4, 1, KEY_R),
+ KEY(4, 2, KEY_B),
+ KEY(4, 3, KEY_N),
+ KEY(4, 4, TOSA_KEY_CALENDAR),
+ KEY(4, 5, TOSA_KEY_HOMEPAGE),
+ KEY(4, 6, KEY_LEFTCTRL),
+ KEY(4, 7, TOSA_KEY_LIGHT),
+ KEY(4, 9, KEY_RIGHTSHIFT),
+ KEY(5, 0, KEY_TAB),
+ KEY(5, 1, KEY_SLASH),
+ KEY(5, 2, KEY_H),
+ KEY(5, 3, KEY_M),
+ KEY(5, 4, TOSA_KEY_MENU),
+ KEY(5, 6, KEY_UP),
+ KEY(5, 10, TOSA_KEY_FN),
+ KEY(6, 0, KEY_X),
+ KEY(6, 1, KEY_F),
+ KEY(6, 2, KEY_SPACE),
+ KEY(6, 3, KEY_APOSTROPHE),
+ KEY(6, 4, TOSA_KEY_MAIL),
+ KEY(6, 5, KEY_LEFT),
+ KEY(6, 6, KEY_DOWN),
+ KEY(6, 7, KEY_RIGHT),
};
static struct matrix_keymap_data tosakbd_keymap_data = {
diff --git a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
index 7eb9a10fc1af..2fddf38192df 100644
--- a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
+++ b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
@@ -8,8 +8,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/clk-provider.h>
-#include <linux/irqchip.h>
#include <linux/of_platform.h>
#include <asm/mach/arch.h>
@@ -48,15 +46,9 @@ static void __init s3c64xx_dt_map_io(void)
panic("SoC is not S3C64xx!");
}
-static void __init s3c64xx_dt_init_irq(void)
-{
- of_clk_init(NULL);
- samsung_wdt_reset_of_init();
- irqchip_init();
-};
-
static void __init s3c64xx_dt_init_machine(void)
{
+ samsung_wdt_reset_of_init();
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
@@ -79,7 +71,6 @@ DT_MACHINE_START(S3C6400_DT, "Samsung S3C64xx (Flattened Device Tree)")
/* Maintainer: Tomasz Figa <tomasz.figa@gmail.com> */
.dt_compat = s3c64xx_dt_compat,
.map_io = s3c64xx_dt_map_io,
- .init_irq = s3c64xx_dt_init_irq,
.init_machine = s3c64xx_dt_init_machine,
.restart = s3c64xx_dt_restart,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 958e3cbf0ac2..c18689123023 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -614,6 +614,11 @@ static struct regulator_consumer_supply fixed3v3_power_consumers[] = {
REGULATOR_SUPPLY("vqmmc", "sh_mmcif"),
};
+/* Fixed 3.3V regulator used by LCD backlight */
+static struct regulator_consumer_supply fixed5v0_power_consumers[] = {
+ REGULATOR_SUPPLY("power", "pwm-backlight.0"),
+};
+
/* Fixed 3.3V regulator to be used by SDHI0 */
static struct regulator_consumer_supply vcc_sdhi0_consumers[] = {
REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
@@ -1196,6 +1201,8 @@ static void __init eva_init(void)
regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers,
ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
+ regulator_register_always_on(3, "fixed-5.0V", fixed5v0_power_consumers,
+ ARRAY_SIZE(fixed5v0_power_consumers), 5000000);
pinctrl_register_mappings(eva_pinctrl_map, ARRAY_SIZE(eva_pinctrl_map));
pwm_add_table(pwm_lookup, ARRAY_SIZE(pwm_lookup));
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c
index 38611526fe9a..3c4995aebd22 100644
--- a/arch/arm/mach-shmobile/board-bockw.c
+++ b/arch/arm/mach-shmobile/board-bockw.c
@@ -679,7 +679,7 @@ static void __init bockw_init(void)
.id = i,
.data = &rsnd_card_info[i],
.size_data = sizeof(struct asoc_simple_card_info),
- .dma_mask = ~0,
+ .dma_mask = DMA_BIT_MASK(32),
};
platform_device_register_full(&cardinfo);
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index a8d3ce646fb9..e0406fd37390 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -245,7 +245,9 @@ static void __init lager_init(void)
{
lager_add_standard_devices();
- phy_register_fixup_for_id("r8a7790-ether-ff:01", lager_ksz8041_fixup);
+ if (IS_ENABLED(CONFIG_PHYLIB))
+ phy_register_fixup_for_id("r8a7790-ether-ff:01",
+ lager_ksz8041_fixup);
}
static const char * const lager_boards_compat_dt[] __initconst = {
diff --git a/arch/arm/mach-socfpga/Kconfig b/arch/arm/mach-socfpga/Kconfig
index 037100a1563a..aee77f06f887 100644
--- a/arch/arm/mach-socfpga/Kconfig
+++ b/arch/arm/mach-socfpga/Kconfig
@@ -10,6 +10,7 @@ config ARCH_SOCFPGA
select GENERIC_CLOCKEVENTS
select GPIO_PL061 if GPIOLIB
select HAVE_ARM_SCU
+ select HAVE_ARM_TWD if SMP
select HAVE_SMP
select MFD_SYSCON
select SPARSE_IRQ
diff --git a/arch/arm/mach-tegra/fuse.c b/arch/arm/mach-tegra/fuse.c
index d4639c506622..3a9c1f1c219d 100644
--- a/arch/arm/mach-tegra/fuse.c
+++ b/arch/arm/mach-tegra/fuse.c
@@ -198,10 +198,12 @@ void __init tegra_init_fuse(void)
switch (tegra_chip_id) {
case TEGRA20:
tegra20_fuse_init_randomness();
+ break;
case TEGRA30:
case TEGRA114:
default:
tegra30_fuse_init_randomness();
+ break;
}
pr_info("Tegra Revision: %s SKU: %d CPU Process: %d Core Process: %d\n",
@@ -209,13 +211,3 @@ void __init tegra_init_fuse(void)
tegra_sku_id, tegra_cpu_process_id,
tegra_core_process_id);
}
-
-unsigned long long tegra_chip_uid(void)
-{
- unsigned long long lo, hi;
-
- lo = tegra_fuse_readl(FUSE_UID_LOW);
- hi = tegra_fuse_readl(FUSE_UID_HIGH);
- return (hi << 32ull) | lo;
-}
-EXPORT_SYMBOL(tegra_chip_uid);
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 2e85c1e72535..12c7e5c03ea4 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -140,6 +140,10 @@ static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
/* Requires call-back bindings. */
OF_DEV_AUXDATA("arm,cortex-a9-pmu", 0, "arm-pmu", &db8500_pmu_platdata),
/* Requires DMA bindings. */
+ OF_DEV_AUXDATA("arm,pl18x", 0x80126000, "sdi0", &mop500_sdi0_data),
+ OF_DEV_AUXDATA("arm,pl18x", 0x80118000, "sdi1", &mop500_sdi1_data),
+ OF_DEV_AUXDATA("arm,pl18x", 0x80005000, "sdi2", &mop500_sdi2_data),
+ OF_DEV_AUXDATA("arm,pl18x", 0x80114000, "sdi4", &mop500_sdi4_data),
OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80123000,
"ux500-msp-i2s.0", &msp0_platform_data),
OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80124000,
diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
index 033d34dcbd3f..c26ef5b92ca7 100644
--- a/arch/arm/mach-vexpress/spc.c
+++ b/arch/arm/mach-vexpress/spc.c
@@ -53,6 +53,11 @@
#define A15_BX_ADDR0 0x68
#define A7_BX_ADDR0 0x78
+/* SPC CPU/cluster reset status */
+#define STANDBYWFI_STAT 0x3c
+#define STANDBYWFI_STAT_A15_CPU_MASK(cpu) (1 << (cpu))
+#define STANDBYWFI_STAT_A7_CPU_MASK(cpu) (1 << (3 + (cpu)))
+
/* SPC system config interface registers */
#define SYSCFG_WDATA 0x70
#define SYSCFG_RDATA 0x74
@@ -213,6 +218,41 @@ void ve_spc_powerdown(u32 cluster, bool enable)
writel_relaxed(enable, info->baseaddr + pwdrn_reg);
}
+static u32 standbywfi_cpu_mask(u32 cpu, u32 cluster)
+{
+ return cluster_is_a15(cluster) ?
+ STANDBYWFI_STAT_A15_CPU_MASK(cpu)
+ : STANDBYWFI_STAT_A7_CPU_MASK(cpu);
+}
+
+/**
+ * ve_spc_cpu_in_wfi() - check whether the specified CPU is in WFI
+ *
+ * @cpu: mpidr[7:0] bitfield describing CPU affinity level within cluster
+ * @cluster: mpidr[15:8] bitfield describing cluster affinity level
+ *
+ * Return: non-zero if and only if the specified CPU is in WFI
+ *
+ * Take care when interpreting the result: a CPU may be in WFI only
+ * transiently while idling, so a non-zero return does not by itself
+ * mean the CPU is safely parked.
+ */
+int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
+{
+ int ret;
+ u32 mask = standbywfi_cpu_mask(cpu, cluster);
+
+ if (cluster >= MAX_CLUSTERS)
+ return 1;
+
+ ret = readl_relaxed(info->baseaddr + STANDBYWFI_STAT);
+
+ pr_debug("%s: PCFGREG[0x%X] = 0x%08X, mask = 0x%X\n",
+ __func__, STANDBYWFI_STAT, ret, mask);
+
+ return ret & mask;
+}
+
static int ve_spc_get_performance(int cluster, u32 *freq)
{
struct ve_spc_opp *opps = info->opps[cluster];
diff --git a/arch/arm/mach-vexpress/spc.h b/arch/arm/mach-vexpress/spc.h
index dbd44c3720f9..793d065243b9 100644
--- a/arch/arm/mach-vexpress/spc.h
+++ b/arch/arm/mach-vexpress/spc.h
@@ -20,5 +20,6 @@ void ve_spc_global_wakeup_irq(bool set);
void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set);
void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr);
void ve_spc_powerdown(u32 cluster, bool enable);
+int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster);
#endif
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index 05a364c5077a..29e7785a54bc 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -12,6 +12,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -32,11 +33,17 @@
#include "spc.h"
/* SCC conf registers */
+#define RESET_CTRL 0x018
+#define RESET_A15_NCORERESET(cpu) (1 << (2 + (cpu)))
+#define RESET_A7_NCORERESET(cpu) (1 << (16 + (cpu)))
+
#define A15_CONF 0x400
#define A7_CONF 0x500
#define SYS_INFO 0x700
#define SPC_BASE 0xb00
+static void __iomem *scc;
+
/*
* We can't use regular spinlocks. In the switcher case, it is possible
* for an outbound CPU to call power_down() after its inbound counterpart
@@ -190,6 +197,55 @@ static void tc2_pm_power_down(void)
tc2_pm_down(0);
}
+static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster)
+{
+ u32 mask = cluster ?
+ RESET_A7_NCORERESET(cpu)
+ : RESET_A15_NCORERESET(cpu);
+
+ return !(readl_relaxed(scc + RESET_CTRL) & mask);
+}
+
+#define POLL_MSEC 10
+#define TIMEOUT_MSEC 1000
+
+static int tc2_pm_power_down_finish(unsigned int cpu, unsigned int cluster)
+{
+ unsigned tries;
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
+
+ for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) {
+ /*
+ * Only examine the hardware state if the target CPU has
+ * caught up at least as far as tc2_pm_down():
+ */
+ if (ACCESS_ONCE(tc2_pm_use_count[cpu][cluster]) == 0) {
+ pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n",
+ __func__, cpu, cluster,
+ readl_relaxed(scc + RESET_CTRL));
+
+ /*
+ * We need the CPU to reach WFI, but the power
+ * controller may put the cluster in reset and
+ * power it off as soon as that happens, before
+ * we have a chance to see STANDBYWFI.
+ *
+ * So we need to check for both conditions:
+ */
+ if (tc2_core_in_reset(cpu, cluster) ||
+ ve_spc_cpu_in_wfi(cpu, cluster))
+ return 0; /* success: the CPU is halted */
+ }
+
+ /* Otherwise, wait and retry: */
+ msleep(POLL_MSEC);
+ }
+
+ return -ETIMEDOUT; /* timeout */
+}
+
static void tc2_pm_suspend(u64 residency)
{
unsigned int mpidr, cpu, cluster;
@@ -232,10 +288,11 @@ static void tc2_pm_powered_up(void)
}
static const struct mcpm_platform_ops tc2_pm_power_ops = {
- .power_up = tc2_pm_power_up,
- .power_down = tc2_pm_power_down,
- .suspend = tc2_pm_suspend,
- .powered_up = tc2_pm_powered_up,
+ .power_up = tc2_pm_power_up,
+ .power_down = tc2_pm_power_down,
+ .power_down_finish = tc2_pm_power_down_finish,
+ .suspend = tc2_pm_suspend,
+ .powered_up = tc2_pm_powered_up,
};
static bool __init tc2_pm_usage_count_init(void)
@@ -269,7 +326,6 @@ static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
static int __init tc2_pm_init(void)
{
int ret, irq;
- void __iomem *scc;
u32 a15_cluster_id, a7_cluster_id, sys_info;
struct device_node *np;
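
The new power_down_finish path above is a bounded poll: it first confirms via
the use count that the dying CPU has passed the point of no return, then
accepts either hardware signal (core held in reset, or core sitting in WFI)
as proof that it has halted. A condensed, self-contained sketch of the same
pattern, with illustrative predicate names standing in for tc2_core_in_reset()
and ve_spc_cpu_in_wfi():

	#include <linux/delay.h>
	#include <linux/errno.h>

	#define POLL_MSEC	10
	#define TIMEOUT_MSEC	1000

	/* Illustrative stand-ins for tc2_core_in_reset()/ve_spc_cpu_in_wfi() */
	extern int core_in_reset(unsigned int cpu, unsigned int cluster);
	extern int core_in_wfi(unsigned int cpu, unsigned int cluster);

	static int wait_for_cpu_halt(unsigned int cpu, unsigned int cluster)
	{
		unsigned int tries;

		for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) {
			/*
			 * Either condition means the CPU is done: the power
			 * controller may put the cluster in reset as soon as
			 * the core hits WFI, so WFI alone could be missed.
			 */
			if (core_in_reset(cpu, cluster) ||
			    core_in_wfi(cpu, cluster))
				return 0;
			msleep(POLL_MSEC);
		}
		return -ETIMEDOUT;
	}
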
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 79f8b39801a8..f61a5707823a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -9,6 +9,7 @@
*
* DMA uncached mapping support.
*/
+#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
@@ -157,6 +158,44 @@ struct dma_map_ops arm_coherent_dma_ops = {
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
+static int __dma_supported(struct device *dev, u64 mask, bool warn)
+{
+ unsigned long max_dma_pfn;
+
+ /*
+ * If the mask allows for more memory than we can address,
+ * and we actually have that much memory, then we must
+ * indicate that DMA to this device is not supported.
+ */
+ if (sizeof(mask) != sizeof(dma_addr_t) &&
+ mask > (dma_addr_t)~0 &&
+ dma_to_pfn(dev, ~0) < max_pfn) {
+ if (warn) {
+ dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
+ mask);
+ dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+ }
+ return 0;
+ }
+
+ max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
+
+ /*
+ * Translate the device's DMA mask to a PFN limit. The resulting
+ * PFN is the highest page we can DMA to, inclusive.
+ */
+ if (dma_to_pfn(dev, mask) < max_dma_pfn) {
+ if (warn)
+ dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
+ mask,
+ dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
+ max_dma_pfn + 1);
+ return 0;
+ }
+
+ return 1;
+}
+
static u64 get_coherent_dma_mask(struct device *dev)
{
u64 mask = (u64)DMA_BIT_MASK(32);
@@ -173,32 +212,8 @@ static u64 get_coherent_dma_mask(struct device *dev)
return 0;
}
- /*
- * If the mask allows for more memory than we can address,
- * and we actually have that much memory, then fail the
- * allocation.
- */
- if (sizeof(mask) != sizeof(dma_addr_t) &&
- mask > (dma_addr_t)~0 &&
- dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) {
- dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
- mask);
- dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
- return 0;
- }
-
- /*
- * Now check that the mask, when translated to a PFN,
- * fits within the allowable addresses which we can
- * allocate.
- */
- if (dma_to_pfn(dev, mask) < arm_dma_pfn_limit) {
- dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
- mask,
- dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
- arm_dma_pfn_limit + 1);
+ if (!__dma_supported(dev, mask, true))
return 0;
- }
}
return mask;
@@ -1027,28 +1042,7 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
*/
int dma_supported(struct device *dev, u64 mask)
{
- unsigned long limit;
-
- /*
- * If the mask allows for more memory than we can address,
- * and we actually have that much memory, then we must
- * indicate that DMA to this device is not supported.
- */
- if (sizeof(mask) != sizeof(dma_addr_t) &&
- mask > (dma_addr_t)~0 &&
- dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
- return 0;
-
- /*
- * Translate the device's DMA mask to a PFN limit. This
- * PFN number includes the page which we can DMA to.
- */
- limit = dma_to_pfn(dev, mask);
-
- if (limit < arm_dma_pfn_limit)
- return 0;
-
- return 1;
+ return __dma_supported(dev, mask, false);
}
EXPORT_SYMBOL(dma_supported);
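
Both former copies of this logic now funnel into __dma_supported(). Its first
test is the easiest to misread: when dma_addr_t is only 32 bits wide, a 64-bit
mask promises bus addresses the type cannot even represent, so the mask must
be rejected whenever the machine really has memory beyond that point. A
reduced model of just that predicate (for illustration, not kernel API):

	#include <linux/types.h>

	/*
	 * Model of the first check in __dma_supported(): the test only
	 * fires when dma_addr_t is narrower than the 64-bit mask, and
	 * (dma_addr_t)~0 is then the largest address the bus type can
	 * express.
	 */
	static bool mask_exceeds_dma_addr_t(u64 mask)
	{
		return sizeof(mask) != sizeof(dma_addr_t) &&
		       mask > (dma_addr_t)~0;
	}
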
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 3e8f106ee5fe..1f7b19a47060 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -229,7 +229,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
#ifdef CONFIG_ZONE_DMA
if (mdesc->dma_zone_size) {
arm_dma_zone_size = mdesc->dma_zone_size;
- arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+ arm_dma_limit = __pv_phys_offset + arm_dma_zone_size - 1;
} else
arm_dma_limit = 0xffffffff;
arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index d27158c38eb0..5e85ed371364 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -146,7 +146,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
- info.low_limit = PAGE_SIZE;
+ info.low_limit = FIRST_USER_ADDRESS;
info.high_limit = mm->mmap_base;
info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
info.align_offset = pgoff << PAGE_SHIFT;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 78eeeca78f5a..580ef2de82d7 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -558,8 +558,8 @@ static void __init build_mem_type_table(void)
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
break;
}
- printk("Memory policy: ECC %sabled, Data cache %s\n",
- ecc_mask ? "en" : "dis", cp->policy);
+ pr_info("Memory policy: %sData cache %s\n",
+ ecc_mask ? "ECC enabled, " : "", cp->policy);
for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
struct mem_type *t = &mem_types[i];
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 5c668b7a31f9..55764a7ef1f0 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -18,6 +18,7 @@
#include <asm/mach/arch.h>
#include <asm/cputype.h>
#include <asm/mpu.h>
+#include <asm/procinfo.h>
#include "mm.h"
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 0acb089d0f70..1046b373d1ae 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -87,7 +87,8 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
init_pud = pud_offset(init_pgd, 0);
init_pmd = pmd_offset(init_pud, 0);
init_pte = pte_offset_map(init_pmd, 0);
- set_pte_ext(new_pte, *init_pte, 0);
+ set_pte_ext(new_pte + 0, init_pte[0], 0);
+ set_pte_ext(new_pte + 1, init_pte[1], 0);
pte_unmap(init_pte);
pte_unmap(new_pte);
}
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 60920f62fdf5..bd1781979a39 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -92,7 +92,7 @@ ENDPROC(cpu_v7_dcache_clean_area)
/* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
.globl cpu_v7_suspend_size
-.equ cpu_v7_suspend_size, 4 * 8
+.equ cpu_v7_suspend_size, 4 * 9
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_v7_do_suspend)
stmfd sp!, {r4 - r10, lr}
@@ -101,13 +101,17 @@ ENTRY(cpu_v7_do_suspend)
stmia r0!, {r4 - r5}
#ifdef CONFIG_MMU
mrc p15, 0, r6, c3, c0, 0 @ Domain ID
+#ifdef CONFIG_ARM_LPAE
+ mrrc p15, 1, r5, r7, c2 @ TTB 1
+#else
mrc p15, 0, r7, c2, c0, 1 @ TTB 1
+#endif
mrc p15, 0, r11, c2, c0, 2 @ TTB control register
#endif
mrc p15, 0, r8, c1, c0, 0 @ Control register
mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register
mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control
- stmia r0, {r6 - r11}
+ stmia r0, {r5 - r11}
ldmfd sp!, {r4 - r10, pc}
ENDPROC(cpu_v7_do_suspend)
@@ -118,16 +122,19 @@ ENTRY(cpu_v7_do_resume)
ldmia r0!, {r4 - r5}
mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID
mcr p15, 0, r5, c13, c0, 3 @ User r/o thread ID
- ldmia r0, {r6 - r11}
+ ldmia r0, {r5 - r11}
#ifdef CONFIG_MMU
mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs
mcr p15, 0, r6, c3, c0, 0 @ Domain ID
-#ifndef CONFIG_ARM_LPAE
+#ifdef CONFIG_ARM_LPAE
+ mcrr p15, 0, r1, ip, c2 @ TTB 0
+ mcrr p15, 1, r5, r7, c2 @ TTB 1
+#else
ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP)
ALT_UP(orr r1, r1, #TTB_FLAGS_UP)
-#endif
mcr p15, 0, r1, c2, c0, 0 @ TTB 0
mcr p15, 0, r7, c2, c0, 1 @ TTB 1
+#endif
mcr p15, 0, r11, c2, c0, 2 @ TTB control register
ldr r4, =PRRR @ PRRR
ldr r5, =NMRR @ NMRR
diff --git a/arch/arm/plat-omap/include/plat/dmtimer.h b/arch/arm/plat-omap/include/plat/dmtimer.h
index fb92abb91628..2861b155485a 100644
--- a/arch/arm/plat-omap/include/plat/dmtimer.h
+++ b/arch/arm/plat-omap/include/plat/dmtimer.h
@@ -336,8 +336,11 @@ static inline void __omap_dm_timer_enable_posted(struct omap_dm_timer *timer)
if (timer->posted)
return;
- if (timer->errata & OMAP_TIMER_ERRATA_I103_I767)
+ if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) {
+ timer->posted = OMAP_TIMER_NONPOSTED;
+ __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0, 0);
return;
+ }
__omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG,
OMAP_TIMER_CTRL_POSTED, 0);
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 83e4f959ee47..85501238b425 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -96,7 +96,7 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
struct remap_data *info = data;
struct page *page = info->pages[info->index++];
unsigned long pfn = page_to_pfn(page);
- pte_t pte = pfn_pte(pfn, info->prot);
+ pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
if (map_foreign_page(pfn, info->fgmfn, info->domid))
return -EFAULT;
@@ -224,10 +224,10 @@ static int __init xen_guest_init(void)
}
if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
return 0;
- xen_hvm_resume_frames = res.start >> PAGE_SHIFT;
+ xen_hvm_resume_frames = res.start;
xen_events_irq = irq_of_parse_and_map(node, 0);
pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
- version, xen_events_irq, xen_hvm_resume_frames);
+ version, xen_events_irq, (xen_hvm_resume_frames >> PAGE_SHIFT));
xen_domain_type = XEN_HVM_DOMAIN;
xen_setup_features();
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index 23732cdff551..b31ee1b275b0 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -25,8 +25,9 @@ struct xen_p2m_entry {
struct rb_node rbnode_phys;
};
-rwlock_t p2m_lock;
+static rwlock_t p2m_lock;
struct rb_root phys_to_mach = RB_ROOT;
+EXPORT_SYMBOL_GPL(phys_to_mach);
static struct rb_root mach_to_phys = RB_ROOT;
static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
@@ -200,7 +201,7 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
}
EXPORT_SYMBOL_GPL(__set_phys_to_machine);
-int p2m_init(void)
+static int p2m_init(void)
{
rwlock_init(&p2m_lock);
return 0;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 88c8b6c1341a..6d4dd22ee4b7 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -159,8 +159,7 @@ config NR_CPUS
range 2 32
depends on SMP
# These have to remain sorted largest to smallest
- default "8" if ARCH_XGENE
- default "4"
+ default "8"
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
diff --git a/arch/arm64/boot/dts/foundation-v8.dts b/arch/arm64/boot/dts/foundation-v8.dts
index 84fcc5018284..519c4b2c0687 100644
--- a/arch/arm64/boot/dts/foundation-v8.dts
+++ b/arch/arm64/boot/dts/foundation-v8.dts
@@ -6,6 +6,8 @@
/dts-v1/;
+/memreserve/ 0x80000000 0x00010000;
+
/ {
model = "Foundation-v8A";
compatible = "arm,foundation-aarch64", "arm,vexpress";
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 519f89f5b6a3..626d4a92521f 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -51,3 +51,4 @@ generic-y += user.h
generic-y += vga.h
generic-y += xor.h
generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 4cc813eddacb..572769727227 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -229,7 +229,7 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot
extern void __iounmap(volatile void __iomem *addr);
extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
-#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
+#define PROT_DEFAULT (pgprot_default | PTE_DIRTY)
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
#define PROT_NORMAL (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index aa11943b8502..b2fcfbc51ecc 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -56,6 +56,9 @@ static inline void arch_local_irq_disable(void)
#define local_fiq_enable() asm("msr daifclr, #1" : : : "memory")
#define local_fiq_disable() asm("msr daifset, #1" : : : "memory")
+#define local_async_enable() asm("msr daifclr, #4" : : : "memory")
+#define local_async_disable() asm("msr daifset, #4" : : : "memory")
+
/*
* Save the current interrupt enable state.
*/
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 755f86143320..b1d2e26c3c88 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -43,7 +43,7 @@
* Section
*/
#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
-#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 2)
+#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 58)
#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 17bd3af0a117..7f2b60affbb4 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -25,10 +25,11 @@
* Software defined PTE bits definition.
*/
#define PTE_VALID (_AT(pteval_t, 1) << 0)
-#define PTE_PROT_NONE (_AT(pteval_t, 1) << 2) /* only when !PTE_VALID */
-#define PTE_FILE (_AT(pteval_t, 1) << 3) /* only when !pte_present() */
+#define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */
#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
+ /* bit 57 for PMD_SECT_SPLITTING */
+#define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
/*
* VMALLOC and SPARSEMEM_VMEMMAP ranges.
@@ -254,7 +255,7 @@ static inline int has_transparent_hugepage(void)
#define pgprot_noncached(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define pgprot_writecombine(prot) \
- __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE))
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
#define __HAVE_PHYS_MEM_ACCESS_PROT
@@ -357,18 +358,20 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
/*
* Encode and decode a swap entry:
- * bits 0, 2: present (must both be zero)
- * bit 3: PTE_FILE
- * bits 4-8: swap type
- * bits 9-63: swap offset
+ * bits 0-1: present (must be zero)
+ * bit 2: PTE_FILE
+ * bits 3-8: swap type
+ * bits 9-57: swap offset
*/
-#define __SWP_TYPE_SHIFT 4
+#define __SWP_TYPE_SHIFT 3
#define __SWP_TYPE_BITS 6
+#define __SWP_OFFSET_BITS 49
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+#define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1)
#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
-#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
+#define __swp_offset(x) (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
@@ -382,15 +385,15 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
/*
* Encode and decode a file entry:
- * bits 0, 2: present (must both be zero)
- * bit 3: PTE_FILE
- * bits 4-63: file offset / PAGE_SIZE
+ * bits 0-1: present (must be zero)
+ * bit 2: PTE_FILE
+ * bits 3-57: file offset / PAGE_SIZE
*/
#define pte_file(pte) (pte_val(pte) & PTE_FILE)
-#define pte_to_pgoff(x) (pte_val(x) >> 4)
-#define pgoff_to_pte(x) __pte(((x) << 4) | PTE_FILE)
+#define pte_to_pgoff(x) (pte_val(x) >> 3)
+#define pgoff_to_pte(x) __pte(((x) << 3) | PTE_FILE)
-#define PTE_FILE_MAX_BITS 60
+#define PTE_FILE_MAX_BITS 55
extern int kern_addr_valid(unsigned long addr);
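
Under the relocated layout (type in bits 3-8, offset in bits 9-57) a swap
entry is assembled and decoded with plain shifts and masks. A standalone
worked example using the same constants:

	/*
	 * Worked example of the new arm64 swap-entry layout:
	 * bits 0-1 must be zero (not present), bit 2 is PTE_FILE,
	 * the type lives in bits 3-8 and the offset in bits 9-57.
	 */
	#define SWP_TYPE_SHIFT		3
	#define SWP_TYPE_BITS		6
	#define SWP_TYPE_MASK		((1UL << SWP_TYPE_BITS) - 1)
	#define SWP_OFFSET_SHIFT	(SWP_TYPE_SHIFT + SWP_TYPE_BITS)	/* 9 */

	static unsigned long swp_encode(unsigned int type, unsigned long offset)
	{
		return ((unsigned long)type << SWP_TYPE_SHIFT) |
		       (offset << SWP_OFFSET_SHIFT);
	}

	/*
	 * swp_encode(5, 0x1234) == (5 << 3) | (0x1234 << 9)
	 *                       == 0x28 | 0x246800 == 0x246828
	 * type:   (0x246828 >> 3) & 0x3f == 5
	 * offset:  0x246828 >> 9         == 0x1234
	 */
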
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
index 2820f1a6eebe..dde3fc9c49f0 100644
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -23,25 +23,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
}
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
}
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
- __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
}
static inline void xen_dma_sync_single_for_device(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
- __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
}
#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 6a0a9b132d7a..4ae68579031d 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -248,7 +248,8 @@ static int brk_handler(unsigned long addr, unsigned int esr,
int aarch32_break_handler(struct pt_regs *regs)
{
siginfo_t info;
- unsigned int instr;
+ u32 arm_instr;
+ u16 thumb_instr;
bool bp = false;
void __user *pc = (void __user *)instruction_pointer(regs);
@@ -257,18 +258,21 @@ int aarch32_break_handler(struct pt_regs *regs)
if (compat_thumb_mode(regs)) {
/* get 16-bit Thumb instruction */
- get_user(instr, (u16 __user *)pc);
- if (instr == AARCH32_BREAK_THUMB2_LO) {
+ get_user(thumb_instr, (u16 __user *)pc);
+ thumb_instr = le16_to_cpu(thumb_instr);
+ if (thumb_instr == AARCH32_BREAK_THUMB2_LO) {
/* get second half of 32-bit Thumb-2 instruction */
- get_user(instr, (u16 __user *)(pc + 2));
- bp = instr == AARCH32_BREAK_THUMB2_HI;
+ get_user(thumb_instr, (u16 __user *)(pc + 2));
+ thumb_instr = le16_to_cpu(thumb_instr);
+ bp = thumb_instr == AARCH32_BREAK_THUMB2_HI;
} else {
- bp = instr == AARCH32_BREAK_THUMB;
+ bp = thumb_instr == AARCH32_BREAK_THUMB;
}
} else {
/* 32-bit ARM instruction */
- get_user(instr, (u32 __user *)pc);
- bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM;
+ get_user(arm_instr, (u32 __user *)pc);
+ arm_instr = le32_to_cpu(arm_instr);
+ bp = (arm_instr & ~0xf0000000) == AARCH32_BREAK_ARM;
}
if (!bp)
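
Two things are going on in the fix above: the destination variable now matches
the access width (u16 for Thumb, u32 for ARM) so get_user() emits a correctly
sized load, and the fetched value is byte-swapped explicitly, because AArch32
instructions are stored little-endian in memory regardless of the kernel's
data endianness. A minimal sketch of the 32-bit case (an illustrative helper,
not part of the patch):

	#include <linux/uaccess.h>
	#include <asm/byteorder.h>

	/*
	 * Fetch a 32-bit ARM instruction from user space and normalise it
	 * to CPU endianness before masking off the condition field.
	 */
	static bool user_insn_matches(const u32 __user *pc, u32 opcode)
	{
		u32 instr;

		if (get_user(instr, pc))
			return false;
		instr = le32_to_cpu(instr);
		return (instr & ~0xf0000000) == opcode;
	}
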
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index e1166145ca29..4d2c6f3f0c41 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -309,15 +309,12 @@ el1_irq:
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
-#ifdef CONFIG_PREEMPT
- get_thread_info tsk
- ldr w24, [tsk, #TI_PREEMPT] // get preempt count
- add w0, w24, #1 // increment it
- str w0, [tsk, #TI_PREEMPT]
-#endif
+
irq_handler
+
#ifdef CONFIG_PREEMPT
- str w24, [tsk, #TI_PREEMPT] // restore preempt count
+ get_thread_info tsk
+ ldr w24, [tsk, #TI_PREEMPT] // restore preempt count
cbnz w24, 1f // preempt count != 0
ldr x0, [tsk, #TI_FLAGS] // get flags
tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
@@ -507,22 +504,10 @@ el0_irq_naked:
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
- get_thread_info tsk
-#ifdef CONFIG_PREEMPT
- ldr w24, [tsk, #TI_PREEMPT] // get preempt count
- add w23, w24, #1 // increment it
- str w23, [tsk, #TI_PREEMPT]
-#endif
+
irq_handler
-#ifdef CONFIG_PREEMPT
- ldr w0, [tsk, #TI_PREEMPT]
- str w24, [tsk, #TI_PREEMPT]
- cmp w0, w23
- b.eq 1f
- mov x1, #0
- str x1, [x1] // BUG
-1:
-#endif
+ get_thread_info tsk
+
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_on
#endif
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 7009387348b7..c68cca5c3523 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -282,8 +282,9 @@ ENDPROC(secondary_holding_pen)
* be used where CPUs are brought online dynamically by the kernel.
*/
ENTRY(secondary_entry)
- bl __calc_phys_offset // x2=phys offset
bl el2_setup // Drop to EL1
+ bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
+ bl set_cpu_boot_mode_flag
b secondary_startup
ENDPROC(secondary_entry)
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index fecdbf7de82e..6a8928bba03c 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -214,31 +214,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
{
int err, len, type, disabled = !ctrl.enabled;
- if (disabled) {
- len = 0;
- type = HW_BREAKPOINT_EMPTY;
- } else {
- err = arch_bp_generic_fields(ctrl, &len, &type);
- if (err)
- return err;
-
- switch (note_type) {
- case NT_ARM_HW_BREAK:
- if ((type & HW_BREAKPOINT_X) != type)
- return -EINVAL;
- break;
- case NT_ARM_HW_WATCH:
- if ((type & HW_BREAKPOINT_RW) != type)
- return -EINVAL;
- break;
- default:
+ attr->disabled = disabled;
+ if (disabled)
+ return 0;
+
+ err = arch_bp_generic_fields(ctrl, &len, &type);
+ if (err)
+ return err;
+
+ switch (note_type) {
+ case NT_ARM_HW_BREAK:
+ if ((type & HW_BREAKPOINT_X) != type)
return -EINVAL;
- }
+ break;
+ case NT_ARM_HW_WATCH:
+ if ((type & HW_BREAKPOINT_RW) != type)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
}
attr->bp_len = len;
attr->bp_type = type;
- attr->disabled = disabled;
return 0;
}
@@ -636,28 +634,27 @@ static int compat_gpr_get(struct task_struct *target,
for (i = 0; i < num_regs; ++i) {
unsigned int idx = start + i;
- void *reg;
+ compat_ulong_t reg;
switch (idx) {
case 15:
- reg = (void *)&task_pt_regs(target)->pc;
+ reg = task_pt_regs(target)->pc;
break;
case 16:
- reg = (void *)&task_pt_regs(target)->pstate;
+ reg = task_pt_regs(target)->pstate;
break;
case 17:
- reg = (void *)&task_pt_regs(target)->orig_x0;
+ reg = task_pt_regs(target)->orig_x0;
break;
default:
- reg = (void *)&task_pt_regs(target)->regs[idx];
+ reg = task_pt_regs(target)->regs[idx];
}
- ret = copy_to_user(ubuf, reg, sizeof(compat_ulong_t));
-
+ ret = copy_to_user(ubuf, &reg, sizeof(reg));
if (ret)
break;
- else
- ubuf += sizeof(compat_ulong_t);
+
+ ubuf += sizeof(reg);
}
return ret;
@@ -685,28 +682,28 @@ static int compat_gpr_set(struct task_struct *target,
for (i = 0; i < num_regs; ++i) {
unsigned int idx = start + i;
- void *reg;
+ compat_ulong_t reg;
+
+ ret = copy_from_user(&reg, ubuf, sizeof(reg));
+ if (ret)
+ return ret;
+
+ ubuf += sizeof(reg);
switch (idx) {
case 15:
- reg = (void *)&newregs.pc;
+ newregs.pc = reg;
break;
case 16:
- reg = (void *)&newregs.pstate;
+ newregs.pstate = reg;
break;
case 17:
- reg = (void *)&newregs.orig_x0;
+ newregs.orig_x0 = reg;
break;
default:
- reg = (void *)&newregs.regs[idx];
+ newregs.regs[idx] = reg;
}
- ret = copy_from_user(reg, ubuf, sizeof(compat_ulong_t));
-
- if (ret)
- goto out;
- else
- ubuf += sizeof(compat_ulong_t);
}
if (valid_user_regs(&newregs.user_regs))
@@ -714,7 +711,6 @@ static int compat_gpr_set(struct task_struct *target,
else
ret = -EINVAL;
-out:
return ret;
}
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 0bc5e4cbc017..bd9bbd0e44ed 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -205,6 +205,11 @@ u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
void __init setup_arch(char **cmdline_p)
{
+ /*
+ * Unmask asynchronous aborts early to catch possible system errors.
+ */
+ local_async_enable();
+
setup_processor();
setup_machine_fdt(__fdt_pointer);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index a5aeefab03c3..a0c2ca602cf8 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -160,6 +160,7 @@ asmlinkage void secondary_start_kernel(void)
local_irq_enable();
local_fiq_enable();
+ local_async_enable();
/*
* OK, it's off to the idle thread for us
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 421b99fd635d..0f7fec52c7f8 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -111,12 +111,12 @@ ENTRY(__cpu_setup)
bl __flush_dcache_all
mov lr, x28
ic iallu // I+BTB cache invalidate
+ tlbi vmalle1is // invalidate I + D TLBs
dsb sy
mov x0, #3 << 20
msr cpacr_el1, x0 // Enable FP/ASIMD
msr mdscr_el1, xzr // Reset mdscr_el1
- tlbi vmalle1is // invalidate I + D TLBs
/*
* Memory region attributes for LPAE:
*
diff --git a/arch/avr32/boards/favr-32/setup.c b/arch/avr32/boards/favr-32/setup.c
index 7b1f2cd85400..1f121497b517 100644
--- a/arch/avr32/boards/favr-32/setup.c
+++ b/arch/avr32/boards/favr-32/setup.c
@@ -298,8 +298,10 @@ static int __init set_abdac_rate(struct platform_device *pdev)
*/
retval = clk_round_rate(pll1,
CONFIG_BOARD_FAVR32_ABDAC_RATE * 256 * 16);
- if (retval < 0)
+ if (retval <= 0) {
+ retval = -EINVAL;
goto out_abdac;
+ }
retval = clk_set_rate(pll1, retval);
if (retval != 0)
diff --git a/arch/avr32/boot/u-boot/head.S b/arch/avr32/boot/u-boot/head.S
index 4488fa27fe94..2ffc298f061b 100644
--- a/arch/avr32/boot/u-boot/head.S
+++ b/arch/avr32/boot/u-boot/head.S
@@ -8,6 +8,8 @@
* published by the Free Software Foundation.
*/
#include <asm/setup.h>
+#include <asm/thread_info.h>
+#include <asm/sysreg.h>
/*
* The kernel is loaded where we want it to be and all caches
@@ -20,11 +22,6 @@
.section .init.text,"ax"
.global _start
_start:
- /* Check if the boot loader actually provided a tag table */
- lddpc r0, magic_number
- cp.w r12, r0
- brne no_tag_table
-
/* Initialize .bss */
lddpc r2, bss_start_addr
lddpc r3, end_addr
@@ -34,6 +31,25 @@ _start:
cp r2, r3
brlo 1b
+ /* Initialize status register */
+ lddpc r0, init_sr
+ mtsr SYSREG_SR, r0
+
+ /* Set initial stack pointer */
+ lddpc sp, stack_addr
+ sub sp, -THREAD_SIZE
+
+#ifdef CONFIG_FRAME_POINTER
+ /* Mark last stack frame */
+ mov lr, 0
+ mov r7, 0
+#endif
+
+ /* Check if the boot loader actually provided a tag table */
+ lddpc r0, magic_number
+ cp.w r12, r0
+ brne no_tag_table
+
/*
* Save the tag table address for later use. This must be done
* _after_ .bss has been initialized...
@@ -53,8 +69,15 @@ bss_start_addr:
.long __bss_start
end_addr:
.long _end
+init_sr:
+ .long 0x007f0000 /* Supervisor mode, everything masked */
+stack_addr:
+ .long init_thread_union
+panic_addr:
+ .long panic
no_tag_table:
sub r12, pc, (. - 2f)
- bral panic
+ /* panic() may be out of range for a relative branch, so load its address from the constant pool */
+ lddpc pc, panic_addr
2: .asciz "Boot loader didn't provide correct magic number\n"
diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig
index d5aff36ade92..4733e38e7ae6 100644
--- a/arch/avr32/configs/atngw100_defconfig
+++ b/arch/avr32/configs/atngw100_defconfig
@@ -59,7 +59,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atngw100_evklcd100_defconfig b/arch/avr32/configs/atngw100_evklcd100_defconfig
index 4abcf435d599..1be0ee31bd91 100644
--- a/arch/avr32/configs/atngw100_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd100_defconfig
@@ -61,7 +61,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atngw100_evklcd101_defconfig b/arch/avr32/configs/atngw100_evklcd101_defconfig
index 18f3fa0470ff..796e536f7bc4 100644
--- a/arch/avr32/configs/atngw100_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd101_defconfig
@@ -60,7 +60,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atngw100_mrmt_defconfig b/arch/avr32/configs/atngw100_mrmt_defconfig
index 06e389cfcd12..9a57da44eb6f 100644
--- a/arch/avr32/configs/atngw100_mrmt_defconfig
+++ b/arch/avr32/configs/atngw100_mrmt_defconfig
@@ -48,7 +48,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atngw100mkii_defconfig b/arch/avr32/configs/atngw100mkii_defconfig
index 2518a1368d7c..97fe1b399b06 100644
--- a/arch/avr32/configs/atngw100mkii_defconfig
+++ b/arch/avr32/configs/atngw100mkii_defconfig
@@ -59,7 +59,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
index 245ef6bd0fa6..a176d24467e9 100644
--- a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
@@ -62,7 +62,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
index fa6cbac6e418..d1bf6dcfc47d 100644
--- a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
@@ -61,7 +61,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atstk1002_defconfig b/arch/avr32/configs/atstk1002_defconfig
index bbd5131021a5..2813dd2b9138 100644
--- a/arch/avr32/configs/atstk1002_defconfig
+++ b/arch/avr32/configs/atstk1002_defconfig
@@ -53,7 +53,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atstk1003_defconfig b/arch/avr32/configs/atstk1003_defconfig
index c1cd726f9012..f8ff3a3baad4 100644
--- a/arch/avr32/configs/atstk1003_defconfig
+++ b/arch/avr32/configs/atstk1003_defconfig
@@ -42,7 +42,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atstk1004_defconfig b/arch/avr32/configs/atstk1004_defconfig
index 754ae56b2767..992228e54e38 100644
--- a/arch/avr32/configs/atstk1004_defconfig
+++ b/arch/avr32/configs/atstk1004_defconfig
@@ -42,7 +42,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atstk1006_defconfig b/arch/avr32/configs/atstk1006_defconfig
index 58589d8cc0ac..b8e698b0d1fa 100644
--- a/arch/avr32/configs/atstk1006_defconfig
+++ b/arch/avr32/configs/atstk1006_defconfig
@@ -54,7 +54,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/favr-32_defconfig b/arch/avr32/configs/favr-32_defconfig
index c90fbf6d35bc..07bed3f7eb5e 100644
--- a/arch/avr32/configs/favr-32_defconfig
+++ b/arch/avr32/configs/favr-32_defconfig
@@ -58,7 +58,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/hammerhead_defconfig b/arch/avr32/configs/hammerhead_defconfig
index ba7c31e269cb..18db853386c8 100644
--- a/arch/avr32/configs/hammerhead_defconfig
+++ b/arch/avr32/configs/hammerhead_defconfig
@@ -58,7 +58,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/merisc_defconfig b/arch/avr32/configs/merisc_defconfig
index 65de4431108c..91df6b2986be 100644
--- a/arch/avr32/configs/merisc_defconfig
+++ b/arch/avr32/configs/merisc_defconfig
@@ -46,7 +46,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
diff --git a/arch/avr32/configs/mimc200_defconfig b/arch/avr32/configs/mimc200_defconfig
index 0a8bfdc420e0..d630e089dd32 100644
--- a/arch/avr32/configs/mimc200_defconfig
+++ b/arch/avr32/configs/mimc200_defconfig
@@ -49,7 +49,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index 658001b52400..cfb9fe1b8df9 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -18,3 +18,4 @@ generic-y += sections.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += xor.h
+generic-y += hash.h
diff --git a/arch/avr32/include/asm/kprobes.h b/arch/avr32/include/asm/kprobes.h
index 996cb656474e..45f563ed73fd 100644
--- a/arch/avr32/include/asm/kprobes.h
+++ b/arch/avr32/include/asm/kprobes.h
@@ -16,6 +16,7 @@
typedef u16 kprobe_opcode_t;
#define BREAKPOINT_INSTRUCTION 0xd673 /* breakpoint */
#define MAX_INSN_SIZE 2
+#define MAX_STACK_SIZE 64 /* 32 would probably be OK */
#define kretprobe_blacklist_size 0
@@ -26,6 +27,19 @@ struct arch_specific_insn {
kprobe_opcode_t insn[MAX_INSN_SIZE];
};
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned int status;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+ unsigned int kprobe_status;
+ struct prev_kprobe prev_kprobe;
+ struct pt_regs jprobe_saved_regs;
+ char jprobes_stack[MAX_STACK_SIZE];
+};
+
extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
diff --git a/arch/avr32/include/uapi/asm/Kbuild b/arch/avr32/include/uapi/asm/Kbuild
index 3b85eaddf525..08d8a3d76ea8 100644
--- a/arch/avr32/include/uapi/asm/Kbuild
+++ b/arch/avr32/include/uapi/asm/Kbuild
@@ -2,35 +2,35 @@
include include/uapi/asm-generic/Kbuild.asm
header-y += auxvec.h
-header-y += bitsperlong.h
header-y += byteorder.h
header-y += cachectl.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += kvm_para.h
-header-y += mman.h
header-y += msgbuf.h
header-y += param.h
-header-y += poll.h
header-y += posix_types.h
header-y += ptrace.h
-header-y += resource.h
header-y += sembuf.h
header-y += setup.h
header-y += shmbuf.h
header-y += sigcontext.h
-header-y += siginfo.h
header-y += signal.h
header-y += socket.h
header-y += sockios.h
header-y += stat.h
-header-y += statfs.h
header-y += swab.h
header-y += termbits.h
header-y += termios.h
header-y += types.h
header-y += unistd.h
+generic-y += bitsperlong.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += kvm_para.h
+generic-y += mman.h
generic-y += param.h
+generic-y += poll.h
+generic-y += resource.h
+generic-y += siginfo.h
+generic-y += statfs.h
diff --git a/arch/avr32/include/uapi/asm/auxvec.h b/arch/avr32/include/uapi/asm/auxvec.h
index d5dd435bf8f4..4f02da3ffefa 100644
--- a/arch/avr32/include/uapi/asm/auxvec.h
+++ b/arch/avr32/include/uapi/asm/auxvec.h
@@ -1,4 +1,4 @@
-#ifndef __ASM_AVR32_AUXVEC_H
-#define __ASM_AVR32_AUXVEC_H
+#ifndef _UAPI__ASM_AVR32_AUXVEC_H
+#define _UAPI__ASM_AVR32_AUXVEC_H
-#endif /* __ASM_AVR32_AUXVEC_H */
+#endif /* _UAPI__ASM_AVR32_AUXVEC_H */
diff --git a/arch/avr32/include/uapi/asm/bitsperlong.h b/arch/avr32/include/uapi/asm/bitsperlong.h
deleted file mode 100644
index 6dc0bb0c13b2..000000000000
--- a/arch/avr32/include/uapi/asm/bitsperlong.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bitsperlong.h>
diff --git a/arch/avr32/include/uapi/asm/byteorder.h b/arch/avr32/include/uapi/asm/byteorder.h
index 50abc21619a8..71242f0d39c6 100644
--- a/arch/avr32/include/uapi/asm/byteorder.h
+++ b/arch/avr32/include/uapi/asm/byteorder.h
@@ -1,9 +1,9 @@
/*
* AVR32 endian-conversion functions.
*/
-#ifndef __ASM_AVR32_BYTEORDER_H
-#define __ASM_AVR32_BYTEORDER_H
+#ifndef _UAPI__ASM_AVR32_BYTEORDER_H
+#define _UAPI__ASM_AVR32_BYTEORDER_H
#include <linux/byteorder/big_endian.h>
-#endif /* __ASM_AVR32_BYTEORDER_H */
+#endif /* _UAPI__ASM_AVR32_BYTEORDER_H */
diff --git a/arch/avr32/include/uapi/asm/cachectl.h b/arch/avr32/include/uapi/asm/cachectl.h
index 4faf1ce60061..573a9584dd57 100644
--- a/arch/avr32/include/uapi/asm/cachectl.h
+++ b/arch/avr32/include/uapi/asm/cachectl.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_AVR32_CACHECTL_H
-#define __ASM_AVR32_CACHECTL_H
+#ifndef _UAPI__ASM_AVR32_CACHECTL_H
+#define _UAPI__ASM_AVR32_CACHECTL_H
/*
* Operations that can be performed through the cacheflush system call
@@ -8,4 +8,4 @@
/* Clean the data cache, then invalidate the icache */
#define CACHE_IFLUSH 0
-#endif /* __ASM_AVR32_CACHECTL_H */
+#endif /* _UAPI__ASM_AVR32_CACHECTL_H */
diff --git a/arch/avr32/include/uapi/asm/errno.h b/arch/avr32/include/uapi/asm/errno.h
deleted file mode 100644
index 558a7249f06d..000000000000
--- a/arch/avr32/include/uapi/asm/errno.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_ERRNO_H
-#define __ASM_AVR32_ERRNO_H
-
-#include <asm-generic/errno.h>
-
-#endif /* __ASM_AVR32_ERRNO_H */
diff --git a/arch/avr32/include/uapi/asm/fcntl.h b/arch/avr32/include/uapi/asm/fcntl.h
deleted file mode 100644
index 14c0c4402b11..000000000000
--- a/arch/avr32/include/uapi/asm/fcntl.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_FCNTL_H
-#define __ASM_AVR32_FCNTL_H
-
-#include <asm-generic/fcntl.h>
-
-#endif /* __ASM_AVR32_FCNTL_H */
diff --git a/arch/avr32/include/uapi/asm/ioctl.h b/arch/avr32/include/uapi/asm/ioctl.h
deleted file mode 100644
index c8472c1398ef..000000000000
--- a/arch/avr32/include/uapi/asm/ioctl.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_IOCTL_H
-#define __ASM_AVR32_IOCTL_H
-
-#include <asm-generic/ioctl.h>
-
-#endif /* __ASM_AVR32_IOCTL_H */
diff --git a/arch/avr32/include/uapi/asm/ioctls.h b/arch/avr32/include/uapi/asm/ioctls.h
deleted file mode 100644
index 909cf66feaf5..000000000000
--- a/arch/avr32/include/uapi/asm/ioctls.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_IOCTLS_H
-#define __ASM_AVR32_IOCTLS_H
-
-#include <asm-generic/ioctls.h>
-
-#endif /* __ASM_AVR32_IOCTLS_H */
diff --git a/arch/avr32/include/uapi/asm/ipcbuf.h b/arch/avr32/include/uapi/asm/ipcbuf.h
deleted file mode 100644
index 84c7e51cb6d0..000000000000
--- a/arch/avr32/include/uapi/asm/ipcbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipcbuf.h>
diff --git a/arch/avr32/include/uapi/asm/kvm_para.h b/arch/avr32/include/uapi/asm/kvm_para.h
deleted file mode 100644
index 14fab8f0b957..000000000000
--- a/arch/avr32/include/uapi/asm/kvm_para.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>
diff --git a/arch/avr32/include/uapi/asm/mman.h b/arch/avr32/include/uapi/asm/mman.h
deleted file mode 100644
index 8eebf89f5ab1..000000000000
--- a/arch/avr32/include/uapi/asm/mman.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/mman.h>
diff --git a/arch/avr32/include/uapi/asm/msgbuf.h b/arch/avr32/include/uapi/asm/msgbuf.h
index ac18bc4da7f7..9eae6effad14 100644
--- a/arch/avr32/include/uapi/asm/msgbuf.h
+++ b/arch/avr32/include/uapi/asm/msgbuf.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_AVR32_MSGBUF_H
-#define __ASM_AVR32_MSGBUF_H
+#ifndef _UAPI__ASM_AVR32_MSGBUF_H
+#define _UAPI__ASM_AVR32_MSGBUF_H
/*
* The msqid64_ds structure for i386 architecture.
@@ -28,4 +28,4 @@ struct msqid64_ds {
unsigned long __unused5;
};
-#endif /* __ASM_AVR32_MSGBUF_H */
+#endif /* _UAPI__ASM_AVR32_MSGBUF_H */
diff --git a/arch/avr32/include/uapi/asm/poll.h b/arch/avr32/include/uapi/asm/poll.h
deleted file mode 100644
index c98509d3149e..000000000000
--- a/arch/avr32/include/uapi/asm/poll.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/poll.h>
diff --git a/arch/avr32/include/uapi/asm/posix_types.h b/arch/avr32/include/uapi/asm/posix_types.h
index 9ba9e749b3f3..5b813a8abf09 100644
--- a/arch/avr32/include/uapi/asm/posix_types.h
+++ b/arch/avr32/include/uapi/asm/posix_types.h
@@ -5,8 +5,8 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#ifndef __ASM_AVR32_POSIX_TYPES_H
-#define __ASM_AVR32_POSIX_TYPES_H
+#ifndef _UAPI__ASM_AVR32_POSIX_TYPES_H
+#define _UAPI__ASM_AVR32_POSIX_TYPES_H
/*
* This file is generally used by user-level software, so you need to
@@ -34,4 +34,4 @@ typedef unsigned short __kernel_old_dev_t;
#include <asm-generic/posix_types.h>
-#endif /* __ASM_AVR32_POSIX_TYPES_H */
+#endif /* _UAPI__ASM_AVR32_POSIX_TYPES_H */
diff --git a/arch/avr32/include/uapi/asm/resource.h b/arch/avr32/include/uapi/asm/resource.h
deleted file mode 100644
index c6dd101472b1..000000000000
--- a/arch/avr32/include/uapi/asm/resource.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_RESOURCE_H
-#define __ASM_AVR32_RESOURCE_H
-
-#include <asm-generic/resource.h>
-
-#endif /* __ASM_AVR32_RESOURCE_H */
diff --git a/arch/avr32/include/uapi/asm/sembuf.h b/arch/avr32/include/uapi/asm/sembuf.h
index e472216e0c97..6c6f7cf1e75a 100644
--- a/arch/avr32/include/uapi/asm/sembuf.h
+++ b/arch/avr32/include/uapi/asm/sembuf.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_AVR32_SEMBUF_H
-#define __ASM_AVR32_SEMBUF_H
+#ifndef _UAPI__ASM_AVR32_SEMBUF_H
+#define _UAPI__ASM_AVR32_SEMBUF_H
/*
* The semid64_ds structure for AVR32 architecture.
@@ -22,4 +22,4 @@ struct semid64_ds {
unsigned long __unused4;
};
-#endif /* __ASM_AVR32_SEMBUF_H */
+#endif /* _UAPI__ASM_AVR32_SEMBUF_H */
diff --git a/arch/avr32/include/uapi/asm/setup.h b/arch/avr32/include/uapi/asm/setup.h
index e58aa9356faf..a654df7dba46 100644
--- a/arch/avr32/include/uapi/asm/setup.h
+++ b/arch/avr32/include/uapi/asm/setup.h
@@ -13,5 +13,4 @@
#define COMMAND_LINE_SIZE 256
-
#endif /* _UAPI__ASM_AVR32_SETUP_H__ */
diff --git a/arch/avr32/include/uapi/asm/shmbuf.h b/arch/avr32/include/uapi/asm/shmbuf.h
index c62fba41739a..b94cf8b60b73 100644
--- a/arch/avr32/include/uapi/asm/shmbuf.h
+++ b/arch/avr32/include/uapi/asm/shmbuf.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_AVR32_SHMBUF_H
-#define __ASM_AVR32_SHMBUF_H
+#ifndef _UAPI__ASM_AVR32_SHMBUF_H
+#define _UAPI__ASM_AVR32_SHMBUF_H
/*
* The shmid64_ds structure for i386 architecture.
@@ -39,4 +39,4 @@ struct shminfo64 {
unsigned long __unused4;
};
-#endif /* __ASM_AVR32_SHMBUF_H */
+#endif /* _UAPI__ASM_AVR32_SHMBUF_H */
diff --git a/arch/avr32/include/uapi/asm/sigcontext.h b/arch/avr32/include/uapi/asm/sigcontext.h
index e04062b5f39f..27e56bf6377f 100644
--- a/arch/avr32/include/uapi/asm/sigcontext.h
+++ b/arch/avr32/include/uapi/asm/sigcontext.h
@@ -5,8 +5,8 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#ifndef __ASM_AVR32_SIGCONTEXT_H
-#define __ASM_AVR32_SIGCONTEXT_H
+#ifndef _UAPI__ASM_AVR32_SIGCONTEXT_H
+#define _UAPI__ASM_AVR32_SIGCONTEXT_H
struct sigcontext {
unsigned long oldmask;
@@ -31,4 +31,4 @@ struct sigcontext {
unsigned long r0;
};
-#endif /* __ASM_AVR32_SIGCONTEXT_H */
+#endif /* _UAPI__ASM_AVR32_SIGCONTEXT_H */
diff --git a/arch/avr32/include/uapi/asm/siginfo.h b/arch/avr32/include/uapi/asm/siginfo.h
deleted file mode 100644
index 5ee93f40a8a8..000000000000
--- a/arch/avr32/include/uapi/asm/siginfo.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _AVR32_SIGINFO_H
-#define _AVR32_SIGINFO_H
-
-#include <asm-generic/siginfo.h>
-
-#endif
diff --git a/arch/avr32/include/uapi/asm/signal.h b/arch/avr32/include/uapi/asm/signal.h
index 1b77a93eff50..ffe8c770cafd 100644
--- a/arch/avr32/include/uapi/asm/signal.h
+++ b/arch/avr32/include/uapi/asm/signal.h
@@ -118,5 +118,4 @@ typedef struct sigaltstack {
size_t ss_size;
} stack_t;
-
#endif /* _UAPI__ASM_AVR32_SIGNAL_H */
diff --git a/arch/avr32/include/uapi/asm/socket.h b/arch/avr32/include/uapi/asm/socket.h
index 439936421434..cbf902e4cd9e 100644
--- a/arch/avr32/include/uapi/asm/socket.h
+++ b/arch/avr32/include/uapi/asm/socket.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_AVR32_SOCKET_H
-#define __ASM_AVR32_SOCKET_H
+#ifndef _UAPI__ASM_AVR32_SOCKET_H
+#define _UAPI__ASM_AVR32_SOCKET_H
#include <asm/sockios.h>
@@ -78,4 +78,4 @@
#define SO_MAX_PACING_RATE 47
-#endif /* __ASM_AVR32_SOCKET_H */
+#endif /* _UAPI__ASM_AVR32_SOCKET_H */
diff --git a/arch/avr32/include/uapi/asm/sockios.h b/arch/avr32/include/uapi/asm/sockios.h
index 0802d742f97d..d04785453532 100644
--- a/arch/avr32/include/uapi/asm/sockios.h
+++ b/arch/avr32/include/uapi/asm/sockios.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_AVR32_SOCKIOS_H
-#define __ASM_AVR32_SOCKIOS_H
+#ifndef _UAPI__ASM_AVR32_SOCKIOS_H
+#define _UAPI__ASM_AVR32_SOCKIOS_H
/* Socket-level I/O control calls. */
#define FIOSETOWN 0x8901
@@ -10,4 +10,4 @@
#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
-#endif /* __ASM_AVR32_SOCKIOS_H */
+#endif /* _UAPI__ASM_AVR32_SOCKIOS_H */
diff --git a/arch/avr32/include/uapi/asm/stat.h b/arch/avr32/include/uapi/asm/stat.h
index e72881e10230..c06acef7fce7 100644
--- a/arch/avr32/include/uapi/asm/stat.h
+++ b/arch/avr32/include/uapi/asm/stat.h
@@ -5,8 +5,8 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#ifndef __ASM_AVR32_STAT_H
-#define __ASM_AVR32_STAT_H
+#ifndef _UAPI__ASM_AVR32_STAT_H
+#define _UAPI__ASM_AVR32_STAT_H
struct __old_kernel_stat {
unsigned short st_dev;
@@ -76,4 +76,4 @@ struct stat64 {
unsigned long __unused2;
};
-#endif /* __ASM_AVR32_STAT_H */
+#endif /* _UAPI__ASM_AVR32_STAT_H */
diff --git a/arch/avr32/include/uapi/asm/statfs.h b/arch/avr32/include/uapi/asm/statfs.h
deleted file mode 100644
index 2961bd18c50e..000000000000
--- a/arch/avr32/include/uapi/asm/statfs.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_STATFS_H
-#define __ASM_AVR32_STATFS_H
-
-#include <asm-generic/statfs.h>
-
-#endif /* __ASM_AVR32_STATFS_H */
diff --git a/arch/avr32/include/uapi/asm/swab.h b/arch/avr32/include/uapi/asm/swab.h
index 14cc737bbca6..1a03549e7dc5 100644
--- a/arch/avr32/include/uapi/asm/swab.h
+++ b/arch/avr32/include/uapi/asm/swab.h
@@ -1,8 +1,8 @@
/*
* AVR32 byteswapping functions.
*/
-#ifndef __ASM_AVR32_SWAB_H
-#define __ASM_AVR32_SWAB_H
+#ifndef _UAPI__ASM_AVR32_SWAB_H
+#define _UAPI__ASM_AVR32_SWAB_H
#include <linux/types.h>
#include <linux/compiler.h>
@@ -32,4 +32,4 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
#define __arch_swab32 __arch_swab32
#endif
-#endif /* __ASM_AVR32_SWAB_H */
+#endif /* _UAPI__ASM_AVR32_SWAB_H */
diff --git a/arch/avr32/include/uapi/asm/termbits.h b/arch/avr32/include/uapi/asm/termbits.h
index 366adc5ebb10..32789ccb38f8 100644
--- a/arch/avr32/include/uapi/asm/termbits.h
+++ b/arch/avr32/include/uapi/asm/termbits.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_AVR32_TERMBITS_H
-#define __ASM_AVR32_TERMBITS_H
+#ifndef _UAPI__ASM_AVR32_TERMBITS_H
+#define _UAPI__ASM_AVR32_TERMBITS_H
#include <linux/posix_types.h>
@@ -193,4 +193,4 @@ struct ktermios {
#define TCSADRAIN 1
#define TCSAFLUSH 2
-#endif /* __ASM_AVR32_TERMBITS_H */
+#endif /* _UAPI__ASM_AVR32_TERMBITS_H */
diff --git a/arch/avr32/include/uapi/asm/termios.h b/arch/avr32/include/uapi/asm/termios.h
index b8ef8ea63352..c8a0081556c4 100644
--- a/arch/avr32/include/uapi/asm/termios.h
+++ b/arch/avr32/include/uapi/asm/termios.h
@@ -46,5 +46,4 @@ struct termio {
/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-
#endif /* _UAPI__ASM_AVR32_TERMIOS_H */
diff --git a/arch/avr32/include/uapi/asm/types.h b/arch/avr32/include/uapi/asm/types.h
index bb34ad349dfc..7c986c4e99b5 100644
--- a/arch/avr32/include/uapi/asm/types.h
+++ b/arch/avr32/include/uapi/asm/types.h
@@ -5,4 +5,9 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#ifndef _UAPI__ASM_AVR32_TYPES_H
+#define _UAPI__ASM_AVR32_TYPES_H
+
#include <asm-generic/int-ll64.h>
+
+#endif /* _UAPI__ASM_AVR32_TYPES_H */
diff --git a/arch/avr32/include/uapi/asm/unistd.h b/arch/avr32/include/uapi/asm/unistd.h
index 3eaa68753adb..8822bf46ddc6 100644
--- a/arch/avr32/include/uapi/asm/unistd.h
+++ b/arch/avr32/include/uapi/asm/unistd.h
@@ -301,5 +301,4 @@
#define __NR_eventfd 281
#define __NR_setns 283
-
#endif /* _UAPI__ASM_AVR32_UNISTD_H */
diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S
index 9899d3cc6f03..7301f4806bbe 100644
--- a/arch/avr32/kernel/entry-avr32b.S
+++ b/arch/avr32/kernel/entry-avr32b.S
@@ -401,9 +401,10 @@ handle_critical:
/* We should never get here... */
bad_return:
sub r12, pc, (. - 1f)
- bral panic
+ lddpc pc, 2f
.align 2
1: .asciz "Return from critical exception!"
+2: .long panic
.align 1
do_bus_error_write:
diff --git a/arch/avr32/kernel/head.S b/arch/avr32/kernel/head.S
index 6163bd0acb95..59eae6dfbed2 100644
--- a/arch/avr32/kernel/head.S
+++ b/arch/avr32/kernel/head.S
@@ -10,33 +10,13 @@
#include <linux/linkage.h>
#include <asm/page.h>
-#include <asm/thread_info.h>
-#include <asm/sysreg.h>
.section .init.text,"ax"
.global kernel_entry
kernel_entry:
- /* Initialize status register */
- lddpc r0, init_sr
- mtsr SYSREG_SR, r0
-
- /* Set initial stack pointer */
- lddpc sp, stack_addr
- sub sp, -THREAD_SIZE
-
-#ifdef CONFIG_FRAME_POINTER
- /* Mark last stack frame */
- mov lr, 0
- mov r7, 0
-#endif
-
/* Start the show */
lddpc pc, kernel_start_addr
.align 2
-init_sr:
- .long 0x007f0000 /* Supervisor mode, everything masked */
-stack_addr:
- .long init_thread_union
kernel_start_addr:
.long start_kernel
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index 12f828ad5058..d0f771be9e96 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -59,7 +59,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
static struct irqaction timer_irqaction = {
.handler = timer_interrupt,
/* Oprofile uses the same irq as the timer, so allow it to be shared */
- .flags = IRQF_TIMER | IRQF_DISABLED | IRQF_SHARED,
+ .flags = IRQF_TIMER | IRQF_SHARED,
.name = "avr32_comparator",
};
diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
index 32d680eb6f48..db190842b80c 100644
--- a/arch/avr32/mach-at32ap/pm.c
+++ b/arch/avr32/mach-at32ap/pm.c
@@ -181,7 +181,7 @@ static const struct platform_suspend_ops avr32_pm_ops = {
.enter = avr32_pm_enter,
};
-static unsigned long avr32_pm_offset(void *symbol)
+static unsigned long __init avr32_pm_offset(void *symbol)
{
extern u8 pm_exception[];
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index f2b43474b0e2..359d36fdc247 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -45,3 +45,4 @@ generic-y += unaligned.h
generic-y += user.h
generic-y += xor.h
generic-y += preempt.h
+generic-y += hash.h
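This Kbuild line, repeated for a dozen architectures below, tells the build to synthesize the architecture's asm/hash.h rather than having each arch carry its own copy. To my understanding the generated wrapper under include/generated/asm/ amounts to a single line:

    /* generated asm/hash.h wrapper (sketch) */
    #include <asm-generic/hash.h>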
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index fc0b3c356027..d73bb85ccdd3 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -57,3 +57,4 @@ generic-y += user.h
generic-y += vga.h
generic-y += xor.h
generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index b06caf649a95..c5963b3e4624 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -12,3 +12,4 @@ generic-y += trace_clock.h
generic-y += vga.h
generic-y += xor.h
generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild
index 74742dc6a3da..bc42f14c9c2e 100644
--- a/arch/frv/include/asm/Kbuild
+++ b/arch/frv/include/asm/Kbuild
@@ -3,3 +3,4 @@ generic-y += clkdev.h
generic-y += exec.h
generic-y += trace_clock.h
generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 67c3450309b7..469d223950ff 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -54,3 +54,4 @@ generic-y += ucontext.h
generic-y += unaligned.h
generic-y += xor.h
generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index d43daf192b21..4c530a82fc46 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1992,7 +1992,7 @@ sba_connect_bus(struct pci_bus *bus)
if (PCI_CONTROLLER(bus)->iommu)
return;
- handle = PCI_CONTROLLER(bus)->acpi_handle;
+ handle = acpi_device_handle(PCI_CONTROLLER(bus)->companion);
if (!handle)
return;
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index f93ee087e8fe..283a83154b5e 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -4,4 +4,5 @@ generic-y += exec.h
generic-y += kvm_para.h
generic-y += trace_clock.h
generic-y += preempt.h
-generic-y += vtime.h \ No newline at end of file
+generic-y += vtime.h
+generic-y += hash.h
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index 80775f55f03f..71fbaaa495cc 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -95,7 +95,7 @@ struct iospace_resource {
};
struct pci_controller {
- void *acpi_handle;
+ struct acpi_device *companion;
void *iommu;
int segment;
int node; /* nearest node with memory or -1 for global allocation */
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 5a9ff1c3c3e9..cb592773c78b 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2166,12 +2166,6 @@ static const struct file_operations pfm_file_ops = {
.flush = pfm_flush
};
-static int
-pfmfs_delete_dentry(const struct dentry *dentry)
-{
- return 1;
-}
-
static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen)
{
return dynamic_dname(dentry, buffer, buflen, "pfm:[%lu]",
@@ -2179,7 +2173,7 @@ static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen)
}
static const struct dentry_operations pfmfs_dentry_operations = {
- .d_delete = pfmfs_delete_dentry,
+ .d_delete = always_delete_dentry,
.d_dname = pfmfs_dname,
};
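always_delete_dentry() is the shared VFS helper that unconditionally returns 1 from .d_delete, telling the dcache to drop a pseudo-filesystem dentry as soon as its last reference goes away; it replaces identical per-filesystem copies such as the deleted pfmfs_delete_dentry(). Wiring it up, with an illustrative struct name:

    static const struct dentry_operations example_dentry_operations = {
            .d_delete = always_delete_dentry,  /* never cache unused dentries */
    };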
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 2326790b7d8b..9e4938d8ca4d 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -436,9 +436,9 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
if (!controller)
return NULL;
- controller->acpi_handle = device->handle;
+ controller->companion = device;
- pxm = acpi_get_pxm(controller->acpi_handle);
+ pxm = acpi_get_pxm(device->handle);
#ifdef CONFIG_NUMA
if (pxm >= 0)
controller->node = pxm_to_node(pxm);
@@ -489,7 +489,7 @@ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
struct pci_controller *controller = bridge->bus->sysdata;
- ACPI_HANDLE_SET(&bridge->dev, controller->acpi_handle);
+ ACPI_COMPANION_SET(&bridge->dev, controller->companion);
return 0;
}
diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c
index b1725398b5af..0640739cc20c 100644
--- a/arch/ia64/sn/kernel/io_acpi_init.c
+++ b/arch/ia64/sn/kernel/io_acpi_init.c
@@ -132,7 +132,7 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
struct acpi_resource_vendor_typed *vendor;
- handle = PCI_CONTROLLER(bus)->acpi_handle;
+ handle = acpi_device_handle(PCI_CONTROLLER(bus)->companion);
status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
&sn_uuid, &buffer);
if (ACPI_FAILURE(status)) {
@@ -360,7 +360,7 @@ sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info,
acpi_status status;
struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- rootbus_handle = PCI_CONTROLLER(dev)->acpi_handle;
+ rootbus_handle = acpi_device_handle(PCI_CONTROLLER(dev)->companion);
status = acpi_evaluate_integer(rootbus_handle, METHOD_NAME__SEG, NULL,
&segment);
if (ACPI_SUCCESS(status)) {
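These ia64 hunks convert the controller from stashing a raw acpi_handle to stashing the struct acpi_device companion, matching the generic ACPI_COMPANION_SET() plumbing; the handle is recovered on demand through acpi_device_handle(), which as I read it simply returns the companion's handle, or NULL when there is none. A hedged sketch, with an illustrative ctrl standing in for the pci_controller:

    unsigned long long seg;
    struct acpi_device *adev = ctrl->companion;     /* was: ctrl->acpi_handle */
    acpi_handle handle = acpi_device_handle(adev);  /* NULL if no companion */
    acpi_status status;

    if (handle)
            status = acpi_evaluate_integer(handle, METHOD_NAME__SEG,
                                           NULL, &seg);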
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild
index 2b58c5f0bc38..932435ac4e5c 100644
--- a/arch/m32r/include/asm/Kbuild
+++ b/arch/m32r/include/asm/Kbuild
@@ -4,3 +4,4 @@ generic-y += exec.h
generic-y += module.h
generic-y += trace_clock.h
generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index a5d27f272a59..7cc8c364924d 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -32,3 +32,4 @@ generic-y += types.h
generic-y += word-at-a-time.h
generic-y += xor.h
generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild
index 84d0c1d6b9b3..b716d807c2ec 100644
--- a/arch/metag/include/asm/Kbuild
+++ b/arch/metag/include/asm/Kbuild
@@ -53,3 +53,4 @@ generic-y += user.h
generic-y += vga.h
generic-y += xor.h
generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index ce0bbf8f5640..43eec338ff50 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -4,3 +4,4 @@ generic-y += exec.h
generic-y += trace_clock.h
generic-y += syscalls.h
generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 1f30571968e7..9057728ac56b 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -28,6 +28,9 @@
#include <linux/export.h>
#include <linux/types.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_embedded.h>
#include <linux/bcma/bcma_soc.h>
@@ -225,6 +228,12 @@ void __init plat_mem_setup(void)
bcm47xx_board_detect();
}
+static struct fixed_phy_status bcm47xx_fixed_phy_status __initdata = {
+ .link = 1,
+ .speed = SPEED_100,
+ .duplex = DUPLEX_FULL,
+};
+
static int __init bcm47xx_register_bus_complete(void)
{
switch (bcm47xx_bus_type) {
@@ -239,6 +248,7 @@ static int __init bcm47xx_register_bus_complete(void)
break;
#endif
}
+ fixed_phy_add(PHY_POLL, 0, &bcm47xx_fixed_phy_status);
return 0;
}
device_initcall(bcm47xx_register_bus_complete);
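fixed_phy_add() registers an emulated MDIO PHY whose status never changes, which suits these routers where the MAC is wired directly to an on-board switch and there is no real PHY to poll. A hedged sketch of the registration, names illustrative and the three-argument form taken from the call above:

    static struct fixed_phy_status example_phy_status __initdata = {
            .link   = 1,            /* link is always up */
            .speed  = SPEED_100,
            .duplex = DUPLEX_FULL,
    };

    static int __init example_register_phy(void)
    {
            /* PHY_POLL: no interrupt line; id 0 on the fixed MDIO bus */
            return fixed_phy_add(PHY_POLL, 0, &example_phy_status);
    }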
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 1acbb8b77a71..2d7f65052c1f 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -14,3 +14,4 @@ generic-y += trace_clock.h
generic-y += preempt.h
generic-y += ucontext.h
generic-y += xor.h
+generic-y += hash.h
diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild
index 74742dc6a3da..bc42f14c9c2e 100644
--- a/arch/mn10300/include/asm/Kbuild
+++ b/arch/mn10300/include/asm/Kbuild
@@ -3,3 +3,4 @@ generic-y += clkdev.h
generic-y += exec.h
generic-y += trace_clock.h
generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index da1951a22907..2e40f1ca8667 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -69,3 +69,4 @@ generic-y += vga.h
generic-y += word-at-a-time.h
generic-y += xor.h
generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig
index ec1b014952b6..acacd348df89 100644
--- a/arch/parisc/configs/c3000_defconfig
+++ b/arch/parisc/configs/c3000_defconfig
@@ -50,7 +50,7 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_NS87415=y
-CONFIG_BLK_DEV_SIIMAGE=m
+CONFIG_PATA_SIL680=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
index e1c8d2015c89..8249ac9d9cfc 100644
--- a/arch/parisc/configs/c8000_defconfig
+++ b/arch/parisc/configs/c8000_defconfig
@@ -20,7 +20,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_PA8X00=y
-CONFIG_MLONGCALLS=y
CONFIG_64BIT=y
CONFIG_SMP=y
CONFIG_PREEMPT=y
@@ -81,8 +80,6 @@ CONFIG_IDE=y
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_PLATFORM=y
CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_SIIMAGE=y
-CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=m
@@ -94,6 +91,8 @@ CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_SAS_LIBSAS=m
CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
+CONFIG_ATA=y
+CONFIG_PATA_SIL680=y
CONFIG_FUSION=y
CONFIG_FUSION_SPI=y
CONFIG_FUSION_SAS=y
@@ -114,9 +113,8 @@ CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_KEYBOARD_HIL_OLD is not set
# CONFIG_KEYBOARD_HIL is not set
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
CONFIG_INPUT_MISC=y
-CONFIG_INPUT_CM109=m
CONFIG_SERIO_SERPORT=m
CONFIG_SERIO_PARKBD=m
CONFIG_SERIO_GSCPS2=m
@@ -167,34 +165,6 @@ CONFIG_SND_VERBOSE_PRINTK=y
CONFIG_SND_AD1889=m
# CONFIG_SND_USB is not set
# CONFIG_SND_GSC is not set
-CONFIG_HID_A4TECH=m
-CONFIG_HID_APPLE=m
-CONFIG_HID_BELKIN=m
-CONFIG_HID_CHERRY=m
-CONFIG_HID_CHICONY=m
-CONFIG_HID_CYPRESS=m
-CONFIG_HID_DRAGONRISE=m
-CONFIG_HID_EZKEY=m
-CONFIG_HID_KYE=m
-CONFIG_HID_GYRATION=m
-CONFIG_HID_TWINHAN=m
-CONFIG_HID_KENSINGTON=m
-CONFIG_HID_LOGITECH=m
-CONFIG_HID_LOGITECH_DJ=m
-CONFIG_HID_MICROSOFT=m
-CONFIG_HID_MONTEREY=m
-CONFIG_HID_NTRIG=m
-CONFIG_HID_ORTEK=m
-CONFIG_HID_PANTHERLORD=m
-CONFIG_HID_PETALYNX=m
-CONFIG_HID_SAMSUNG=m
-CONFIG_HID_SUNPLUS=m
-CONFIG_HID_GREENASIA=m
-CONFIG_HID_SMARTJOYPLUS=m
-CONFIG_HID_TOPSEED=m
-CONFIG_HID_THRUSTMASTER=m
-CONFIG_HID_ZEROPLUS=m
-CONFIG_USB_HID=m
CONFIG_USB=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_STORAGE=y
diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig
index 5874cebee077..28c1b5de044e 100644
--- a/arch/parisc/configs/generic-64bit_defconfig
+++ b/arch/parisc/configs/generic-64bit_defconfig
@@ -24,7 +24,6 @@ CONFIG_MODVERSIONS=y
CONFIG_BLK_DEV_INTEGRITY=y
# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_PA8X00=y
-CONFIG_MLONGCALLS=y
CONFIG_64BIT=y
CONFIG_SMP=y
# CONFIG_COMPACTION is not set
@@ -68,7 +67,6 @@ CONFIG_IDE_GD=m
CONFIG_IDE_GD_ATAPI=y
CONFIG_BLK_DEV_IDECD=m
CONFIG_BLK_DEV_NS87415=y
-CONFIG_BLK_DEV_SIIMAGE=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
@@ -82,6 +80,7 @@ CONFIG_SCSI_ZALON=y
CONFIG_SCSI_QLA_ISCSI=m
CONFIG_SCSI_DH=y
CONFIG_ATA=y
+CONFIG_PATA_SIL680=y
CONFIG_ATA_GENERIC=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
@@ -162,7 +161,7 @@ CONFIG_SLIP_MODE_SLIP6=y
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_HIL_OLD is not set
# CONFIG_KEYBOARD_HIL is not set
-# CONFIG_INPUT_MOUSE is not set
+# CONFIG_MOUSE_PS2 is not set
CONFIG_INPUT_MISC=y
CONFIG_SERIO_SERPORT=m
# CONFIG_HP_SDC is not set
@@ -216,32 +215,7 @@ CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
-CONFIG_HID=m
CONFIG_HIDRAW=y
-CONFIG_HID_DRAGONRISE=m
-CONFIG_DRAGONRISE_FF=y
-CONFIG_HID_KYE=m
-CONFIG_HID_GYRATION=m
-CONFIG_HID_TWINHAN=m
-CONFIG_LOGITECH_FF=y
-CONFIG_LOGIRUMBLEPAD2_FF=y
-CONFIG_HID_NTRIG=m
-CONFIG_HID_PANTHERLORD=m
-CONFIG_PANTHERLORD_FF=y
-CONFIG_HID_PETALYNX=m
-CONFIG_HID_SAMSUNG=m
-CONFIG_HID_SONY=m
-CONFIG_HID_SUNPLUS=m
-CONFIG_HID_GREENASIA=m
-CONFIG_GREENASIA_FF=y
-CONFIG_HID_SMARTJOYPLUS=m
-CONFIG_SMARTJOYPLUS_FF=y
-CONFIG_HID_TOPSEED=m
-CONFIG_HID_THRUSTMASTER=m
-CONFIG_THRUSTMASTER_FF=y
-CONFIG_HID_ZEROPLUS=m
-CONFIG_ZEROPLUS_FF=y
-CONFIG_USB_HID=m
CONFIG_HID_PID=y
CONFIG_USB_HIDDEV=y
CONFIG_USB=y
@@ -251,13 +225,8 @@ CONFIG_USB_DYNAMIC_MINORS=y
CONFIG_USB_MON=m
CONFIG_USB_WUSB_CBAF=m
CONFIG_USB_XHCI_HCD=m
-CONFIG_USB_EHCI_HCD=m
-CONFIG_USB_OHCI_HCD=m
-CONFIG_USB_R8A66597_HCD=m
-CONFIG_USB_ACM=m
-CONFIG_USB_PRINTER=m
-CONFIG_USB_WDM=m
-CONFIG_USB_TMC=m
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_OHCI_HCD=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_TRIGGERS=y
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index a603b9ebe54c..75edd5fcc6ff 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -5,3 +5,4 @@ generic-y += word-at-a-time.h auxvec.h user.h cputime.h emergency-restart.h \
poll.h xor.h clkdev.h exec.h
generic-y += trace_clock.h
generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/parisc/include/asm/serial.h b/arch/parisc/include/asm/serial.h
index d7e3cc60dbc3..77e9b67c87ee 100644
--- a/arch/parisc/include/asm/serial.h
+++ b/arch/parisc/include/asm/serial.h
@@ -6,5 +6,3 @@
* This is used for 16550-compatible UARTs
*/
#define BASE_BAUD ( 1843200 / 16 )
-
-#define SERIAL_PORT_DFNS
diff --git a/arch/parisc/include/asm/socket.h b/arch/parisc/include/asm/socket.h
new file mode 100644
index 000000000000..748016cb122d
--- /dev/null
+++ b/arch/parisc/include/asm/socket.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_SOCKET_H
+#define _ASM_SOCKET_H
+
+#include <uapi/asm/socket.h>
+
+/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
+ * have to define SOCK_NONBLOCK to a different value here.
+ */
+#define SOCK_NONBLOCK 0x40000000
+
+#endif /* _ASM_SOCKET_H */
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 63f4dd0b49c2..4006964d8e12 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -4,14 +4,11 @@
/*
* User space memory access functions
*/
-#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm-generic/uaccess-unaligned.h>
-#include <linux/sched.h>
-
#define VERIFY_READ 0
#define VERIFY_WRITE 1
@@ -36,43 +33,12 @@ extern int __get_user_bad(void);
extern int __put_kernel_bad(void);
extern int __put_user_bad(void);
-
-/*
- * Test whether a block of memory is a valid user space address.
- * Returns 0 if the range is valid, nonzero otherwise.
- */
-static inline int __range_not_ok(unsigned long addr, unsigned long size,
- unsigned long limit)
+static inline long access_ok(int type, const void __user * addr,
+ unsigned long size)
{
- unsigned long __newaddr = addr + size;
- return (__newaddr < addr || __newaddr > limit || size > limit);
+ return 1;
}
-/**
- * access_ok: - Checks if a user space pointer is valid
- * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
- * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
- * to write to a block, it is always safe to read from it.
- * @addr: User space pointer to start of block to check
- * @size: Size of block to check
- *
- * Context: User context only. This function may sleep.
- *
- * Checks if a pointer to a block of memory in user space is valid.
- *
- * Returns true (nonzero) if the memory block may be valid, false (zero)
- * if it is definitely invalid.
- *
- * Note that, depending on architecture, this function probably just
- * checks that the pointer is in the user space range - after calling
- * this function, memory access functions may still return -EFAULT.
- */
-#define access_ok(type, addr, size) \
-( __chk_user_ptr(addr), \
- !__range_not_ok((unsigned long) (__force void *) (addr), \
- size, user_addr_max()) \
-)
-
#define put_user __put_user
#define get_user __get_user
@@ -253,11 +219,7 @@ extern long lstrnlen_user(const char __user *,long);
/*
* Complex access routines -- macros
*/
-#ifdef CONFIG_COMPAT
-#define user_addr_max() (TASK_SIZE)
-#else
-#define user_addr_max() (DEFAULT_TASK_SIZE)
-#endif
+#define user_addr_max() (~0UL)
#define strnlen_user lstrnlen_user
#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
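This simplification leans on parisc keeping kernel and user mappings in separate address spaces: there is no boundary for access_ok() to police, so every user pointer value is formally acceptable, and a bad one simply faults inside copy_to_user()/copy_from_user(), where the exception fixup tables convert it to -EFAULT. Raising user_addr_max() to ~0UL extends the same reasoning to strncpy_from_user()/strnlen_user(). The whole contract, in sketch form:

    /* disjoint kernel/user address spaces: accept everything and let
     * the fixup tables catch faulting accesses */
    static inline long access_ok(int type, const void __user *addr,
                                 unsigned long size)
    {
            return 1;
    }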
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 7c614d01f1fa..f33113a6141e 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_SOCKET_H
-#define _ASM_SOCKET_H
+#ifndef _UAPI_ASM_SOCKET_H
+#define _UAPI_ASM_SOCKET_H
#include <asm/sockios.h>
@@ -77,9 +77,4 @@
#define SO_MAX_PACING_RATE 0x4048
-/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
- * have to define SOCK_NONBLOCK to a different value here.
- */
-#define SOCK_NONBLOCK 0x40000000
-
-#endif /* _ASM_SOCKET_H */
+#endif /* _UAPI_ASM_SOCKET_H */
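The socket.h split follows the standard UAPI layout: the exported header keeps only ABI-visible constants, while kernel-private overrides such as parisc's SOCK_NONBLOCK move into a non-exported wrapper that includes it. Schematically, with an illustrative option name and value:

    /* include/uapi/asm/socket.h: shipped to userspace */
    #define SO_EXAMPLE      0x4049

    /* include/asm/socket.h: kernel only, never installed */
    #include <uapi/asm/socket.h>
    /* O_NONBLOCK clashes with the socket-type bits on parisc */
    #define SOCK_NONBLOCK   0x40000000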
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c
index 06cb3992907e..608716f8496b 100644
--- a/arch/parisc/kernel/hardware.c
+++ b/arch/parisc/kernel/hardware.c
@@ -36,6 +36,9 @@
* HP PARISC Hardware Database
* Access to this database is only possible during bootup
* so don't reference this table after starting the init process
+ *
+ * NOTE: Product names which are listed here and end with a '?'
+ * are guesses. If you know the correct name, please let us know.
*/
static struct hp_hardware hp_hardware_list[] = {
@@ -222,7 +225,7 @@ static struct hp_hardware hp_hardware_list[] = {
{HPHW_NPROC,0x5DD,0x4,0x81,"Duet W2"},
{HPHW_NPROC,0x5DE,0x4,0x81,"Piccolo W+"},
{HPHW_NPROC,0x5DF,0x4,0x81,"Cantata W2"},
- {HPHW_NPROC,0x5DF,0x0,0x00,"Marcato W+? (rp5470)"},
+ {HPHW_NPROC,0x5DF,0x0,0x00,"Marcato W+ (rp5470)?"},
{HPHW_NPROC,0x5E0,0x4,0x91,"Cantata DC- W2"},
{HPHW_NPROC,0x5E1,0x4,0x91,"Crescendo DC- W2"},
{HPHW_NPROC,0x5E2,0x4,0x91,"Crescendo 650 W2"},
@@ -276,9 +279,11 @@ static struct hp_hardware hp_hardware_list[] = {
{HPHW_NPROC,0x888,0x4,0x91,"Storm Peak Fast DC-"},
{HPHW_NPROC,0x889,0x4,0x91,"Storm Peak Fast"},
{HPHW_NPROC,0x88A,0x4,0x91,"Crestone Peak Slow"},
+ {HPHW_NPROC,0x88B,0x4,0x91,"Crestone Peak Fast?"},
{HPHW_NPROC,0x88C,0x4,0x91,"Orca Mako+"},
{HPHW_NPROC,0x88D,0x4,0x91,"Rainier/Medel Mako+ Slow"},
{HPHW_NPROC,0x88E,0x4,0x91,"Rainier/Medel Mako+ Fast"},
+ {HPHW_NPROC,0x892,0x4,0x91,"Mt. Hamilton Slow Mako+?"},
{HPHW_NPROC,0x894,0x4,0x91,"Mt. Hamilton Fast Mako+"},
{HPHW_NPROC,0x895,0x4,0x91,"Storm Peak Slow Mako+"},
{HPHW_NPROC,0x896,0x4,0x91,"Storm Peak Fast Mako+"},
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index d2d58258aea6..d4dc588c0dc1 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -41,9 +41,7 @@ END(boot_args)
.import fault_vector_11,code /* IVA parisc 1.1 32 bit */
.import $global$ /* forward declaration */
#endif /*!CONFIG_64BIT*/
- .export _stext,data /* Kernel want it this way! */
-_stext:
-ENTRY(stext)
+ENTRY(parisc_kernel_start)
.proc
.callinfo
@@ -347,7 +345,7 @@ smp_slave_stext:
.procend
#endif /* CONFIG_SMP */
-ENDPROC(stext)
+ENDPROC(parisc_kernel_start)
#ifndef CONFIG_64BIT
.section .data..read_mostly
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 5dfd248e3f1a..0d3a9d4927b5 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -61,8 +61,15 @@ static int get_offset(struct address_space *mapping)
return (unsigned long) mapping >> 8;
}
-static unsigned long get_shared_area(struct address_space *mapping,
- unsigned long addr, unsigned long len, unsigned long pgoff)
+static unsigned long shared_align_offset(struct file *filp, unsigned long pgoff)
+{
+ struct address_space *mapping = filp ? filp->f_mapping : NULL;
+
+ return (get_offset(mapping) + pgoff) << PAGE_SHIFT;
+}
+
+static unsigned long get_shared_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff)
{
struct vm_unmapped_area_info info;
@@ -71,7 +78,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
info.low_limit = PAGE_ALIGN(addr);
info.high_limit = TASK_SIZE;
info.align_mask = PAGE_MASK & (SHMLBA - 1);
- info.align_offset = (get_offset(mapping) + pgoff) << PAGE_SHIFT;
+ info.align_offset = shared_align_offset(filp, pgoff);
return vm_unmapped_area(&info);
}
@@ -82,20 +89,18 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
return -ENOMEM;
if (flags & MAP_FIXED) {
if ((flags & MAP_SHARED) &&
- (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
+ (addr - shared_align_offset(filp, pgoff)) & (SHMLBA - 1))
return -EINVAL;
return addr;
}
if (!addr)
addr = TASK_UNMAPPED_BASE;
- if (filp) {
- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
- } else if(flags & MAP_SHARED) {
- addr = get_shared_area(NULL, addr, len, pgoff);
- } else {
+ if (filp || (flags & MAP_SHARED))
+ addr = get_shared_area(filp, addr, len, pgoff);
+ else
addr = get_unshared_area(addr, len);
- }
+
return addr;
}
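Factoring the colour computation into shared_align_offset() guarantees that the MAP_FIXED validity check and the vm_unmapped_area() search agree on the alignment, and a NULL filp (anonymous MAP_SHARED) now flows through the same path instead of a special case. The fixed-address check then reads:

    /* a fixed shared mapping must land on the cache colour the kernel
     * would have picked itself, or aliasing corrupts data */
    if ((flags & MAP_SHARED) &&
        (addr - shared_align_offset(filp, pgoff)) & (SHMLBA - 1))
            return -EINVAL;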
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 76ed62ed785b..ddd988b267a9 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -168,7 +168,7 @@ void unwind_table_remove(struct unwind_table *table)
}
/* Called from setup_arch to import the kernel unwind info */
-int unwind_init(void)
+int __init unwind_init(void)
{
long start, stop;
register unsigned long gp __asm__ ("r27");
@@ -233,7 +233,6 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
e = find_unwind_entry(info->ip);
if (e == NULL) {
unsigned long sp;
- extern char _stext[], _etext[];
dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);
@@ -281,8 +280,7 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
break;
info->prev_ip = tmp;
sp = info->prev_sp;
- } while (info->prev_ip < (unsigned long)_stext ||
- info->prev_ip > (unsigned long)_etext);
+ } while (!kernel_text_address(info->prev_ip));
info->rp = 0;
@@ -435,9 +433,8 @@ unsigned long return_address(unsigned int level)
do {
if (unwind_once(&info) < 0 || info.ip == 0)
return 0;
- if (!__kernel_text_address(info.ip)) {
+ if (!kernel_text_address(info.ip))
return 0;
- }
} while (info.ip && level--);
return info.ip;
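Both unwinder fixes swap hand-rolled _stext/_etext range tests for kernel_text_address(), which also recognises module text (and, while booting, init text), so unwinding no longer stops early in those regions. The predicate is the entire test:

    if (!kernel_text_address(info.ip))      /* core, init, or module text */
            return 0;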
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 4bb095a2f6fc..0dacc5ca555a 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -6,24 +6,19 @@
* Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
* Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org>
- * Copyright (C) 2006 Helge Deller <deller@gmx.de>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Copyright (C) 2006-2013 Helge Deller <deller@gmx.de>
+ */
+
+/*
+ * Put page table entries (swapper_pg_dir) as the first thing in .bss. This
+ * will ensure that it has .bss alignment (PAGE_SIZE).
*/
+#define BSS_FIRST_SECTIONS *(.data..vm0.pmd) \
+ *(.data..vm0.pgd) \
+ *(.data..vm0.pte)
+
#include <asm-generic/vmlinux.lds.h>
+
/* needed for the processor specific cache alignment size */
#include <asm/cache.h>
#include <asm/page.h>
@@ -39,7 +34,7 @@ OUTPUT_FORMAT("elf64-hppa-linux")
OUTPUT_ARCH(hppa:hppa2.0w)
#endif
-ENTRY(_stext)
+ENTRY(parisc_kernel_start)
#ifndef CONFIG_64BIT
jiffies = jiffies_64 + 4;
#else
@@ -49,11 +44,29 @@ SECTIONS
{
. = KERNEL_BINARY_TEXT_START;
+ __init_begin = .;
+ HEAD_TEXT_SECTION
+ INIT_TEXT_SECTION(8)
+
+ . = ALIGN(PAGE_SIZE);
+ INIT_DATA_SECTION(PAGE_SIZE)
+ /* we have to discard exit text and such at runtime, not link time */
+ .exit.text :
+ {
+ EXIT_TEXT
+ }
+ .exit.data :
+ {
+ EXIT_DATA
+ }
+ PERCPU_SECTION(8)
+ . = ALIGN(PAGE_SIZE);
+ __init_end = .;
+ /* freed after init ends here */
+
_text = .; /* Text and read-only data */
- .head ALIGN(16) : {
- HEAD_TEXT
- } = 0
- .text ALIGN(16) : {
+ _stext = .;
+ .text ALIGN(PAGE_SIZE) : {
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
@@ -68,21 +81,28 @@ SECTIONS
*(.lock.text) /* out-of-line lock text */
*(.gnu.warning)
}
- /* End of text section */
+ . = ALIGN(PAGE_SIZE);
_etext = .;
+ /* End of text section */
/* Start of data section */
_sdata = .;
- RODATA
+ RO_DATA_SECTION(8)
- /* writeable */
- /* Make sure this is page aligned so
- * that we can properly leave these
- * as writable
- */
- . = ALIGN(PAGE_SIZE);
- data_start = .;
+#ifdef CONFIG_64BIT
+ . = ALIGN(16);
+ /* Linkage tables */
+ .opd : {
+ *(.opd)
+ } PROVIDE (__gp = .);
+ .plt : {
+ *(.plt)
+ }
+ .dlt : {
+ *(.dlt)
+ }
+#endif
/* unwind info */
.PARISC.unwind : {
@@ -91,7 +111,15 @@ SECTIONS
__stop___unwind = .;
}
- EXCEPTION_TABLE(16)
+ /* writeable */
+ /* Make sure this is page aligned so
+ * that we can properly leave these
+ * as writable
+ */
+ . = ALIGN(PAGE_SIZE);
+ data_start = .;
+
+ EXCEPTION_TABLE(8)
NOTES
/* Data */
@@ -107,54 +135,8 @@ SECTIONS
_edata = .;
/* BSS */
- __bss_start = .;
- /* page table entries need to be PAGE_SIZE aligned */
- . = ALIGN(PAGE_SIZE);
- .data..vmpages : {
- *(.data..vm0.pmd)
- *(.data..vm0.pgd)
- *(.data..vm0.pte)
- }
- .bss : {
- *(.bss)
- *(COMMON)
- }
- __bss_stop = .;
-
-#ifdef CONFIG_64BIT
- . = ALIGN(16);
- /* Linkage tables */
- .opd : {
- *(.opd)
- } PROVIDE (__gp = .);
- .plt : {
- *(.plt)
- }
- .dlt : {
- *(.dlt)
- }
-#endif
+ BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 8)
- /* reserve space for interrupt stack by aligning __init* to 16k */
- . = ALIGN(16384);
- __init_begin = .;
- INIT_TEXT_SECTION(16384)
- . = ALIGN(PAGE_SIZE);
- INIT_DATA_SECTION(16)
- /* we have to discard exit text and such at runtime, not link time */
- .exit.text :
- {
- EXIT_TEXT
- }
- .exit.data :
- {
- EXIT_DATA
- }
-
- PERCPU_SECTION(L1_CACHE_BYTES)
- . = ALIGN(PAGE_SIZE);
- __init_end = .;
- /* freed after init ends here */
_end = . ;
STABS_DEBUG
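The linker-script rework relies on a hook in asm-generic/vmlinux.lds.h: an architecture that defines BSS_FIRST_SECTIONS before including it gets those input sections emitted at the very start of .bss, so the parisc page-table entries inherit the PAGE_SIZE alignment of .bss without a hand-built .data..vmpages section. The hook usage is exactly the new preamble:

    /* emitted first inside .bss, hence PAGE_SIZE aligned */
    #define BSS_FIRST_SECTIONS *(.data..vm0.pmd) \
                               *(.data..vm0.pgd) \
                               *(.data..vm0.pte)

    #include <asm-generic/vmlinux.lds.h>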
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index b5507ec06b84..413dc1769299 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -161,7 +161,7 @@ static inline void prefetch_dst(const void *addr)
/* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words
* per loop. This code is derived from glibc.
*/
-static inline unsigned long copy_dstaligned(unsigned long dst,
+static noinline unsigned long copy_dstaligned(unsigned long dst,
unsigned long src, unsigned long len)
{
/* gcc complains that a2 and a3 may be uninitialized, but actually
@@ -276,7 +276,7 @@ handle_store_error:
/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR.
* In case of an access fault the faulty address can be read from the per_cpu
* exception data struct. */
-static unsigned long pa_memcpy_internal(void *dstp, const void *srcp,
+static noinline unsigned long pa_memcpy_internal(void *dstp, const void *srcp,
unsigned long len)
{
register unsigned long src, dst, t1, t2, t3;
@@ -529,7 +529,7 @@ long probe_kernel_read(void *dst, const void *src, size_t size)
{
unsigned long addr = (unsigned long)src;
- if (size < 0 || addr < PAGE_SIZE)
+ if (addr < PAGE_SIZE)
return -EFAULT;
/* check for I/O space F_EXTEND(0xfff00000) access as well? */
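The deleted 'size < 0' test in probe_kernel_read() was dead code: size is a size_t, which is unsigned, so the comparison is always false, and compilers warn about it. Only the NULL-page rejection does any work:

    /* size_t is unsigned, so "size < 0" can never be true; the real
     * guard is the NULL-page check */
    if (addr < PAGE_SIZE)
            return -EFAULT;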
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 7584a5df0fa4..9d08c71a967e 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -282,16 +282,34 @@ bad_area:
#endif
switch (code) {
case 15: /* Data TLB miss fault/Data page fault */
+ /* send SIGSEGV when outside of vma */
+ if (!vma ||
+ address < vma->vm_start || address > vma->vm_end) {
+ si.si_signo = SIGSEGV;
+ si.si_code = SEGV_MAPERR;
+ break;
+ }
+
+ /* send SIGSEGV for wrong permissions */
+ if ((vma->vm_flags & acc_type) != acc_type) {
+ si.si_signo = SIGSEGV;
+ si.si_code = SEGV_ACCERR;
+ break;
+ }
+
+ /* probably address is outside of mapped file */
+ /* fall through */
case 17: /* NA data TLB miss / page fault */
case 18: /* Unaligned access - PCXS only */
si.si_signo = SIGBUS;
- si.si_code = BUS_ADRERR;
+ si.si_code = (code == 18) ? BUS_ADRALN : BUS_ADRERR;
break;
case 16: /* Non-access instruction TLB miss fault */
case 26: /* PCXL: Data memory access rights trap */
default:
si.si_signo = SIGSEGV;
- si.si_code = SEGV_MAPERR;
+ si.si_code = (code == 26) ? SEGV_ACCERR : SEGV_MAPERR;
+ break;
}
si.si_errno = 0;
si.si_addr = (void __user *) address;
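The rewritten trap switch reports more precise signal codes: an access outside any vma becomes SIGSEGV/SEGV_MAPERR, a permission mismatch SIGSEGV/SEGV_ACCERR, an unaligned access (trap 18) SIGBUS/BUS_ADRALN, and only genuine in-vma data faults keep SIGBUS/BUS_ADRERR. Condensed to the classification itself:

    if (!vma || address < vma->vm_start || address > vma->vm_end)
            si.si_code = SEGV_MAPERR;       /* nothing mapped there */
    else if ((vma->vm_flags & acc_type) != acc_type)
            si.si_code = SEGV_ACCERR;       /* mapped, wrong permissions */
    else if (code == 18)
            si.si_code = BUS_ADRALN;        /* unaligned access, PCXS only */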
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index b0f96c0e6316..96f8168cf4ec 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -32,6 +32,7 @@
#include <asm/sections.h>
extern int data_start;
+extern void parisc_kernel_start(void); /* Kernel entry point in head.S */
#if PT_NLEVELS == 3
/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
@@ -324,8 +325,9 @@ static void __init setup_bootmem(void)
reserve_bootmem_node(NODE_DATA(0), 0UL,
(unsigned long)(PAGE0->mem_free +
PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT);
- reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)_text),
- (unsigned long)(_end - _text), BOOTMEM_DEFAULT);
+ reserve_bootmem_node(NODE_DATA(0), __pa(KERNEL_BINARY_TEXT_START),
+ (unsigned long)(_end - KERNEL_BINARY_TEXT_START),
+ BOOTMEM_DEFAULT);
reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),
BOOTMEM_DEFAULT);
@@ -378,6 +380,17 @@ static void __init setup_bootmem(void)
request_resource(&sysram_resources[0], &pdcdata_resource);
}
+static int __init parisc_text_address(unsigned long vaddr)
+{
+ static unsigned long head_ptr __initdata;
+
+ if (!head_ptr)
+ head_ptr = PAGE_MASK & (unsigned long)
+ dereference_function_descriptor(&parisc_kernel_start);
+
+ return core_kernel_text(vaddr) || vaddr == head_ptr;
+}
+
static void __init map_pages(unsigned long start_vaddr,
unsigned long start_paddr, unsigned long size,
pgprot_t pgprot, int force)
@@ -466,7 +479,7 @@ static void __init map_pages(unsigned long start_vaddr,
*/
if (force)
pte = __mk_pte(address, pgprot);
- else if (core_kernel_text(vaddr) &&
+ else if (parisc_text_address(vaddr) &&
address != fv_addr)
pte = __mk_pte(address, PAGE_KERNEL_EXEC);
else
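parisc_text_address() is needed because the kernel entry code now lives in its own head section, inside the area freed after boot, which core_kernel_text() does not cover, yet its page must still be mapped executable; and on 64-bit parisc a function symbol evaluates to a function descriptor rather than a text address, hence the dereference. Stripped of the one-shot caching:

    /* &parisc_kernel_start is a function descriptor on 64-bit parisc;
     * resolve it to the real entry address, then round to its page */
    unsigned long head_page = PAGE_MASK & (unsigned long)
            dereference_function_descriptor(&parisc_kernel_start);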
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 607acf54a425..0f4344e6fbca 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -75,8 +75,10 @@ LDEMULATION := lppc
GNUTARGET := powerpcle
MULTIPLEWORD := -mno-multiple
else
+ifeq ($(call cc-option-yn,-mbig-endian),y)
override CC += -mbig-endian
override AS += -mbig-endian
+endif
override LD += -EB
LDEMULATION := ppc
GNUTARGET := powerpc
@@ -111,6 +113,7 @@ endif
endif
CFLAGS-$(CONFIG_PPC64) := -mtraceback=no -mcall-aixdesc
+CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,-mminimal-toc)
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 $(MULTIPLEWORD)
@@ -127,7 +130,12 @@ CFLAGS-$(CONFIG_POWER5_CPU) += $(call cc-option,-mcpu=power5)
CFLAGS-$(CONFIG_POWER6_CPU) += $(call cc-option,-mcpu=power6)
CFLAGS-$(CONFIG_POWER7_CPU) += $(call cc-option,-mcpu=power7)
+# Altivec option not allowed with e500mc64 in GCC.
+ifeq ($(CONFIG_ALTIVEC),y)
+E5500_CPU := -mcpu=powerpc64
+else
E5500_CPU := $(call cc-option,-mcpu=e500mc64,-mcpu=powerpc64)
+endif
CFLAGS-$(CONFIG_E5500_CPU) += $(E5500_CPU)
CFLAGS-$(CONFIG_E6500_CPU) += $(call cc-option,-mcpu=e6500,$(E5500_CPU))
diff --git a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
index 4c617bf8cdb2..4f6e48277c46 100644
--- a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
@@ -223,13 +223,13 @@
reg = <0xe2000 0x1000>;
};
-/include/ "qoriq-dma-0.dtsi"
+/include/ "elo3-dma-0.dtsi"
dma@100300 {
fsl,iommu-parent = <&pamu0>;
fsl,liodn-reg = <&guts 0x580>; /* DMA1LIODNR */
};
-/include/ "qoriq-dma-1.dtsi"
+/include/ "elo3-dma-1.dtsi"
dma@101300 {
fsl,iommu-parent = <&pamu0>;
fsl,liodn-reg = <&guts 0x584>; /* DMA2LIODNR */
diff --git a/arch/powerpc/boot/dts/fsl/elo3-dma-0.dtsi b/arch/powerpc/boot/dts/fsl/elo3-dma-0.dtsi
new file mode 100644
index 000000000000..3c210e0d5201
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/elo3-dma-0.dtsi
@@ -0,0 +1,82 @@
+/*
+ * QorIQ Elo3 DMA device tree stub [ controller @ offset 0x100000 ]
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+dma0: dma@100300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,elo3-dma";
+ reg = <0x100300 0x4>,
+ <0x100600 0x4>;
+ ranges = <0x0 0x100100 0x500>;
+ dma-channel@0 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ interrupts = <28 2 0 0>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ interrupts = <29 2 0 0>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ interrupts = <30 2 0 0>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ interrupts = <31 2 0 0>;
+ };
+ dma-channel@300 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x300 0x80>;
+ interrupts = <76 2 0 0>;
+ };
+ dma-channel@380 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x380 0x80>;
+ interrupts = <77 2 0 0>;
+ };
+ dma-channel@400 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x400 0x80>;
+ interrupts = <78 2 0 0>;
+ };
+ dma-channel@480 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x480 0x80>;
+ interrupts = <79 2 0 0>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/elo3-dma-1.dtsi b/arch/powerpc/boot/dts/fsl/elo3-dma-1.dtsi
new file mode 100644
index 000000000000..cccf3bb38224
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/elo3-dma-1.dtsi
@@ -0,0 +1,82 @@
+/*
+ * QorIQ Elo3 DMA device tree stub [ controller @ offset 0x101000 ]
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+dma1: dma@101300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,elo3-dma";
+ reg = <0x101300 0x4>,
+ <0x101600 0x4>;
+ ranges = <0x0 0x101100 0x500>;
+ dma-channel@0 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ interrupts = <32 2 0 0>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ interrupts = <33 2 0 0>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ interrupts = <34 2 0 0>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ interrupts = <35 2 0 0>;
+ };
+ dma-channel@300 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x300 0x80>;
+ interrupts = <80 2 0 0>;
+ };
+ dma-channel@380 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x380 0x80>;
+ interrupts = <81 2 0 0>;
+ };
+ dma-channel@400 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x400 0x80>;
+ interrupts = <82 2 0 0>;
+ };
+ dma-channel@480 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x480 0x80>;
+ interrupts = <83 2 0 0>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
index 510afa362de1..4143a9733cd0 100644
--- a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
@@ -387,8 +387,8 @@
reg = <0xea000 0x4000>;
};
-/include/ "qoriq-dma-0.dtsi"
-/include/ "qoriq-dma-1.dtsi"
+/include/ "elo3-dma-0.dtsi"
+/include/ "elo3-dma-1.dtsi"
/include/ "qoriq-espi-0.dtsi"
spi@110000 {
diff --git a/arch/powerpc/boot/dts/mpc5121.dtsi b/arch/powerpc/boot/dts/mpc5121.dtsi
index bd14c00e5146..2d7cb04ac962 100644
--- a/arch/powerpc/boot/dts/mpc5121.dtsi
+++ b/arch/powerpc/boot/dts/mpc5121.dtsi
@@ -77,7 +77,6 @@
compatible = "fsl,mpc5121-immr";
#address-cells = <1>;
#size-cells = <1>;
- #interrupt-cells = <2>;
ranges = <0x0 0x80000000 0x400000>;
reg = <0x80000000 0x400000>;
bus-frequency = <66000000>; /* 66 MHz ips bus */
diff --git a/arch/powerpc/boot/dts/mpc5125twr.dts b/arch/powerpc/boot/dts/mpc5125twr.dts
index 4177b62240c2..a618dfc13e4c 100644
--- a/arch/powerpc/boot/dts/mpc5125twr.dts
+++ b/arch/powerpc/boot/dts/mpc5125twr.dts
@@ -58,7 +58,6 @@
compatible = "fsl,mpc5121-immr";
#address-cells = <1>;
#size-cells = <1>;
- #interrupt-cells = <2>;
ranges = <0x0 0x80000000 0x400000>;
reg = <0x80000000 0x400000>;
bus-frequency = <66000000>; // 66 MHz ips bus
@@ -189,6 +188,10 @@
reg = <0xA000 0x1000>;
};
+ // disable USB1 port
+ // TODO:
+ // correct pinmux config and fix USB3320 ulpi dependency
+ // before re-enabling it
usb@3000 {
compatible = "fsl,mpc5121-usb2-dr";
reg = <0x3000 0x400>;
@@ -197,6 +200,7 @@
interrupts = <43 0x8>;
dr_mode = "host";
phy_type = "ulpi";
+ status = "disabled";
};
// 5125 PSCs are not 52xx or 5121 PSC compatible
diff --git a/arch/powerpc/boot/dts/xcalibur1501.dts b/arch/powerpc/boot/dts/xcalibur1501.dts
index cc00f4ddd9a7..c409cbafb126 100644
--- a/arch/powerpc/boot/dts/xcalibur1501.dts
+++ b/arch/powerpc/boot/dts/xcalibur1501.dts
@@ -637,14 +637,14 @@
tlu@2f000 {
compatible = "fsl,mpc8572-tlu", "fsl_tlu";
reg = <0x2f000 0x1000>;
- interupts = <61 2 >;
+ interrupts = <61 2>;
interrupt-parent = <&mpic>;
};
tlu@15000 {
compatible = "fsl,mpc8572-tlu", "fsl_tlu";
reg = <0x15000 0x1000>;
- interupts = <75 2>;
+ interrupts = <75 2>;
interrupt-parent = <&mpic>;
};
};
diff --git a/arch/powerpc/boot/dts/xpedite5301.dts b/arch/powerpc/boot/dts/xpedite5301.dts
index 53c1c6a9752f..04cb410da48b 100644
--- a/arch/powerpc/boot/dts/xpedite5301.dts
+++ b/arch/powerpc/boot/dts/xpedite5301.dts
@@ -547,14 +547,14 @@
tlu@2f000 {
compatible = "fsl,mpc8572-tlu", "fsl_tlu";
reg = <0x2f000 0x1000>;
- interupts = <61 2 >;
+ interrupts = <61 2>;
interrupt-parent = <&mpic>;
};
tlu@15000 {
compatible = "fsl,mpc8572-tlu", "fsl_tlu";
reg = <0x15000 0x1000>;
- interupts = <75 2>;
+ interrupts = <75 2>;
interrupt-parent = <&mpic>;
};
};
diff --git a/arch/powerpc/boot/dts/xpedite5330.dts b/arch/powerpc/boot/dts/xpedite5330.dts
index 215225983150..73f8620f1ce7 100644
--- a/arch/powerpc/boot/dts/xpedite5330.dts
+++ b/arch/powerpc/boot/dts/xpedite5330.dts
@@ -583,14 +583,14 @@
tlu@2f000 {
compatible = "fsl,mpc8572-tlu", "fsl_tlu";
reg = <0x2f000 0x1000>;
- interupts = <61 2 >;
+ interrupts = <61 2>;
interrupt-parent = <&mpic>;
};
tlu@15000 {
compatible = "fsl,mpc8572-tlu", "fsl_tlu";
reg = <0x15000 0x1000>;
- interupts = <75 2>;
+ interrupts = <75 2>;
interrupt-parent = <&mpic>;
};
};
diff --git a/arch/powerpc/boot/dts/xpedite5370.dts b/arch/powerpc/boot/dts/xpedite5370.dts
index 11dbda10d756..cd0ea2b99362 100644
--- a/arch/powerpc/boot/dts/xpedite5370.dts
+++ b/arch/powerpc/boot/dts/xpedite5370.dts
@@ -545,14 +545,14 @@
tlu@2f000 {
compatible = "fsl,mpc8572-tlu", "fsl_tlu";
reg = <0x2f000 0x1000>;
- interupts = <61 2 >;
+ interrupts = <61 2>;
interrupt-parent = <&mpic>;
};
tlu@15000 {
compatible = "fsl,mpc8572-tlu", "fsl_tlu";
reg = <0x15000 0x1000>;
- interupts = <75 2>;
+ interrupts = <75 2>;
interrupt-parent = <&mpic>;
};
};
diff --git a/arch/powerpc/boot/util.S b/arch/powerpc/boot/util.S
index 5143228e3e5f..6636b1d7821b 100644
--- a/arch/powerpc/boot/util.S
+++ b/arch/powerpc/boot/util.S
@@ -71,18 +71,32 @@ udelay:
add r4,r4,r5
addi r4,r4,-1
divw r4,r4,r5 /* BUS ticks */
+#ifdef CONFIG_8xx
+1: mftbu r5
+ mftb r6
+ mftbu r7
+#else
1: mfspr r5, SPRN_TBRU
mfspr r6, SPRN_TBRL
mfspr r7, SPRN_TBRU
+#endif
cmpw 0,r5,r7
bne 1b /* Get [synced] base time */
addc r9,r6,r4 /* Compute end time */
addze r8,r5
+#ifdef CONFIG_8xx
+2: mftbu r5
+#else
2: mfspr r5, SPRN_TBRU
+#endif
cmpw 0,r5,r8
blt 2b
bgt 3f
+#ifdef CONFIG_8xx
+ mftb r6
+#else
mfspr r6, SPRN_TBRL
+#endif
cmpw 0,r6,r9
blt 2b
3: blr
diff --git a/arch/powerpc/configs/52xx/cm5200_defconfig b/arch/powerpc/configs/52xx/cm5200_defconfig
index 69b57daf402e..0b88c7b30bb9 100644
--- a/arch/powerpc/configs/52xx/cm5200_defconfig
+++ b/arch/powerpc/configs/52xx/cm5200_defconfig
@@ -12,7 +12,6 @@ CONFIG_EXPERT=y
CONFIG_PPC_MPC52xx=y
CONFIG_PPC_MPC5200_SIMPLE=y
# CONFIG_PPC_PMAC is not set
-CONFIG_PPC_BESTCOMM=y
CONFIG_SPARSE_IRQ=y
CONFIG_PM=y
# CONFIG_PCI is not set
@@ -71,6 +70,8 @@ CONFIG_USB_DEVICEFS=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
CONFIG_USB_STORAGE=y
+CONFIG_DMADEVICES=y
+CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/52xx/lite5200b_defconfig b/arch/powerpc/configs/52xx/lite5200b_defconfig
index f3638ae0a627..104a332e79ab 100644
--- a/arch/powerpc/configs/52xx/lite5200b_defconfig
+++ b/arch/powerpc/configs/52xx/lite5200b_defconfig
@@ -15,7 +15,6 @@ CONFIG_PPC_MPC52xx=y
CONFIG_PPC_MPC5200_SIMPLE=y
CONFIG_PPC_LITE5200=y
# CONFIG_PPC_PMAC is not set
-CONFIG_PPC_BESTCOMM=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SPARSE_IRQ=y
@@ -59,6 +58,8 @@ CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
# CONFIG_HWMON is not set
CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_DMADEVICES=y
+CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig
index 0c7de9620ea6..0d13ad7e4478 100644
--- a/arch/powerpc/configs/52xx/motionpro_defconfig
+++ b/arch/powerpc/configs/52xx/motionpro_defconfig
@@ -12,7 +12,6 @@ CONFIG_EXPERT=y
CONFIG_PPC_MPC52xx=y
CONFIG_PPC_MPC5200_SIMPLE=y
# CONFIG_PPC_PMAC is not set
-CONFIG_PPC_BESTCOMM=y
CONFIG_SPARSE_IRQ=y
CONFIG_PM=y
# CONFIG_PCI is not set
@@ -84,6 +83,8 @@ CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
+CONFIG_DMADEVICES=y
+CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig
index 22e719575c60..430aa182fa1c 100644
--- a/arch/powerpc/configs/52xx/pcm030_defconfig
+++ b/arch/powerpc/configs/52xx/pcm030_defconfig
@@ -21,7 +21,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_PPC_MPC52xx=y
CONFIG_PPC_MPC5200_SIMPLE=y
# CONFIG_PPC_PMAC is not set
-CONFIG_PPC_BESTCOMM=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_100=y
@@ -87,6 +86,8 @@ CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
CONFIG_USB_STORAGE=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_PCF8563=m
+CONFIG_DMADEVICES=y
+CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=m
CONFIG_EXT3_FS=m
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig
index 716a37be16e3..7af4c5bb7c63 100644
--- a/arch/powerpc/configs/52xx/tqm5200_defconfig
+++ b/arch/powerpc/configs/52xx/tqm5200_defconfig
@@ -17,7 +17,6 @@ CONFIG_PPC_MPC52xx=y
CONFIG_PPC_MPC5200_SIMPLE=y
CONFIG_PPC_MPC5200_BUGFIX=y
# CONFIG_PPC_PMAC is not set
-CONFIG_PPC_BESTCOMM=y
CONFIG_PM=y
# CONFIG_PCI is not set
CONFIG_NET=y
@@ -86,6 +85,8 @@ CONFIG_USB_STORAGE=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_RTC_DRV_DS1374=y
+CONFIG_DMADEVICES=y
+CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig
index 6640a35bebb7..8b682d1cf4d6 100644
--- a/arch/powerpc/configs/mpc5200_defconfig
+++ b/arch/powerpc/configs/mpc5200_defconfig
@@ -15,7 +15,6 @@ CONFIG_PPC_MEDIA5200=y
CONFIG_PPC_MPC5200_BUGFIX=y
CONFIG_PPC_MPC5200_LPBFIFO=m
# CONFIG_PPC_PMAC is not set
-CONFIG_PPC_BESTCOMM=y
CONFIG_SIMPLE_GPIO=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -125,6 +124,8 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_RTC_DRV_DS1374=y
CONFIG_RTC_DRV_PCF8563=m
+CONFIG_DMADEVICES=y
+CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index bd8a6f71944f..cec044a3ff69 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -2,7 +2,6 @@ CONFIG_PPC64=y
CONFIG_ALTIVEC=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -45,8 +44,9 @@ CONFIG_INET_AH=y
CONFIG_INET_ESP=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_SLRAM=y
CONFIG_MTD_PHRAM=y
@@ -88,7 +88,6 @@ CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
-CONFIG_MII=y
CONFIG_TIGON3=y
CONFIG_E1000=y
CONFIG_PASEMI_MAC=y
@@ -174,8 +173,8 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_CRC_CCITT=y
CONFIG_PRINTK_TIME=y
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/powerpc/configs/pseries_le_defconfig b/arch/powerpc/configs/pseries_le_defconfig
new file mode 100644
index 000000000000..62771e0adb7c
--- /dev/null
+++ b/arch/powerpc/configs/pseries_le_defconfig
@@ -0,0 +1,352 @@
+CONFIG_PPC64=y
+CONFIG_ALTIVEC=y
+CONFIG_VSX=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2048
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_PPC_SPLPAR=y
+CONFIG_SCANLOG=m
+CONFIG_PPC_SMLPAR=y
+CONFIG_DTL=y
+# CONFIG_PPC_PMAC is not set
+CONFIG_RTAS_FLASH=m
+CONFIG_IBMEBUS=y
+CONFIG_HZ_100=y
+CONFIG_BINFMT_MISC=m
+CONFIG_PPC_TRANSACTIONAL_MEM=y
+CONFIG_KEXEC=y
+CONFIG_IRQ_ALL_CPUS=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_CMA=y
+CONFIG_PPC_64K_PAGES=y
+CONFIG_PPC_SUBPAGE_PROT=y
+CONFIG_SCHED_SMT=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_RPA=m
+CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_NET_IPIP=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SCTP=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_PROC_DEVICETREE=y
+CONFIG_PARPORT=m
+CONFIG_PARPORT_PC=m
+CONFIG_BLK_DEV_FD=m
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_VIRTIO_BLK=m
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_AMD74XX=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_FC_ATTRS=y
+CONFIG_SCSI_CXGB3_ISCSI=m
+CONFIG_SCSI_CXGB4_ISCSI=m
+CONFIG_SCSI_BNX2_ISCSI=m
+CONFIG_BE2ISCSI=m
+CONFIG_SCSI_MPT2SAS=m
+CONFIG_SCSI_IBMVSCSI=y
+CONFIG_SCSI_IBMVFC=m
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
+CONFIG_SCSI_IPR=y
+CONFIG_SCSI_QLA_FC=m
+CONFIG_SCSI_QLA_ISCSI=m
+CONFIG_SCSI_LPFC=m
+CONFIG_SCSI_VIRTIO=m
+CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH_RDAC=m
+CONFIG_SCSI_DH_ALUA=m
+CONFIG_ATA=y
+# CONFIG_ATA_SFF is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_MD_RAID0=y
+CONFIG_MD_RAID1=y
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_UEVENT=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_NETCONSOLE=y
+CONFIG_NETPOLL_TRAP=y
+CONFIG_TUN=m
+CONFIG_VIRTIO_NET=m
+CONFIG_VORTEX=y
+CONFIG_ACENIC=m
+CONFIG_ACENIC_OMIT_TIGON_I=y
+CONFIG_PCNET32=y
+CONFIG_TIGON3=y
+CONFIG_CHELSIO_T1=m
+CONFIG_BE2NET=m
+CONFIG_S2IO=m
+CONFIG_IBMVETH=y
+CONFIG_EHEA=y
+CONFIG_E100=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_IXGB=m
+CONFIG_IXGBE=m
+CONFIG_MLX4_EN=m
+CONFIG_MYRI10GE=m
+CONFIG_QLGE=m
+CONFIG_NETXEN_NIC=m
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_EVDEV=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_PCSPKR=m
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_ICOM=m
+CONFIG_SERIAL_JSM=m
+CONFIG_HVC_CONSOLE=y
+CONFIG_HVC_RTAS=y
+CONFIG_HVCS=m
+CONFIG_VIRTIO_CONSOLE=m
+CONFIG_IBM_BSR=m
+CONFIG_GEN_RTC=y
+CONFIG_RAW_DRIVER=y
+CONFIG_MAX_RAW_DEVS=1024
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_OF=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_MILLENIUM=y
+CONFIG_FB_MATROX_MYSTIQUE=y
+CONFIG_FB_MATROX_G=y
+CONFIG_FB_RADEON=y
+CONFIG_FB_IBM_GXT4500=y
+CONFIG_LCD_PLATFORM=m
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_MON=m
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_HCD_PPC_OF is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_STORAGE=m
+CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_USER_MAD=m
+CONFIG_INFINIBAND_USER_ACCESS=m
+CONFIG_INFINIBAND_MTHCA=m
+CONFIG_INFINIBAND_EHCA=m
+CONFIG_INFINIBAND_CXGB3=m
+CONFIG_INFINIBAND_CXGB4=m
+CONFIG_MLX4_INFINIBAND=m
+CONFIG_INFINIBAND_IPOIB=m
+CONFIG_INFINIBAND_IPOIB_CM=y
+CONFIG_INFINIBAND_SRP=m
+CONFIG_INFINIBAND_ISER=m
+CONFIG_VIRTIO_PCI=m
+CONFIG_VIRTIO_BALLOON=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT2_FS_XIP=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_REISERFS_FS=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_NILFS2_FS=m
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_PSTORE=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_CIFS=m
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+CONFIG_CRC_T10DIF=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_LATENCYTOP=y
+CONFIG_SCHED_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_CODE_PATCHING_SELFTEST=y
+CONFIG_FTR_FIXUP_SELFTEST=y
+CONFIG_MSI_BITMAP_SELFTEST=y
+CONFIG_XMON=y
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_LZO=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DEV_NX=y
+CONFIG_CRYPTO_DEV_NX_ENCRYPT=m
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index d8f9d2f18a23..6c0a955a1b06 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -3,4 +3,5 @@ generic-y += clkdev.h
generic-y += rwsem.h
generic-y += trace_clock.h
generic-y += preempt.h
-generic-y += vtime.h
\ No newline at end of file
+generic-y += vtime.h
+generic-y += hash.h
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index cc0655a702a7..935b5e7a1436 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -31,6 +31,8 @@
extern unsigned long randomize_et_dyn(unsigned long base);
#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
+#define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
+
/*
* Our registers are always unsigned longs, whether we're a 32 bit
* process or 64 bit, on either a 64 bit or 32 bit kernel.
@@ -86,6 +88,8 @@ typedef elf_vrregset_t elf_fpxregset_t;
#ifdef __powerpc64__
# define SET_PERSONALITY(ex) \
do { \
+ if (((ex).e_flags & 0x3) == 2) \
+ set_thread_flag(TIF_ELF2ABI); \
if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
set_thread_flag(TIF_32BIT); \
else \
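As an aside on the SET_PERSONALITY hunk above: the low two bits of e_flags carry the ppc64 ELF ABI version, and the value 2 denotes ELFv2. The same test in userspace terms, as a minimal sketch (is_elfv2() is a hypothetical helper name, not part of the patch):

    #include <elf.h>

    static int is_elfv2(const Elf64_Ehdr *ehdr)
    {
            /* ppc64 stores the ELF ABI version in e_flags bits 0-1; 2 == ELFv2 */
            return (ehdr->e_flags & 0x3) == 2;
    }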
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 894662a5d4d5..243ce69ad685 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -284,7 +284,7 @@ do_kvm_##n: \
subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
beq- 1f; \
ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
-1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
+1: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \
blt+ cr1,3f; /* abort if it is */ \
li r1,(n); /* will be reloaded later */ \
sth r1,PACA_TRAP_SAVE(r13); \
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 0c7f2bfcf134..d8b600b3f058 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -403,6 +403,8 @@ static inline unsigned long cmo_get_page_size(void)
extern long pSeries_enable_reloc_on_exc(void);
extern long pSeries_disable_reloc_on_exc(void);
+extern long pseries_big_endian_exceptions(void);
+
#else
#define pSeries_enable_reloc_on_exc() do {} while (0)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 4a594b76674d..bc23b1ba7980 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -192,6 +192,10 @@ extern void kvmppc_load_up_vsx(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
+extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
+ struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
+ struct kvmppc_book3s_shadow_vcpu *svcpu);
static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 0bd9348a4db9..192917d2239c 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -79,6 +79,7 @@ struct kvmppc_host_state {
ulong vmhandler;
ulong scratch0;
ulong scratch1;
+ ulong scratch2;
u8 in_guest;
u8 restore_hid5;
u8 napping;
@@ -106,6 +107,7 @@ struct kvmppc_host_state {
};
struct kvmppc_book3s_shadow_vcpu {
+ bool in_use;
ulong gpr[14];
u32 cr;
u32 xer;
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 033c06be1d84..7bdcf340016c 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -720,13 +720,13 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
int64_t opal_pci_poll(uint64_t phb_id);
int64_t opal_return_cpu(void);
-int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, uint64_t *val);
+int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val);
int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val);
int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
uint32_t addr, uint32_t data, uint32_t sz);
int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
- uint32_t addr, uint32_t *data, uint32_t sz);
+ uint32_t addr, __be32 *data, uint32_t sz);
int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
int64_t opal_manage_flash(uint8_t op);
int64_t opal_update_flash(uint64_t blk_list);
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
index 27b2386f738a..842846c1b711 100644
--- a/arch/powerpc/include/asm/pgalloc-32.h
+++ b/arch/powerpc/include/asm/pgalloc-32.h
@@ -84,10 +84,8 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
- struct page *page = page_address(table);
-
tlb_flush_pgtable(tlb, address);
- pgtable_page_dtor(page);
- pgtable_free_tlb(tlb, page, 0);
+ pgtable_page_dtor(table);
+ pgtable_free_tlb(tlb, page_address(table), 0);
}
#endif /* _ASM_POWERPC_PGALLOC_32_H */
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 16cb92d215d2..4b0be20fcbfd 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -16,6 +16,7 @@ struct vmemmap_backing {
unsigned long phys;
unsigned long virt_addr;
};
+extern struct vmemmap_backing *vmemmap_list;
/*
* Functions that deal with pagetables that could be at any level of
@@ -147,11 +148,9 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
- struct page *page = page_address(table);
-
tlb_flush_pgtable(tlb, address);
- pgtable_page_dtor(page);
- pgtable_free_tlb(tlb, page, 0);
+ pgtable_page_dtor(table);
+ pgtable_free_tlb(tlb, page_address(table), 0);
}
#else /* if CONFIG_PPC_64K_PAGES */
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index a63b045e707c..12c32c5f533d 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -287,6 +287,32 @@ static inline long disable_reloc_on_exceptions(void) {
return plpar_set_mode(0, 3, 0, 0);
}
+/*
+ * Take exceptions in big endian mode on this partition
+ *
+ * Note: this call has a partition wide scope and can take a while to complete.
+ * If it returns H_LONG_BUSY_* it should be retried periodically until it
+ * returns H_SUCCESS.
+ */
+static inline long enable_big_endian_exceptions(void)
+{
+ /* mflags = 0: big endian exceptions */
+ return plpar_set_mode(0, 4, 0, 0);
+}
+
+/*
+ * Take exceptions in little endian mode on this partition
+ *
+ * Note: this call has a partition wide scope and can take a while to complete.
+ * If it returns H_LONG_BUSY_* it should be retried periodically until it
+ * returns H_SUCCESS.
+ */
+static inline long enable_little_endian_exceptions(void)
+{
+ /* mflags = 1: little endian exceptions */
+ return plpar_set_mode(1, 4, 0, 0);
+}
+
static inline long plapr_set_ciabr(unsigned long ciabr)
{
return plpar_set_mode(0, 1, ciabr, 0);
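The comments on the two new wrappers say H_LONG_BUSY_* returns must be retried until H_SUCCESS. A minimal caller sketch, assuming the H_IS_LONG_BUSY() and get_longbusy_msecs() helpers apply to this hcall's return codes (set_endian_mode_sync() is a hypothetical name):

    #include <linux/delay.h>

    static long set_endian_mode_sync(bool little_endian)
    {
            long rc;

            do {
                    rc = little_endian ? enable_little_endian_exceptions()
                                       : enable_big_endian_exceptions();
                    if (H_IS_LONG_BUSY(rc))
                            mdelay(get_longbusy_msecs(rc));
            } while (H_IS_LONG_BUSY(rc));

            return rc;      /* H_SUCCESS or a real error */
    }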
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 3c1acc31a092..f595b98079ee 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -366,6 +366,8 @@ BEGIN_FTR_SECTION_NESTED(96); \
cmpwi dest,0; \
beq- 90b; \
END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
+#elif defined(CONFIG_8xx)
+#define MFTB(dest) mftb dest
#else
#define MFTB(dest) mfspr dest, SPRN_TBRL
#endif
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 5c45787d551e..fa8388ed94c5 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1174,12 +1174,19 @@
#else /* __powerpc64__ */
+#if defined(CONFIG_8xx)
+#define mftbl() ({unsigned long rval; \
+ asm volatile("mftbl %0" : "=r" (rval)); rval;})
+#define mftbu() ({unsigned long rval; \
+ asm volatile("mftbu %0" : "=r" (rval)); rval;})
+#else
#define mftbl() ({unsigned long rval; \
asm volatile("mfspr %0, %1" : "=r" (rval) : \
"i" (SPRN_TBRL)); rval;})
#define mftbu() ({unsigned long rval; \
asm volatile("mfspr %0, %1" : "=r" (rval) : \
"i" (SPRN_TBRU)); rval;})
+#endif
#endif /* !__powerpc64__ */
#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 98da78e0c2c0..084e0807db98 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -33,6 +33,7 @@ extern int boot_cpuid;
extern int spinning_secondaries;
extern void cpu_die(void);
+extern int cpu_to_chip_id(int cpu);
#ifdef CONFIG_SMP
@@ -112,7 +113,6 @@ static inline struct cpumask *cpu_core_mask(int cpu)
}
extern int cpu_to_core_id(int cpu);
-extern int cpu_to_chip_id(int cpu);
/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
*
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 9ee12610af02..aace90547614 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -35,7 +35,7 @@ extern void giveup_vsx(struct task_struct *);
extern void enable_kernel_spe(void);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
-extern void switch_booke_debug_regs(struct thread_struct *new_thread);
+extern void switch_booke_debug_regs(struct debug_reg *new_debug);
#ifndef CONFIG_SMP
extern void discard_lazy_cpu_state(void);
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 8fd6cf6dcee8..9854c564ac52 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -105,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
for stack store? */
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
+#if defined(CONFIG_PPC64)
+#define TIF_ELF2ABI 18 /* function descriptors must die! */
+#endif
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -183,6 +186,12 @@ static inline bool test_thread_local_flags(unsigned int flags)
#define is_32bit_task() (1)
#endif
+#if defined(CONFIG_PPC64)
+#define is_elf2_task() (test_thread_flag(TIF_ELF2ABI))
+#else
+#define is_elf2_task() (0)
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h
index 18908caa1f3b..2cf846edb3fc 100644
--- a/arch/powerpc/include/asm/timex.h
+++ b/arch/powerpc/include/asm/timex.h
@@ -29,7 +29,11 @@ static inline cycles_t get_cycles(void)
ret = 0;
__asm__ __volatile__(
+#ifdef CONFIG_8xx
+ "97: mftb %0\n"
+#else
"97: mfspr %0, %2\n"
+#endif
"99:\n"
".section __ftr_fixup,\"a\"\n"
".align 2\n"
@@ -41,7 +45,11 @@ static inline cycles_t get_cycles(void)
" .long 0\n"
" .long 0\n"
".previous"
+#ifdef CONFIG_8xx
+ : "=r" (ret) : "i" (CPU_FTR_601));
+#else
: "=r" (ret) : "i" (CPU_FTR_601), "i" (SPRN_TBRL));
+#endif
return ret;
#endif
}
diff --git a/arch/powerpc/include/asm/unaligned.h b/arch/powerpc/include/asm/unaligned.h
index 5f1b1e3c2137..8296381ae432 100644
--- a/arch/powerpc/include/asm/unaligned.h
+++ b/arch/powerpc/include/asm/unaligned.h
@@ -4,13 +4,18 @@
#ifdef __KERNEL__
/*
- * The PowerPC can do unaligned accesses itself in big endian mode.
+ * The PowerPC can do unaligned accesses itself based on its endian mode.
*/
#include <linux/unaligned/access_ok.h>
#include <linux/unaligned/generic.h>
+#ifdef __LITTLE_ENDIAN__
+#define get_unaligned __get_unaligned_le
+#define put_unaligned __put_unaligned_le
+#else
#define get_unaligned __get_unaligned_be
#define put_unaligned __put_unaligned_be
+#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_UNALIGNED_H */
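With this change get_unaligned()/put_unaligned() follow the kernel's endianness rather than always assuming big endian. A usage sketch (parse_len() and the buffer layout are illustrative only):

    static u32 parse_len(const u8 *buf)
    {
            /* reads a native-endian u32 at any alignment */
            return get_unaligned((const u32 *)buf);
    }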
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 2ea5cc033ec8..d3de01066f7d 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -576,6 +576,7 @@ int main(void)
HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
+ HSTATE_FIELD(HSTATE_SCRATCH2, scratch2);
HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
HSTATE_FIELD(HSTATE_NAPPING, napping);
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 779a78c26435..11c1d069d920 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -124,15 +124,15 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
unsigned long addr;
- const u32 *basep, *sizep;
+ const __be32 *basep, *sizep;
unsigned int rtas_start = 0, rtas_end = 0;
basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
sizep = of_get_property(rtas.dev, "rtas-size", NULL);
if (basep && sizep) {
- rtas_start = *basep;
- rtas_end = *basep + *sizep;
+ rtas_start = be32_to_cpup(basep);
+ rtas_end = rtas_start + be32_to_cpup(sizep);
}
for (addr = begin; addr < end; addr += PAGE_SIZE) {
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 671302065347..4bd687d5e7aa 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -686,6 +686,15 @@ void eeh_save_bars(struct eeh_dev *edev)
for (i = 0; i < 16; i++)
eeh_ops->read_config(dn, i * 4, 4, &edev->config_space[i]);
+
+ /*
+	 * For PCI bridges, including the root port, we need to enable
+	 * bus mastering explicitly. Otherwise, the bridge can't fetch
+	 * IODA table entries correctly. So we cache the bit in advance
+	 * so that we can restore it after reset, for either the PHB
+	 * range or the PE range.
+ */
+ if (edev->mode & EEH_DEV_BRIDGE)
+ edev->config_space[1] |= PCI_COMMAND_MASTER;
}
/**
diff --git a/arch/powerpc/kernel/eeh_event.c b/arch/powerpc/kernel/eeh_event.c
index d27c5afc90ae..72d748b56c86 100644
--- a/arch/powerpc/kernel/eeh_event.c
+++ b/arch/powerpc/kernel/eeh_event.c
@@ -74,8 +74,13 @@ static int eeh_event_handler(void * dummy)
pe = event->pe;
if (pe) {
eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
- pr_info("EEH: Detected PCI bus error on PHB#%d-PE#%x\n",
- pe->phb->global_number, pe->addr);
+ if (pe->type & EEH_PE_PHB)
+ pr_info("EEH: Detected error on PHB#%d\n",
+ pe->phb->global_number);
+ else
+ pr_info("EEH: Detected PCI bus error on "
+ "PHB#%d-PE#%x\n",
+ pe->phb->global_number, pe->addr);
eeh_handle_event(pe);
eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
} else {
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 2ae41aba4053..4f0946de2d5c 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -80,6 +80,7 @@ END_FTR_SECTION(0, 1)
* of the function that the cpu should jump to to continue
* initialization.
*/
+ .balign 8
.globl __secondary_hold_spinloop
__secondary_hold_spinloop:
.llong 0x0
@@ -470,6 +471,7 @@ _STATIC(__after_prom_start)
mtctr r8
bctr
+.balign 8
p_end: .llong _end - _stext
4: /* Now copy the rest of the kernel up to _end */
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index e1ec57e87b3b..75d4f7340da8 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -18,6 +18,7 @@
#include <linux/ftrace.h>
#include <asm/machdep.h>
+#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/sections.h>
@@ -75,6 +76,17 @@ void arch_crash_save_vmcoreinfo(void)
#ifndef CONFIG_NEED_MULTIPLE_NODES
VMCOREINFO_SYMBOL(contig_page_data);
#endif
+#if defined(CONFIG_PPC64) && defined(CONFIG_SPARSEMEM_VMEMMAP)
+ VMCOREINFO_SYMBOL(vmemmap_list);
+ VMCOREINFO_SYMBOL(mmu_vmemmap_psize);
+ VMCOREINFO_SYMBOL(mmu_psize_defs);
+ VMCOREINFO_STRUCT_SIZE(vmemmap_backing);
+ VMCOREINFO_OFFSET(vmemmap_backing, list);
+ VMCOREINFO_OFFSET(vmemmap_backing, phys);
+ VMCOREINFO_OFFSET(vmemmap_backing, virt_addr);
+ VMCOREINFO_STRUCT_SIZE(mmu_psize_def);
+ VMCOREINFO_OFFSET(mmu_psize_def, shift);
+#endif
}
/*
@@ -136,7 +148,7 @@ void __init reserve_crashkernel(void)
* a small SLB (128MB) since the crash kernel needs to place
* itself and some stacks to be in the first segment.
*/
- crashk_res.start = min(0x80000000ULL, (ppc64_rma_size / 2));
+ crashk_res.start = min(0x8000000ULL, (ppc64_rma_size / 2));
#else
crashk_res.start = KDUMP_KERNELBASE;
#endif
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index e59caf874d05..64bf8db12b15 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -246,8 +246,8 @@ _GLOBAL(__bswapdi2)
or r3,r7,r9
blr
-#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
+#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
_GLOBAL(rmci_on)
sync
isync
@@ -277,6 +277,9 @@ _GLOBAL(rmci_off)
isync
sync
blr
+#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
+
+#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
/*
* Do an IO access in real mode
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index fd82c289ab1c..28b898e68185 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -210,7 +210,7 @@ static void __init nvram_print_partitions(char * label)
printk(KERN_WARNING "--------%s---------\n", label);
printk(KERN_WARNING "indx\t\tsig\tchks\tlen\tname\n");
list_for_each_entry(tmp_part, &nvram_partitions, partition) {
- printk(KERN_WARNING "%4d \t%02x\t%02x\t%d\t%12s\n",
+ printk(KERN_WARNING "%4d \t%02x\t%02x\t%d\t%12.12s\n",
tmp_part->index, tmp_part->header.signature,
tmp_part->header.checksum, tmp_part->header.length,
tmp_part->header.name);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 75c2d1009985..4a96556fd2d4 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -339,7 +339,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
#endif
}
-static void prime_debug_regs(struct thread_struct *thread)
+static void prime_debug_regs(struct debug_reg *debug)
{
/*
* We could have inherited MSR_DE from userspace, since
@@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread)
*/
mtmsr(mfmsr() & ~MSR_DE);
- mtspr(SPRN_IAC1, thread->debug.iac1);
- mtspr(SPRN_IAC2, thread->debug.iac2);
+ mtspr(SPRN_IAC1, debug->iac1);
+ mtspr(SPRN_IAC2, debug->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
- mtspr(SPRN_IAC3, thread->debug.iac3);
- mtspr(SPRN_IAC4, thread->debug.iac4);
+ mtspr(SPRN_IAC3, debug->iac3);
+ mtspr(SPRN_IAC4, debug->iac4);
#endif
- mtspr(SPRN_DAC1, thread->debug.dac1);
- mtspr(SPRN_DAC2, thread->debug.dac2);
+ mtspr(SPRN_DAC1, debug->dac1);
+ mtspr(SPRN_DAC2, debug->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
- mtspr(SPRN_DVC1, thread->debug.dvc1);
- mtspr(SPRN_DVC2, thread->debug.dvc2);
+ mtspr(SPRN_DVC1, debug->dvc1);
+ mtspr(SPRN_DVC2, debug->dvc2);
#endif
- mtspr(SPRN_DBCR0, thread->debug.dbcr0);
- mtspr(SPRN_DBCR1, thread->debug.dbcr1);
+ mtspr(SPRN_DBCR0, debug->dbcr0);
+ mtspr(SPRN_DBCR1, debug->dbcr1);
#ifdef CONFIG_BOOKE
- mtspr(SPRN_DBCR2, thread->debug.dbcr2);
+ mtspr(SPRN_DBCR2, debug->dbcr2);
#endif
}
/*
@@ -371,11 +371,11 @@ static void prime_debug_regs(struct thread_struct *thread)
* debug registers, set the debug registers from the values
* stored in the new thread.
*/
-void switch_booke_debug_regs(struct thread_struct *new_thread)
+void switch_booke_debug_regs(struct debug_reg *new_debug)
{
if ((current->thread.debug.dbcr0 & DBCR0_IDM)
- || (new_thread->debug.dbcr0 & DBCR0_IDM))
- prime_debug_regs(new_thread);
+ || (new_debug->dbcr0 & DBCR0_IDM))
+ prime_debug_regs(new_debug);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
@@ -683,7 +683,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- switch_booke_debug_regs(&new->thread);
+ switch_booke_debug_regs(&new->thread.debug);
#else
/*
* For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
@@ -858,17 +858,21 @@ void show_regs(struct pt_regs * regs)
printk("MSR: "REG" ", regs->msr);
printbits(regs->msr, msr_bits);
printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
-#ifdef CONFIG_PPC64
- printk("SOFTE: %ld\n", regs->softe);
-#endif
trap = TRAP(regs);
if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
- printk("CFAR: "REG"\n", regs->orig_gpr3);
- if (trap == 0x300 || trap == 0x600)
+ printk("CFAR: "REG" ", regs->orig_gpr3);
+ if (trap == 0x200 || trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
- printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
+ printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
#else
- printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
+ printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
+#endif
+#ifdef CONFIG_PPC64
+ printk("SOFTE: %ld ", regs->softe);
+#endif
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (MSR_TM_ACTIVE(regs->msr))
+ printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
#endif
for (i = 0; i < 32; i++) {
@@ -887,9 +891,6 @@ void show_regs(struct pt_regs * regs)
printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch);
-#endif
show_stack(current, (unsigned long *) regs->gpr[1]);
if (!user_mode(regs))
show_instructions(regs);
@@ -1086,25 +1087,45 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
regs->msr = MSR_USER;
#else
if (!is_32bit_task()) {
- unsigned long entry, toc;
+ unsigned long entry;
- /* start is a relocated pointer to the function descriptor for
- * the elf _start routine. The first entry in the function
- * descriptor is the entry address of _start and the second
- * entry is the TOC value we need to use.
- */
- __get_user(entry, (unsigned long __user *)start);
- __get_user(toc, (unsigned long __user *)start+1);
+ if (is_elf2_task()) {
+ /* Look ma, no function descriptors! */
+ entry = start;
- /* Check whether the e_entry function descriptor entries
- * need to be relocated before we can use them.
- */
- if (load_addr != 0) {
- entry += load_addr;
- toc += load_addr;
+ /*
+ * Ulrich says:
+ * The latest iteration of the ABI requires that when
+ * calling a function (at its global entry point),
+ * the caller must ensure r12 holds the entry point
+ * address (so that the function can quickly
+ * establish addressability).
+ */
+ regs->gpr[12] = start;
+ /* Make sure that's restored on entry to userspace. */
+ set_thread_flag(TIF_RESTOREALL);
+ } else {
+ unsigned long toc;
+
+ /* start is a relocated pointer to the function
+ * descriptor for the elf _start routine. The first
+ * entry in the function descriptor is the entry
+ * address of _start and the second entry is the TOC
+ * value we need to use.
+ */
+ __get_user(entry, (unsigned long __user *)start);
+ __get_user(toc, (unsigned long __user *)start+1);
+
+ /* Check whether the e_entry function descriptor entries
+ * need to be relocated before we can use them.
+ */
+ if (load_addr != 0) {
+ entry += load_addr;
+ toc += load_addr;
+ }
+ regs->gpr[2] = toc;
}
regs->nip = entry;
- regs->gpr[2] = toc;
regs->msr = MSR_USER64;
} else {
regs->nip = start;
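For context on the two branches above: under ELFv1, start points at a function descriptor, while under ELFv2 it is the entry address itself and r12 must carry it into the callee. The descriptor layout, as a sketch matching the kernel's func_descr_t (the struct name here is illustrative):

    struct func_descr_sketch {
            unsigned long entry;    /* first instruction of the function */
            unsigned long toc;      /* TOC base, loaded into r2 */
            unsigned long env;      /* environment pointer, unused here */
    };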
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index f3a47098fb8e..fa0ad8aafbcc 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -777,6 +777,26 @@ int of_get_ibm_chip_id(struct device_node *np)
return -1;
}
+/**
+ * cpu_to_chip_id - Return the cpu's chip-id
+ * @cpu: The logical cpu number.
+ *
+ * Return the value of the ibm,chip-id property corresponding to the given
+ * logical cpu number. If the chip-id cannot be found, returns -1.
+ */
+int cpu_to_chip_id(int cpu)
+{
+ struct device_node *np;
+
+ np = of_get_cpu_node(cpu, NULL);
+ if (!np)
+ return -1;
+
+ of_node_put(np);
+ return of_get_ibm_chip_id(np);
+}
+EXPORT_SYMBOL(cpu_to_chip_id);
+
#ifdef CONFIG_PPC_PSERIES
/*
* Fix up the uninitialized fields in a new device node:
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 75fb40498b41..2e3d2bf536c5 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1555,7 +1555,7 @@ long arch_ptrace(struct task_struct *child, long request,
flush_fp_to_thread(child);
if (fpidx < (PT_FPSCR - PT_FPR0))
- memcpy(&tmp, &child->thread.fp_state.fpr,
+ memcpy(&tmp, &child->thread.TS_FPR(fpidx),
sizeof(long));
else
tmp = child->thread.fp_state.fpscr;
@@ -1588,7 +1588,7 @@ long arch_ptrace(struct task_struct *child, long request,
flush_fp_to_thread(child);
if (fpidx < (PT_FPSCR - PT_FPR0))
- memcpy(&child->thread.fp_state.fpr, &data,
+ memcpy(&child->thread.TS_FPR(fpidx), &data,
sizeof(long));
else
child->thread.fp_state.fpscr = data;
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index febc80445d25..bc76cc6b419c 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -479,7 +479,7 @@ void __init smp_setup_cpu_maps(void)
if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) &&
(dn = of_find_node_by_path("/rtas"))) {
int num_addr_cell, num_size_cell, maxcpus;
- const unsigned int *ireg;
+ const __be32 *ireg;
num_addr_cell = of_n_addr_cells(dn);
num_size_cell = of_n_size_cells(dn);
@@ -489,7 +489,7 @@ void __init smp_setup_cpu_maps(void)
if (!ireg)
goto out;
- maxcpus = ireg[num_addr_cell + num_size_cell];
+ maxcpus = be32_to_cpup(ireg + num_addr_cell + num_size_cell);
/* Double maxcpus for processors which have SMT capability */
if (cpu_has_feature(CPU_FTR_SMT))
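The ireg change above follows a pattern repeated throughout this series: device-tree cells are big endian in memory, so the pointer is typed __be32 and converted at the point of use. A sketch of the idiom (the property name is hypothetical):

    const __be32 *prop;
    u32 val = 0;

    prop = of_get_property(np, "example,cells", NULL);
    if (prop)
            val = be32_to_cpup(prop);       /* convert one cell to host order */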
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 749778e0a69d..68027bfa5f8e 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -445,6 +445,12 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
#endif /* CONFIG_ALTIVEC */
if (copy_fpr_to_user(&frame->mc_fregs, current))
return 1;
+
+ /*
+ * Clear the MSR VSX bit to indicate there is no valid state attached
+ * to this context, except in the specific case below where we set it.
+ */
+ msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
/*
* Copy VSR 0-31 upper half from thread_struct to local
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index b3c615764c9b..42991045349f 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -122,6 +122,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
flush_fp_to_thread(current);
/* copy fpr regs and fpscr */
err |= copy_fpr_to_user(&sc->fp_regs, current);
+
+ /*
+ * Clear the MSR VSX bit to indicate there is no valid state attached
+ * to this context, except in the specific case below where we set it.
+ */
+ msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
/*
* Copy VSX low doubleword to local buffer for formatting,
@@ -701,12 +707,6 @@ badframe:
int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
- /* Handler is *really* a pointer to the function descriptor for
- * the signal routine. The first entry in the function
- * descriptor is the entry address of signal and the second
- * entry is the TOC value we need to use.
- */
- func_descr_t __user *funct_desc_ptr;
struct rt_sigframe __user *frame;
unsigned long newsp = 0;
long err = 0;
@@ -766,19 +766,32 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
goto badframe;
regs->link = (unsigned long) &frame->tramp[0];
}
- funct_desc_ptr = (func_descr_t __user *) ka->sa.sa_handler;
/* Allocate a dummy caller frame for the signal handler. */
newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);
/* Set up "regs" so we "return" to the signal handler. */
- err |= get_user(regs->nip, &funct_desc_ptr->entry);
+ if (is_elf2_task()) {
+ regs->nip = (unsigned long) ka->sa.sa_handler;
+ regs->gpr[12] = regs->nip;
+ } else {
+ /* Handler is *really* a pointer to the function descriptor for
+ * the signal routine. The first entry in the function
+ * descriptor is the entry address of signal and the second
+ * entry is the TOC value we need to use.
+ */
+ func_descr_t __user *funct_desc_ptr =
+ (func_descr_t __user *) ka->sa.sa_handler;
+
+ err |= get_user(regs->nip, &funct_desc_ptr->entry);
+ err |= get_user(regs->gpr[2], &funct_desc_ptr->toc);
+ }
+
/* enter the signal handler in native-endian mode */
regs->msr &= ~MSR_LE;
regs->msr |= (MSR_KERNEL & MSR_LE);
regs->gpr[1] = newsp;
- err |= get_user(regs->gpr[2], &funct_desc_ptr->toc);
regs->gpr[3] = signr;
regs->result = 0;
if (ka->sa.sa_flags & SA_SIGINFO) {
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 930cd8af3503..c1cf4a1522d9 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -580,7 +580,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
int cpu_to_core_id(int cpu)
{
struct device_node *np;
- const int *reg;
+ const __be32 *reg;
int id = -1;
np = of_get_cpu_node(cpu, NULL);
@@ -591,28 +591,12 @@ int cpu_to_core_id(int cpu)
if (!reg)
goto out;
- id = *reg;
+ id = be32_to_cpup(reg);
out:
of_node_put(np);
return id;
}
-/* Return the value of the chip-id property corresponding
- * to the given logical cpu.
- */
-int cpu_to_chip_id(int cpu)
-{
- struct device_node *np;
-
- np = of_get_cpu_node(cpu, NULL);
- if (!np)
- return -1;
-
- of_node_put(np);
- return of_get_ibm_chip_id(np);
-}
-EXPORT_SYMBOL(cpu_to_chip_id);
-
/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 192b051df97e..b3b144121cc9 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -213,8 +213,6 @@ static u64 scan_dispatch_log(u64 stop_tb)
if (i == be64_to_cpu(vpa->dtl_idx))
return 0;
while (i < be64_to_cpu(vpa->dtl_idx)) {
- if (dtl_consumer)
- dtl_consumer(dtl, i);
dtb = be64_to_cpu(dtl->timebase);
tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
be32_to_cpu(dtl->ready_to_enqueue_time);
@@ -227,6 +225,8 @@ static u64 scan_dispatch_log(u64 stop_tb)
}
if (dtb > stop_tb)
break;
+ if (dtl_consumer)
+ dtl_consumer(dtl, i);
stolen += tb_delta;
++i;
++dtl;
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index 6b1f2a6d5517..6b2b69616e77 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -232,9 +232,15 @@ __do_get_tspec:
lwz r6,(CFG_TB_ORIG_STAMP+4)(r9)
/* Get a stable TB value */
+#ifdef CONFIG_8xx
+2: mftbu r3
+ mftbl r4
+ mftbu r0
+#else
2: mfspr r3, SPRN_TBRU
mfspr r4, SPRN_TBRL
mfspr r0, SPRN_TBRU
+#endif
cmplw cr0,r3,r0
bne- 2b
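The loop above is the standard 32-bit timebase read: sample TBU, then TBL, then TBU again, and retry if the upper half ticked over in between. In C, using the mftbu()/mftbl() accessors from the reg.h hunk earlier (sketch only):

    u32 hi, lo;

    do {
            hi = mftbu();
            lo = mftbl();
    } while (mftbu() != hi);        /* TBU changed mid-read: retry */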
diff --git a/arch/powerpc/kernel/vdso64/sigtramp.S b/arch/powerpc/kernel/vdso64/sigtramp.S
index 45ea281e9a21..542c6f422e4d 100644
--- a/arch/powerpc/kernel/vdso64/sigtramp.S
+++ b/arch/powerpc/kernel/vdso64/sigtramp.S
@@ -142,6 +142,13 @@ V_FUNCTION_END(__kernel_sigtramp_rt64)
/* Size of CR reg in DWARF unwind info. */
#define CRSIZE 4
+/* Offset of CR reg within a full word. */
+#ifdef __LITTLE_ENDIAN__
+#define CROFF 0
+#else
+#define CROFF (RSIZE - CRSIZE)
+#endif
+
/* This is the offset of the VMX reg pointer. */
#define VREGS 48*RSIZE+33*8
@@ -181,7 +188,14 @@ V_FUNCTION_END(__kernel_sigtramp_rt64)
rsave (31, 31*RSIZE); \
rsave (67, 32*RSIZE); /* ap, used as temp for nip */ \
rsave (65, 36*RSIZE); /* lr */ \
- rsave (70, 38*RSIZE + (RSIZE - CRSIZE)) /* cr */
+ rsave (68, 38*RSIZE + CROFF); /* cr fields */ \
+ rsave (69, 38*RSIZE + CROFF); \
+ rsave (70, 38*RSIZE + CROFF); \
+ rsave (71, 38*RSIZE + CROFF); \
+ rsave (72, 38*RSIZE + CROFF); \
+ rsave (73, 38*RSIZE + CROFF); \
+ rsave (74, 38*RSIZE + CROFF); \
+ rsave (75, 38*RSIZE + CROFF)
/* Describe where the FP regs are saved. */
#define EH_FRAME_FP \
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index e7d0c88f621a..76a64821f4a2 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1419,7 +1419,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
	/* needed to ensure proper operation of coherent allocations
	 * later, in case the driver doesn't set it explicitly */
- dma_set_mask_and_coherent(&viodev->dev, DMA_BIT_MASK(64));
+ dma_coerce_mask_and_coherent(&viodev->dev, DMA_BIT_MASK(64));
}
/* register with generic device framework */
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index f3ff587a8b7d..c5d148434c08 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -469,11 +469,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
slb_v = vcpu->kvm->arch.vrma_slb_v;
}
+ preempt_disable();
/* Find the HPTE in the hash table */
index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
HPTE_V_VALID | HPTE_V_ABSENT);
- if (index < 0)
+ if (index < 0) {
+ preempt_enable();
return -ENOENT;
+ }
hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
v = hptep[0] & ~HPTE_V_HVLOCK;
gr = kvm->arch.revmap[index].guest_rpte;
@@ -481,6 +484,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
/* Unlock the HPTE */
asm volatile("lwsync" : : : "memory");
hptep[0] = v;
+ preempt_enable();
gpte->eaddr = eaddr;
gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
@@ -665,6 +669,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
return -EFAULT;
} else {
page = pages[0];
+ pfn = page_to_pfn(page);
if (PageHuge(page)) {
page = compound_head(page);
pte_size <<= compound_order(page);
@@ -689,7 +694,6 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
rcu_read_unlock_sched();
}
- pfn = page_to_pfn(page);
}
ret = -EFAULT;
@@ -707,8 +711,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
}
- /* Set the HPTE to point to pfn */
- r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
+ /*
+ * Set the HPTE to point to pfn.
+ * Since the pfn is at PAGE_SIZE granularity, make sure we
+ * don't mask out lower-order bits if psize < PAGE_SIZE.
+ */
+ if (psize < PAGE_SIZE)
+ psize = PAGE_SIZE;
+ r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1));
if (hpte_is_writable(r) && !write_ok)
r = hpte_make_readonly(r);
ret = RESUME_GUEST;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 072287f1c3bc..b51d5db78068 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -131,8 +131,9 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ unsigned long flags;
- spin_lock(&vcpu->arch.tbacct_lock);
+ spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
vc->preempt_tb != TB_NIL) {
vc->stolen_tb += mftb() - vc->preempt_tb;
@@ -143,19 +144,20 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
vcpu->arch.busy_preempt = TB_NIL;
}
- spin_unlock(&vcpu->arch.tbacct_lock);
+ spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}
static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ unsigned long flags;
- spin_lock(&vcpu->arch.tbacct_lock);
+ spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
vc->preempt_tb = mftb();
if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
vcpu->arch.busy_preempt = mftb();
- spin_unlock(&vcpu->arch.tbacct_lock);
+ spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}
static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
@@ -486,11 +488,11 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
*/
if (vc->vcore_state != VCORE_INACTIVE &&
vc->runner->arch.run_task != current) {
- spin_lock(&vc->runner->arch.tbacct_lock);
+ spin_lock_irq(&vc->runner->arch.tbacct_lock);
p = vc->stolen_tb;
if (vc->preempt_tb != TB_NIL)
p += now - vc->preempt_tb;
- spin_unlock(&vc->runner->arch.tbacct_lock);
+ spin_unlock_irq(&vc->runner->arch.tbacct_lock);
} else {
p = vc->stolen_tb;
}
@@ -512,10 +514,10 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
core_stolen = vcore_stolen_time(vc, now);
stolen = core_stolen - vcpu->arch.stolen_logged;
vcpu->arch.stolen_logged = core_stolen;
- spin_lock(&vcpu->arch.tbacct_lock);
+ spin_lock_irq(&vcpu->arch.tbacct_lock);
stolen += vcpu->arch.busy_stolen;
vcpu->arch.busy_stolen = 0;
- spin_unlock(&vcpu->arch.tbacct_lock);
+ spin_unlock_irq(&vcpu->arch.tbacct_lock);
if (!dt || !vpa)
return;
memset(dt, 0, sizeof(struct dtl_entry));
@@ -589,7 +591,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
if (list_empty(&vcpu->kvm->arch.rtas_tokens))
return RESUME_HOST;
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
rc = kvmppc_rtas_hcall(vcpu);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
if (rc == -ENOENT)
return RESUME_HOST;
@@ -1115,13 +1119,13 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
return;
- spin_lock(&vcpu->arch.tbacct_lock);
+ spin_lock_irq(&vcpu->arch.tbacct_lock);
now = mftb();
vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
vcpu->arch.stolen_logged;
vcpu->arch.busy_preempt = now;
vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
- spin_unlock(&vcpu->arch.tbacct_lock);
+ spin_unlock_irq(&vcpu->arch.tbacct_lock);
--vc->n_runnable;
list_del(&vcpu->arch.run_list);
}
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 9c515440ad1a..8689e2e30857 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -225,6 +225,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
is_io = pa & (HPTE_R_I | HPTE_R_W);
pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
pa &= PAGE_MASK;
+ pa |= gpa & ~PAGE_MASK;
} else {
/* Translate to host virtual address */
hva = __gfn_to_hva_memslot(memslot, gfn);
@@ -238,13 +239,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
ptel = hpte_make_readonly(ptel);
is_io = hpte_cache_bits(pte_val(pte));
pa = pte_pfn(pte) << PAGE_SHIFT;
+ pa |= hva & (pte_size - 1);
+ pa |= gpa & ~PAGE_MASK;
}
}
if (pte_size < psize)
return H_PARAMETER;
- if (pa && pte_size > psize)
- pa |= gpa & (pte_size - 1);
ptel &= ~(HPTE_R_PP0 - psize);
ptel |= pa;
@@ -749,6 +750,10 @@ static int slb_base_page_shift[4] = {
20, /* 1M, unsupported */
};
+/* When called from virtmode, this function must be protected by
+ * preempt_disable(); otherwise, holding HPTE_V_HVLOCK can
+ * trigger a deadlock.
+ */
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
unsigned long valid)
{
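The book3s_64_mmu_hv.c hunk earlier in this series shows the caller side of that rule; in sketch form:

    preempt_disable();
    index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
                                     HPTE_V_VALID | HPTE_V_ABSENT);
    if (index < 0) {
            preempt_enable();
            return -ENOENT;
    }
    /* ... read and unlock the HPTE ... */
    preempt_enable();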
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bc8de75b1925..be4fa04a37c9 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -153,7 +153,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
13: b machine_check_fwnmi
-
/*
* We come in here when wakened from nap mode on a secondary hw thread.
* Relocation is off and most register values are lost.
@@ -224,6 +223,11 @@ kvm_start_guest:
/* Clear our vcpu pointer so we don't come back in early */
li r0, 0
std r0, HSTATE_KVM_VCPU(r13)
+ /*
+ * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
+ * the nap_count, because once the increment to nap_count is
+ * visible we could be given another vcpu.
+ */
lwsync
/* Clear any pending IPI - we're an offline thread */
ld r5, HSTATE_XICS_PHYS(r13)
@@ -241,7 +245,6 @@ kvm_start_guest:
/* increment the nap count and then go to nap mode */
ld r4, HSTATE_KVM_VCORE(r13)
addi r4, r4, VCORE_NAP_COUNT
- lwsync /* make previous updates visible */
51: lwarx r3, 0, r4
addi r3, r3, 1
stwcx. r3, 0, r4
@@ -751,15 +754,14 @@ kvmppc_interrupt_hv:
* guest CR, R12 saved in shadow VCPU SCRATCH1/0
* guest R13 saved in SPRN_SCRATCH0
*/
- /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
- std r9, HSTATE_HOST_R2(r13)
+ std r9, HSTATE_SCRATCH2(r13)
lbz r9, HSTATE_IN_GUEST(r13)
cmpwi r9, KVM_GUEST_MODE_HOST_HV
beq kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
cmpwi r9, KVM_GUEST_MODE_GUEST
- ld r9, HSTATE_HOST_R2(r13)
+ ld r9, HSTATE_SCRATCH2(r13)
beq kvmppc_interrupt_pr
#endif
/* We're now back in the host but in guest MMU context */
@@ -779,7 +781,7 @@ kvmppc_interrupt_hv:
std r6, VCPU_GPR(R6)(r9)
std r7, VCPU_GPR(R7)(r9)
std r8, VCPU_GPR(R8)(r9)
- ld r0, HSTATE_HOST_R2(r13)
+ ld r0, HSTATE_SCRATCH2(r13)
std r0, VCPU_GPR(R9)(r9)
std r10, VCPU_GPR(R10)(r9)
std r11, VCPU_GPR(R11)(r9)
@@ -990,14 +992,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
*/
/* Increment the threads-exiting-guest count in the 0xff00
bits of vcore->entry_exit_count */
- lwsync
ld r5,HSTATE_KVM_VCORE(r13)
addi r6,r5,VCORE_ENTRY_EXIT
41: lwarx r3,0,r6
addi r0,r3,0x100
stwcx. r0,0,r6
bne 41b
- lwsync
+ isync /* order stwcx. vs. reading napping_threads */
/*
* At this point we have an interrupt that we have to pass
@@ -1030,6 +1031,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
sld r0,r0,r4
andc. r3,r3,r0 /* no sense IPI'ing ourselves */
beq 43f
+ /* Order entry/exit update vs. IPIs */
+ sync
mulli r4,r4,PACA_SIZE /* get paca for thread 0 */
subf r6,r4,r13
42: andi. r0,r3,1
@@ -1638,10 +1641,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
bge kvm_cede_exit
stwcx. r4,0,r6
bne 31b
+ /* order napping_threads update vs testing entry_exit_count */
+ isync
li r0,1
stb r0,HSTATE_NAPPING(r13)
- /* order napping_threads update vs testing entry_exit_count */
- lwsync
mr r4,r3
lwz r7,VCORE_ENTRY_EXIT(r5)
cmpwi r7,0x100
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index f4dd041c14ea..f779450cb07c 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -129,29 +129,32 @@ kvm_start_lightweight:
* R12 = exit handler id
* R13 = PACA
* SVCPU.* = guest *
+ * MSR.EE = 1
*
*/
+ PPC_LL r3, GPR4(r1) /* vcpu pointer */
+
+	 * kvmppc_copy_from_svcpu can clobber volatile registers, so save
+	 * the exit handler id in the vcpu and restore it from there later.
+ * the exit handler id to the vcpu and restore it from there later.
+ */
+ stw r12, VCPU_TRAP(r3)
+
/* Transfer reg values from shadow vcpu back to vcpu struct */
/* On 64-bit, interrupts are still off at this point */
- PPC_LL r3, GPR4(r1) /* vcpu pointer */
+
GET_SHADOW_VCPU(r4)
bl FUNC(kvmppc_copy_from_svcpu)
nop
#ifdef CONFIG_PPC_BOOK3S_64
- /* Re-enable interrupts */
- ld r3, HSTATE_HOST_MSR(r13)
- ori r3, r3, MSR_EE
- MTMSR_EERI(r3)
-
/*
* Reload kernel SPRG3 value.
* No need to save guest value as usermode can't modify SPRG3.
*/
ld r3, PACA_SPRG3(r13)
mtspr SPRN_SPRG3, r3
-
#endif /* CONFIG_PPC_BOOK3S_64 */
/* R7 = vcpu */
@@ -177,7 +180,7 @@ kvm_start_lightweight:
PPC_STL r31, VCPU_GPR(R31)(r7)
/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
- mr r5, r12
+ lwz r5, VCPU_TRAP(r7)
/* Restore r3 (kvm_run) and r4 (vcpu) */
REST_2GPRS(3, r1)
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index fe14ca3dd171..5b9e9063cfaf 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -66,6 +66,7 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
+ svcpu->in_use = 0;
svcpu_put(svcpu);
#endif
vcpu->cpu = smp_processor_id();
@@ -78,6 +79,9 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ if (svcpu->in_use) {
+ kvmppc_copy_from_svcpu(vcpu, svcpu);
+ }
memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
svcpu_put(svcpu);
@@ -110,12 +114,26 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
svcpu->ctr = vcpu->arch.ctr;
svcpu->lr = vcpu->arch.lr;
svcpu->pc = vcpu->arch.pc;
+ svcpu->in_use = true;
}
/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
struct kvmppc_book3s_shadow_vcpu *svcpu)
{
+ /*
+ * vcpu_put would just call us again because in_use hasn't
+ * been updated yet.
+ */
+ preempt_disable();
+
+ /*
+ * Maybe we were already preempted and synced the svcpu from
+ * our preempt notifiers. Don't bother touching this svcpu then.
+ */
+ if (!svcpu->in_use)
+ goto out;
+
vcpu->arch.gpr[0] = svcpu->gpr[0];
vcpu->arch.gpr[1] = svcpu->gpr[1];
vcpu->arch.gpr[2] = svcpu->gpr[2];
@@ -139,6 +157,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
vcpu->arch.fault_dar = svcpu->fault_dar;
vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
vcpu->arch.last_inst = svcpu->last_inst;
+ svcpu->in_use = false;
+
+out:
+ preempt_enable();
}
static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index a38c4c9edab8..c3c5231adade 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -153,15 +153,11 @@ _GLOBAL(kvmppc_entry_trampoline)
li r6, MSR_IR | MSR_DR
andc r6, r5, r6 /* Clear DR and IR in MSR value */
-#ifdef CONFIG_PPC_BOOK3S_32
/*
* Set EE in HOST_MSR so that it's enabled when we get into our
- * C exit handler function. On 64-bit we delay enabling
- * interrupts until we have finished transferring stuff
- * to or from the PACA.
+ * C exit handler function.
*/
ori r5, r5, MSR_EE
-#endif
mtsrr0 r7
mtsrr1 r6
RFI
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 53e65a210b9a..0591e05db74b 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -681,7 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
int ret, s;
- struct thread_struct thread;
+ struct debug_reg debug;
#ifdef CONFIG_PPC_FPU
struct thread_fp_state fp;
int fpexc_mode;
@@ -723,9 +723,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
#endif
/* Switch to guest debug context */
- thread.debug = vcpu->arch.shadow_dbg_reg;
- switch_booke_debug_regs(&thread);
- thread.debug = current->thread.debug;
+ debug = vcpu->arch.shadow_dbg_reg;
+ switch_booke_debug_regs(&debug);
+ debug = current->thread.debug;
current->thread.debug = vcpu->arch.shadow_dbg_reg;
kvmppc_fix_ee_before_entry();
@@ -736,8 +736,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
We also get here with interrupts enabled. */
/* Switch back to user space debug context */
- switch_booke_debug_regs(&thread);
- current->thread.debug = thread.debug;
+ switch_booke_debug_regs(&debug);
+ current->thread.debug = debug;
#ifdef CONFIG_PPC_FPU
kvmppc_save_guest_fp(vcpu);
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index d73a59014900..596a285c0755 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -9,6 +9,14 @@
#include <asm/processor.h>
#include <asm/ppc_asm.h>
+#ifdef __BIG_ENDIAN__
+#define sLd sld /* Shift towards low-numbered address. */
+#define sHd srd /* Shift towards high-numbered address. */
+#else
+#define sLd srd /* Shift towards low-numbered address. */
+#define sHd sld /* Shift towards high-numbered address. */
+#endif
+
.align 7
_GLOBAL(__copy_tofrom_user)
BEGIN_FTR_SECTION
@@ -118,10 +126,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
24: ld r9,0(r4) /* 3+2n loads, 2+2n stores */
25: ld r0,8(r4)
- sld r6,r9,r10
+ sLd r6,r9,r10
26: ldu r9,16(r4)
- srd r7,r0,r11
- sld r8,r0,r10
+ sHd r7,r0,r11
+ sLd r8,r0,r10
or r7,r7,r6
blt cr6,79f
27: ld r0,8(r4)
@@ -129,35 +137,35 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
28: ld r0,0(r4) /* 4+2n loads, 3+2n stores */
29: ldu r9,8(r4)
- sld r8,r0,r10
+ sLd r8,r0,r10
addi r3,r3,-8
blt cr6,5f
30: ld r0,8(r4)
- srd r12,r9,r11
- sld r6,r9,r10
+ sHd r12,r9,r11
+ sLd r6,r9,r10
31: ldu r9,16(r4)
or r12,r8,r12
- srd r7,r0,r11
- sld r8,r0,r10
+ sHd r7,r0,r11
+ sLd r8,r0,r10
addi r3,r3,16
beq cr6,78f
1: or r7,r7,r6
32: ld r0,8(r4)
76: std r12,8(r3)
-2: srd r12,r9,r11
- sld r6,r9,r10
+2: sHd r12,r9,r11
+ sLd r6,r9,r10
33: ldu r9,16(r4)
or r12,r8,r12
77: stdu r7,16(r3)
- srd r7,r0,r11
- sld r8,r0,r10
+ sHd r7,r0,r11
+ sLd r8,r0,r10
bdnz 1b
78: std r12,8(r3)
or r7,r7,r6
79: std r7,16(r3)
-5: srd r12,r9,r11
+5: sHd r12,r9,r11
or r12,r8,r12
80: std r12,24(r3)
bne 6f
@@ -165,23 +173,38 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
blr
6: cmpwi cr1,r5,8
addi r3,r3,32
- sld r9,r9,r10
+ sLd r9,r9,r10
ble cr1,7f
34: ld r0,8(r4)
- srd r7,r0,r11
+ sHd r7,r0,r11
or r9,r7,r9
7:
bf cr7*4+1,1f
+#ifdef __BIG_ENDIAN__
rotldi r9,r9,32
+#endif
94: stw r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+ rotrdi r9,r9,32
+#endif
addi r3,r3,4
1: bf cr7*4+2,2f
+#ifdef __BIG_ENDIAN__
rotldi r9,r9,16
+#endif
95: sth r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+ rotrdi r9,r9,16
+#endif
addi r3,r3,2
2: bf cr7*4+3,3f
+#ifdef __BIG_ENDIAN__
rotldi r9,r9,8
+#endif
96: stb r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+ rotrdi r9,r9,8
+#endif
3: li r3,0
blr
diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
index 6936547018b8..c5f734e20b0f 100644
--- a/arch/powerpc/mm/gup.c
+++ b/arch/powerpc/mm/gup.c
@@ -123,6 +123,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct mm_struct *mm = current->mm;
unsigned long addr, len, end;
unsigned long next;
+ unsigned long flags;
pgd_t *pgdp;
int nr = 0;
@@ -156,7 +157,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
* So long as we atomically load page table pointers versus teardown,
* we can follow the address down to the page and take a ref on it.
*/
- local_irq_disable();
+ local_irq_save(flags);
pgdp = pgd_offset(mm, addr);
do {
@@ -179,7 +180,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
break;
} while (pgdp++, addr = next, addr != end);
- local_irq_enable();
+ local_irq_restore(flags);
return nr;
}
diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c
index 3bc700655fc8..74551b5e41e5 100644
--- a/arch/powerpc/mm/hugetlbpage-book3e.c
+++ b/arch/powerpc/mm/hugetlbpage-book3e.c
@@ -117,6 +117,5 @@ void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
struct hstate *hstate = hstate_file(vma->vm_file);
unsigned long tsize = huge_page_shift(hstate) - 10;
- __flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr, tsize, 0);
-
+ __flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0);
}
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 3e99c149271a..7ce9cf3b6988 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -258,7 +258,7 @@ static bool slice_scan_available(unsigned long addr,
slice = GET_HIGH_SLICE_INDEX(addr);
*boundary_addr = (slice + end) ?
((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
- return !!(available.high_slices & (1u << slice));
+ return !!(available.high_slices & (1ul << slice));
}
}
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 41cd68dee681..358d74303138 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -305,7 +305,7 @@ void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
- if (is_vm_hugetlb_page(vma))
+ if (vma && is_vm_hugetlb_page(vma))
flush_hugetlb_page(vma, vmaddr);
#endif
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index c2a566fb8bb8..bca2465a9c34 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -403,3 +403,28 @@ config PPC_DOORBELL
default n
endmenu
+
+choice
+ prompt "Endianness selection"
+ default CPU_BIG_ENDIAN
+ help
+ This option selects whether a big endian or little endian kernel will
+ be built.
+
+config CPU_BIG_ENDIAN
+ bool "Build big endian kernel"
+ help
+ Build a big endian kernel.
+
+ If unsure, select this option.
+
+config CPU_LITTLE_ENDIAN
+ bool "Build little endian kernel"
+ help
+ Build a little endian kernel.
+
+ Note that if cross compiling a little endian kernel,
+ CROSS_COMPILE must point to a toolchain capable of targeting
+ little endian powerpc.
+
+endchoice
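As a usage note: selecting the new option yields a config line like the one below and, per the help text, needs an LE-capable toolchain; the powerpc64le-linux- prefix is one common example, not something the patch mandates:

    CONFIG_CPU_LITTLE_ENDIAN=y

    $ make ARCH=powerpc CROSS_COMPILE=powerpc64le-linux- vmlinux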
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index 02245cee7818..d7ddcee7feb8 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -36,7 +36,6 @@
#include "powernv.h"
#include "pci.h"
-static char *hub_diag = NULL;
static int ioda_eeh_nb_init = 0;
static int ioda_eeh_event(struct notifier_block *nb,
@@ -140,15 +139,6 @@ static int ioda_eeh_post_init(struct pci_controller *hose)
ioda_eeh_nb_init = 1;
}
- /* We needn't HUB diag-data on PHB3 */
- if (phb->type == PNV_PHB_IODA1 && !hub_diag) {
- hub_diag = (char *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
- if (!hub_diag) {
- pr_err("%s: Out of memory !\n", __func__);
- return -ENOMEM;
- }
- }
-
#ifdef CONFIG_DEBUG_FS
if (phb->dbgfs) {
debugfs_create_file("err_injct_outbound", 0600,
@@ -633,11 +623,10 @@ static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
static void ioda_eeh_hub_diag(struct pci_controller *hose)
{
struct pnv_phb *phb = hose->private_data;
- struct OpalIoP7IOCErrorData *data;
+ struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
long rc;
- data = (struct OpalIoP7IOCErrorData *)ioda_eeh_hub_diag;
- rc = opal_pci_get_hub_diag_data(phb->hub_id, data, PAGE_SIZE);
+ rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
if (rc != OPAL_SUCCESS) {
pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n",
__func__, phb->hub_id, rc);
@@ -820,14 +809,15 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose)
struct OpalIoPhbErrorCommon *common;
long rc;
- common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
- rc = opal_pci_get_phb_diag_data2(phb->opal_id, common, PAGE_SIZE);
+ rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
+ PNV_PCI_DIAG_BUF_SIZE);
if (rc != OPAL_SUCCESS) {
pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
__func__, hose->global_number, rc);
return;
}
+ common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
switch (common->ioType) {
case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
ioda_eeh_p7ioc_phb_diag(hose, common);
diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c
index e7e59e4f9892..79d83cad3d67 100644
--- a/arch/powerpc/platforms/powernv/opal-lpc.c
+++ b/arch/powerpc/platforms/powernv/opal-lpc.c
@@ -24,25 +24,25 @@ static int opal_lpc_chip_id = -1;
static u8 opal_lpc_inb(unsigned long port)
{
int64_t rc;
- uint32_t data;
+ __be32 data;
if (opal_lpc_chip_id < 0 || port > 0xffff)
return 0xff;
rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1);
- return rc ? 0xff : data;
+ return rc ? 0xff : be32_to_cpu(data);
}
static __le16 __opal_lpc_inw(unsigned long port)
{
int64_t rc;
- uint32_t data;
+ __be32 data;
if (opal_lpc_chip_id < 0 || port > 0xfffe)
return 0xffff;
if (port & 1)
return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1);
rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2);
- return rc ? 0xffff : data;
+ return rc ? 0xffff : be32_to_cpu(data);
}
static u16 opal_lpc_inw(unsigned long port)
{
@@ -52,7 +52,7 @@ static u16 opal_lpc_inw(unsigned long port)
static __le32 __opal_lpc_inl(unsigned long port)
{
int64_t rc;
- uint32_t data;
+ __be32 data;
if (opal_lpc_chip_id < 0 || port > 0xfffc)
return 0xffffffff;
@@ -62,7 +62,7 @@ static __le32 __opal_lpc_inl(unsigned long port)
(__le32)opal_lpc_inb(port + 2) << 8 |
opal_lpc_inb(port + 3);
rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4);
- return rc ? 0xffffffff : data;
+ return rc ? 0xffffffff : be32_to_cpu(data);
}
static u32 opal_lpc_inl(unsigned long port)
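
The opal-lpc.c hunks above are an endianness boundary fix: OPAL returns LPC data in big-endian byte order no matter which endianness the kernel runs, so the local variable is retyped __be32 and every consumer converts with be32_to_cpu() at the point of use. A self-contained userspace model of that conversion (be32_to_cpu_sketch is a stand-in for the kernel helper):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t be32_to_cpu_sketch(uint32_t be)
{
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return be;		/* already in CPU order */
#else
	return __builtin_bswap32(be);
#endif
}

int main(void)
{
	/* the bytes as firmware would return them for the value 0xff */
	uint8_t wire[4] = { 0x00, 0x00, 0x00, 0xff };
	uint32_t raw;

	memcpy(&raw, wire, sizeof(raw));
	printf("raw=%#x cpu=%#x\n", (unsigned)raw,
	       (unsigned)be32_to_cpu_sketch(raw));
	return 0;
}

On a little endian host this prints raw=0xff000000 cpu=0xff; on a big endian host both values match, which is exactly why the annotation plus conversion is a no-op there.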
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c
index 4d99a8fd55ac..4fbf276ac99e 100644
--- a/arch/powerpc/platforms/powernv/opal-xscom.c
+++ b/arch/powerpc/platforms/powernv/opal-xscom.c
@@ -96,9 +96,11 @@ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value)
{
struct opal_scom_map *m = map;
int64_t rc;
+ __be64 v;
reg = opal_scom_unmangle(reg);
- rc = opal_xscom_read(m->chip, m->addr + reg, (uint64_t *)__pa(value));
+ rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v));
+ *value = be64_to_cpu(v);
return opal_xscom_err_xlate(rc);
}
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 084cdfa40682..2c6d173842b2 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -720,6 +720,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
}
iommu_init_table(tbl, phb->hose->node);
+ iommu_register_group(tbl, pci_domain_nr(pe->pbus), pe->pe_number);
if (pe->pdev)
set_iommu_table_base(&pe->pdev->dev, tbl);
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 911c24ef033e..1ed8d5f40f5a 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -172,11 +172,13 @@ struct pnv_phb {
} ioda;
};
- /* PHB status structure */
+ /* PHB and hub status structure */
union {
unsigned char blob[PNV_PCI_DIAG_BUF_SIZE];
struct OpalIoP7IOCPhbErrorData p7ioc;
+ struct OpalIoP7IOCErrorData hub_diag;
} diag;
+
};
extern struct pci_ops pnv_pci_ops;
diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c
index 8844628915dc..1cb160dc1609 100644
--- a/arch/powerpc/platforms/powernv/rng.c
+++ b/arch/powerpc/platforms/powernv/rng.c
@@ -19,6 +19,7 @@
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/machdep.h>
+#include <asm/smp.h>
struct powernv_rng {
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 7fbc25b1813f..ccb633e077b1 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -189,8 +189,9 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
struct eeh_dev *edev;
struct eeh_pe pe;
struct pci_dn *pdn = PCI_DN(dn);
- const u32 *class_code, *vendor_id, *device_id;
- const u32 *regs;
+ const __be32 *classp, *vendorp, *devicep;
+ u32 class_code;
+ const __be32 *regs;
u32 pcie_flags;
int enable = 0;
int ret;
@@ -201,22 +202,24 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
return NULL;
/* Retrieve class/vendor/device IDs */
- class_code = of_get_property(dn, "class-code", NULL);
- vendor_id = of_get_property(dn, "vendor-id", NULL);
- device_id = of_get_property(dn, "device-id", NULL);
+ classp = of_get_property(dn, "class-code", NULL);
+ vendorp = of_get_property(dn, "vendor-id", NULL);
+ devicep = of_get_property(dn, "device-id", NULL);
/* Skip for bad OF node or PCI-ISA bridge */
- if (!class_code || !vendor_id || !device_id)
+ if (!classp || !vendorp || !devicep)
return NULL;
if (dn->type && !strcmp(dn->type, "isa"))
return NULL;
+ class_code = of_read_number(classp, 1);
+
/*
* Update the class code and mode of the eeh device. We need
* to correctly reflect whether the current device is a root
* port or a PCIe switch downstream port.
*/
- edev->class_code = *class_code;
+ edev->class_code = class_code;
edev->pcie_cap = pseries_eeh_find_cap(dn, PCI_CAP_ID_EXP);
edev->mode &= 0xFFFFFF00;
if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
@@ -243,12 +246,12 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
/* Initialize the fake PE */
memset(&pe, 0, sizeof(struct eeh_pe));
pe.phb = edev->phb;
- pe.config_addr = regs[0];
+ pe.config_addr = of_read_number(regs, 1);
/* Enable EEH on the device */
ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
if (!ret) {
- edev->config_addr = regs[0];
+ edev->config_addr = of_read_number(regs, 1);
/* Retrieve PE address */
edev->pe_config_addr = eeh_ops->get_pe_addr(&pe);
pe.addr = edev->pe_config_addr;
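
The eeh_pseries.c conversion above swaps direct dereferences of device-tree properties for of_read_number(), because flattened device-tree cells are always stored big-endian. A userspace model of the helper (the unconditional byte swap assumes a little endian host; the kernel version uses be32_to_cpu() and is therefore correct on both):

#include <stdint.h>
#include <stdio.h>

static uint64_t of_read_number_sketch(const uint32_t *cell, int size)
{
	uint64_t r = 0;

	while (size--)
		r = (r << 32) | __builtin_bswap32(*cell++);
	return r;
}

int main(void)
{
	/* a fake "reg" property: one big-endian cell holding 0x01000010 */
	uint32_t regs[1] = { __builtin_bswap32(0x01000010) };

	printf("config_addr=%#llx\n",
	       (unsigned long long)of_read_number_sketch(regs, 1));
	return 0;
}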
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 356bc75ca74f..4fca3def9db9 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -245,6 +245,23 @@ static void pSeries_lpar_hptab_clear(void)
&(ptes[j].pteh), &(ptes[j].ptel));
}
}
+
+#ifdef __LITTLE_ENDIAN__
+ /* Reset exceptions to big endian */
+ if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
+ long rc;
+
+ rc = pseries_big_endian_exceptions();
+ /*
+ * At this point it is unlikely panic() will get anything
+ * out to the user, but at least this will stop us from
+ * continuing on further and creating an even more
+ * difficult to debug situation.
+ */
+ if (rc)
+ panic("Could not enable big endian exceptions");
+ }
+#endif
}
/*
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index e738007eae64..c9fecf09b8fa 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -157,7 +157,7 @@ static void parse_ppp_data(struct seq_file *m)
{
struct hvcall_ppp_data ppp_data;
struct device_node *root;
- const int *perf_level;
+ const __be32 *perf_level;
int rc;
rc = h_get_ppp(&ppp_data);
@@ -201,7 +201,7 @@ static void parse_ppp_data(struct seq_file *m)
perf_level = of_get_property(root,
"ibm,partition-performance-parameters-level",
NULL);
- if (perf_level && (*perf_level >= 1)) {
+ if (perf_level && (be32_to_cpup(perf_level) >= 1)) {
seq_printf(m,
"physical_procs_allocated_to_virtualization=%d\n",
ppp_data.phys_platform_procs);
@@ -435,7 +435,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
int partition_potential_processors;
int partition_active_processors;
struct device_node *rtas_node;
- const int *lrdrp = NULL;
+ const __be32 *lrdrp = NULL;
rtas_node = of_find_node_by_path("/rtas");
if (rtas_node)
@@ -444,7 +444,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
if (lrdrp == NULL) {
partition_potential_processors = vdso_data->processorCount;
} else {
- partition_potential_processors = *(lrdrp + 4);
+ partition_potential_processors = be32_to_cpup(lrdrp + 4);
}
of_node_put(rtas_node);
@@ -654,7 +654,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
const char *model = "";
const char *system_id = "";
const char *tmp;
- const unsigned int *lp_index_ptr;
+ const __be32 *lp_index_ptr;
unsigned int lp_index = 0;
seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS);
@@ -670,7 +670,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
lp_index_ptr = of_get_property(rootdn, "ibm,partition-no",
NULL);
if (lp_index_ptr)
- lp_index = *lp_index_ptr;
+ lp_index = be32_to_cpup(lp_index_ptr);
of_node_put(rootdn);
}
seq_printf(m, "serial_number=%s\n", system_id);
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 6d2f0abce6fa..0c882e83c4ce 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -130,7 +130,8 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
{
struct device_node *dn;
struct pci_dn *pdn;
- const u32 *req_msi;
+ const __be32 *p;
+ u32 req_msi;
pdn = pci_get_pdn(pdev);
if (!pdn)
@@ -138,19 +139,20 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
dn = pdn->node;
- req_msi = of_get_property(dn, prop_name, NULL);
- if (!req_msi) {
+ p = of_get_property(dn, prop_name, NULL);
+ if (!p) {
pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name);
return -ENOENT;
}
- if (*req_msi < nvec) {
+ req_msi = be32_to_cpup(p);
+ if (req_msi < nvec) {
pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec);
- if (*req_msi == 0) /* Be paranoid */
+ if (req_msi == 0) /* Be paranoid */
return -ENOSPC;
- return *req_msi;
+ return req_msi;
}
return 0;
@@ -171,7 +173,7 @@ static int check_req_msix(struct pci_dev *pdev, int nvec)
static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
{
struct device_node *dn;
- const u32 *p;
+ const __be32 *p;
dn = of_node_get(pci_device_to_OF_node(dev));
while (dn) {
@@ -179,7 +181,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
if (p) {
pr_debug("rtas_msi: found prop on dn %s\n",
dn->full_name);
- *total = *p;
+ *total = be32_to_cpup(p);
return dn;
}
@@ -232,13 +234,13 @@ struct msi_counts {
static void *count_non_bridge_devices(struct device_node *dn, void *data)
{
struct msi_counts *counts = data;
- const u32 *p;
+ const __be32 *p;
u32 class;
pr_debug("rtas_msi: counting %s\n", dn->full_name);
p = of_get_property(dn, "class-code", NULL);
- class = p ? *p : 0;
+ class = p ? be32_to_cpup(p) : 0;
if ((class >> 8) != PCI_CLASS_BRIDGE_PCI)
counts->num_devices++;
@@ -249,7 +251,7 @@ static void *count_non_bridge_devices(struct device_node *dn, void *data)
static void *count_spare_msis(struct device_node *dn, void *data)
{
struct msi_counts *counts = data;
- const u32 *p;
+ const __be32 *p;
int req;
if (dn == counts->requestor)
@@ -260,11 +262,11 @@ static void *count_spare_msis(struct device_node *dn, void *data)
req = 0;
p = of_get_property(dn, "ibm,req#msi", NULL);
if (p)
- req = *p;
+ req = be32_to_cpup(p);
p = of_get_property(dn, "ibm,req#msi-x", NULL);
if (p)
- req = max(req, (int)*p);
+ req = max(req, (int)be32_to_cpup(p));
}
if (req < counts->quota)
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 7bfaf58d4664..d7096f2f7751 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -43,8 +43,8 @@ static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */
static DEFINE_SPINLOCK(nvram_lock);
struct err_log_info {
- int error_type;
- unsigned int seq_num;
+ __be32 error_type;
+ __be32 seq_num;
};
struct nvram_os_partition {
@@ -79,9 +79,9 @@ static const char *pseries_nvram_os_partitions[] = {
};
struct oops_log_info {
- u16 version;
- u16 report_length;
- u64 timestamp;
+ __be16 version;
+ __be16 report_length;
+ __be64 timestamp;
} __attribute__((packed));
static void oops_to_nvram(struct kmsg_dumper *dumper,
@@ -291,8 +291,8 @@ int nvram_write_os_partition(struct nvram_os_partition *part, char * buff,
length = part->size;
}
- info.error_type = err_type;
- info.seq_num = error_log_cnt;
+ info.error_type = cpu_to_be32(err_type);
+ info.seq_num = cpu_to_be32(error_log_cnt);
tmp_index = part->index;
@@ -364,8 +364,8 @@ int nvram_read_partition(struct nvram_os_partition *part, char *buff,
}
if (part->os_partition) {
- *error_log_cnt = info.seq_num;
- *err_type = info.error_type;
+ *error_log_cnt = be32_to_cpu(info.seq_num);
+ *err_type = be32_to_cpu(info.error_type);
}
return 0;
@@ -529,9 +529,9 @@ static int zip_oops(size_t text_len)
pr_err("nvram: logging uncompressed oops/panic report\n");
return -1;
}
- oops_hdr->version = OOPS_HDR_VERSION;
- oops_hdr->report_length = (u16) zipped_len;
- oops_hdr->timestamp = get_seconds();
+ oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+ oops_hdr->report_length = cpu_to_be16(zipped_len);
+ oops_hdr->timestamp = cpu_to_be64(get_seconds());
return 0;
}
@@ -574,9 +574,9 @@ static int nvram_pstore_write(enum pstore_type_id type,
clobbering_unread_rtas_event())
return -1;
- oops_hdr->version = OOPS_HDR_VERSION;
- oops_hdr->report_length = (u16) size;
- oops_hdr->timestamp = get_seconds();
+ oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+ oops_hdr->report_length = cpu_to_be16(size);
+ oops_hdr->timestamp = cpu_to_be64(get_seconds());
if (compressed)
err_type = ERR_TYPE_KERNEL_PANIC_GZ;
@@ -670,16 +670,16 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
size_t length, hdr_size;
oops_hdr = (struct oops_log_info *)buff;
- if (oops_hdr->version < OOPS_HDR_VERSION) {
+ if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) {
/* Old format oops header had 2-byte record size */
hdr_size = sizeof(u16);
- length = oops_hdr->version;
+ length = be16_to_cpu(oops_hdr->version);
time->tv_sec = 0;
time->tv_nsec = 0;
} else {
hdr_size = sizeof(*oops_hdr);
- length = oops_hdr->report_length;
- time->tv_sec = oops_hdr->timestamp;
+ length = be16_to_cpu(oops_hdr->report_length);
+ time->tv_sec = be64_to_cpu(oops_hdr->timestamp);
time->tv_nsec = 0;
}
*buf = kmalloc(length, GFP_KERNEL);
@@ -889,13 +889,13 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
kmsg_dump_get_buffer(dumper, false,
oops_data, oops_data_sz, &text_len);
err_type = ERR_TYPE_KERNEL_PANIC;
- oops_hdr->version = OOPS_HDR_VERSION;
- oops_hdr->report_length = (u16) text_len;
- oops_hdr->timestamp = get_seconds();
+ oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+ oops_hdr->report_length = cpu_to_be16(text_len);
+ oops_hdr->timestamp = cpu_to_be64(get_seconds());
}
(void) nvram_write_os_partition(&oops_log_partition, oops_buf,
- (int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type,
+ (int) (sizeof(*oops_hdr) + text_len), err_type,
++oops_count);
spin_unlock_irqrestore(&lock, flags);
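
The nvram.c change above gives the on-media structures fixed big-endian field types and converts at every read and write, so the NVRAM image is identical whether it was written by a big or little endian kernel. A userspace sketch of the same round trip (the bswap helpers assume a little endian host; this oops_hdr only mirrors the layout above):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct oops_hdr {
	uint16_t version;	/* big-endian on media */
	uint16_t report_length;	/* big-endian on media */
	uint64_t timestamp;	/* big-endian on media */
} __attribute__((packed));

static uint16_t cpu_to_be16_sketch(uint16_t v) { return __builtin_bswap16(v); }
static uint16_t be16_to_cpu_sketch(uint16_t v) { return __builtin_bswap16(v); }

int main(void)
{
	unsigned char nvram[sizeof(struct oops_hdr)];
	struct oops_hdr hdr = {
		.version = cpu_to_be16_sketch(5),
		.report_length = cpu_to_be16_sketch(100),
	}, back;

	memcpy(nvram, &hdr, sizeof(hdr));	/* "write" to NVRAM */
	memcpy(&back, nvram, sizeof(back));	/* "read" it back */

	printf("version=%u len=%u\n",
	       be16_to_cpu_sketch(back.version),
	       be16_to_cpu_sketch(back.report_length));
	return 0;
}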
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 5f93856cdf47..70670a2d9cf2 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -113,7 +113,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
{
struct device_node *dn, *pdn;
struct pci_bus *bus;
- const uint32_t *pcie_link_speed_stats;
+ const __be32 *pcie_link_speed_stats;
bus = bridge->bus;
@@ -122,7 +122,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
return 0;
for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) {
- pcie_link_speed_stats = (const uint32_t *) of_get_property(pdn,
+ pcie_link_speed_stats = of_get_property(pdn,
"ibm,pcie-link-speed-stats", NULL);
if (pcie_link_speed_stats)
break;
@@ -135,7 +135,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
return 0;
}
- switch (pcie_link_speed_stats[0]) {
+ switch (be32_to_cpup(pcie_link_speed_stats)) {
case 0x01:
bus->max_bus_speed = PCIE_SPEED_2_5GT;
break;
@@ -147,7 +147,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
break;
}
- switch (pcie_link_speed_stats[1]) {
+ switch (be32_to_cpup(pcie_link_speed_stats + 1)) {
case 0x01:
bus->cur_bus_speed = PCIE_SPEED_2_5GT;
break;
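
Note that the two switch statements above read different cells of the property: cell 0 is the maximum link speed and cell 1 the current one, so the second switch must index past the first cell. A userspace sketch of decoding such a two-cell big-endian property (the swap assumes a little endian host):

#include <stdint.h>
#include <stdio.h>

static uint32_t be32_to_cpup_sketch(const uint32_t *p)
{
	return __builtin_bswap32(*p);
}

int main(void)
{
	/* fake "ibm,pcie-link-speed-stats": max then current speed code */
	uint32_t prop[2] = {
		__builtin_bswap32(0x04),	/* cell 0: 8 GT/s */
		__builtin_bswap32(0x02),	/* cell 1: 5 GT/s */
	};

	printf("max=%u cur=%u\n",
	       (unsigned)be32_to_cpup_sketch(prop),
	       (unsigned)be32_to_cpup_sketch(prop + 1));
	return 0;
}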
diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c
index a702f1c08242..72a102758d4e 100644
--- a/arch/powerpc/platforms/pseries/rng.c
+++ b/arch/powerpc/platforms/pseries/rng.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <asm/archrandom.h>
#include <asm/machdep.h>
+#include <asm/plpar_wrappers.h>
static int pseries_get_random_long(unsigned long *v)
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 1f97e2b87a62..c1f190858701 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -442,6 +442,32 @@ static void pSeries_machine_kexec(struct kimage *image)
}
#endif
+#ifdef __LITTLE_ENDIAN__
+long pseries_big_endian_exceptions(void)
+{
+ long rc;
+
+ while (1) {
+ rc = enable_big_endian_exceptions();
+ if (!H_IS_LONG_BUSY(rc))
+ return rc;
+ mdelay(get_longbusy_msecs(rc));
+ }
+}
+
+static long pseries_little_endian_exceptions(void)
+{
+ long rc;
+
+ while (1) {
+ rc = enable_little_endian_exceptions();
+ if (!H_IS_LONG_BUSY(rc))
+ return rc;
+ mdelay(get_longbusy_msecs(rc));
+ }
+}
+#endif
+
static void __init pSeries_setup_arch(void)
{
panic_timeout = 10;
@@ -698,6 +724,22 @@ static int __init pSeries_probe(void)
/* Now try to figure out if we are running on LPAR */
of_scan_flat_dt(pseries_probe_fw_features, NULL);
+#ifdef __LITTLE_ENDIAN__
+ if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
+ long rc;
+ /*
+ * Tell the hypervisor that we want our exceptions to
+ * be taken in little endian mode. If this fails we don't
+ * want to use BUG() because it will trigger an exception.
+ */
+ rc = pseries_little_endian_exceptions();
+ if (rc) {
+ ppc_md.progress("H_SET_MODE LE exception fail", 0);
+ panic("Could not enable little endian exceptions");
+ }
+ }
+#endif
+
if (firmware_has_feature(FW_FEATURE_LPAR))
hpte_init_lpar();
else
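
Both helpers added in setup.c above use the same long-busy retry idiom: reissue the hypercall for as long as the hypervisor reports a busy status, sleeping for the hinted interval between attempts. A userspace sketch of the pattern; the return code, predicate, and delay below are stand-ins, not the real hcall API:

#include <stdio.h>

#define H_BUSY_10MS	(-100)	/* hypothetical "retry in 10ms" status */

static int calls;

static long fake_hcall(void)
{
	return ++calls < 3 ? H_BUSY_10MS : 0;	/* busy twice, then done */
}

static int is_long_busy(long rc) { return rc == H_BUSY_10MS; }
static void delay_ms(int ms) { (void)ms; /* a real caller would sleep */ }

int main(void)
{
	long rc;

	do {
		rc = fake_hcall();
		if (is_long_busy(rc))
			delay_ms(10);
	} while (is_long_busy(rc));

	printf("rc=%ld after %d attempts\n", rc, calls);
	return 0;
}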
diff --git a/arch/powerpc/platforms/wsp/chroma.c b/arch/powerpc/platforms/wsp/chroma.c
index 8ef53bc2e70e..aaa46b353715 100644
--- a/arch/powerpc/platforms/wsp/chroma.c
+++ b/arch/powerpc/platforms/wsp/chroma.c
@@ -15,6 +15,7 @@
#include <linux/of.h>
#include <linux/smp.h>
#include <linux/time.h>
+#include <linux/of_fdt.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
diff --git a/arch/powerpc/platforms/wsp/h8.c b/arch/powerpc/platforms/wsp/h8.c
index d18e6cc19df3..a3c87f395750 100644
--- a/arch/powerpc/platforms/wsp/h8.c
+++ b/arch/powerpc/platforms/wsp/h8.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/io.h>
+#include <linux/of_address.h>
#include "wsp.h"
diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c
index 2d3b1dd9571d..9cd92e645028 100644
--- a/arch/powerpc/platforms/wsp/ics.c
+++ b/arch/powerpc/platforms/wsp/ics.c
@@ -18,6 +18,8 @@
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/types.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/io.h>
#include <asm/irq.h>
diff --git a/arch/powerpc/platforms/wsp/opb_pic.c b/arch/powerpc/platforms/wsp/opb_pic.c
index cb565bf93650..3f6729807938 100644
--- a/arch/powerpc/platforms/wsp/opb_pic.c
+++ b/arch/powerpc/platforms/wsp/opb_pic.c
@@ -15,6 +15,8 @@
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/time.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/reg_a2.h>
#include <asm/irq.h>
diff --git a/arch/powerpc/platforms/wsp/psr2.c b/arch/powerpc/platforms/wsp/psr2.c
index 508ec8282b96..a87b414c766a 100644
--- a/arch/powerpc/platforms/wsp/psr2.c
+++ b/arch/powerpc/platforms/wsp/psr2.c
@@ -15,6 +15,7 @@
#include <linux/of.h>
#include <linux/smp.h>
#include <linux/time.h>
+#include <linux/of_fdt.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
diff --git a/arch/powerpc/platforms/wsp/scom_wsp.c b/arch/powerpc/platforms/wsp/scom_wsp.c
index 8928507affea..6538b4de34fc 100644
--- a/arch/powerpc/platforms/wsp/scom_wsp.c
+++ b/arch/powerpc/platforms/wsp/scom_wsp.c
@@ -14,6 +14,7 @@
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/types.h>
+#include <linux/of_address.h>
#include <asm/cputhreads.h>
#include <asm/reg_a2.h>
diff --git a/arch/powerpc/platforms/wsp/wsp.c b/arch/powerpc/platforms/wsp/wsp.c
index ddb6efe88914..58cd1f00e1ef 100644
--- a/arch/powerpc/platforms/wsp/wsp.c
+++ b/arch/powerpc/platforms/wsp/wsp.c
@@ -13,6 +13,7 @@
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/time.h>
+#include <linux/of_address.h>
#include <asm/scom.h>
diff --git a/arch/powerpc/sysdev/ppc4xx_ocm.c b/arch/powerpc/sysdev/ppc4xx_ocm.c
index b7c43453236d..85d9e37f5ccb 100644
--- a/arch/powerpc/sysdev/ppc4xx_ocm.c
+++ b/arch/powerpc/sysdev/ppc4xx_ocm.c
@@ -339,7 +339,7 @@ void *ppc4xx_ocm_alloc(phys_addr_t *phys, int size, int align,
if (IS_ERR_VALUE(offset))
continue;
- ocm_blk = kzalloc(sizeof(struct ocm_block *), GFP_KERNEL);
+ ocm_blk = kzalloc(sizeof(struct ocm_block), GFP_KERNEL);
if (!ocm_blk) {
printk(KERN_ERR "PPC4XX OCM: could not allocate ocm block");
rh_free(ocm_reg->rh, offset);
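
The one-liner above fixes a classic C slip: sizeof(struct ocm_block *) is the size of a pointer, not of the structure, so the original allocation was far too small for the fields later written through it. A demonstration (the ocm_block fields here are invented for illustration):

#include <stdio.h>

struct ocm_block {		/* fields invented for the example */
	struct ocm_block *next;
	unsigned long addr;
	int size;
};

int main(void)
{
	printf("sizeof(struct ocm_block *) = %zu\n",
	       sizeof(struct ocm_block *));
	printf("sizeof(struct ocm_block)   = %zu\n",
	       sizeof(struct ocm_block));
	return 0;
}

On a 64-bit build the first line prints 8 and the second 24, which is the gap the original kzalloc() call was short by.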
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 314fced4fc14..1e1a03d2d19f 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -101,7 +101,7 @@ config S390
select GENERIC_CPU_DEVICES if !SMP
select GENERIC_FIND_FIRST_BIT
select GENERIC_SMP_IDLE_THREAD
- select GENERIC_TIME_VSYSCALL_OLD
+ select GENERIC_TIME_VSYSCALL
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
select HAVE_ARCH_SECCOMP_FILTER
@@ -347,14 +347,14 @@ config SMP
Even if you don't know what to do here, say Y.
config NR_CPUS
- int "Maximum number of CPUs (2-64)"
- range 2 64
+ int "Maximum number of CPUs (2-256)"
+ range 2 256
depends on SMP
default "32" if !64BIT
default "64" if 64BIT
help
This allows you to specify the maximum number of CPUs which this
- kernel will support. The maximum supported value is 64 and the
+ kernel will support. The maximum supported value is 256 and the
minimum value which makes sense is 2.
This is purely to save memory - each supported CPU adds
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 46cae138ece2..b3feabd39f31 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -35,7 +35,6 @@ static u8 *ctrblk;
static char keylen_flag;
struct s390_aes_ctx {
- u8 iv[AES_BLOCK_SIZE];
u8 key[AES_MAX_KEY_SIZE];
long enc;
long dec;
@@ -56,8 +55,7 @@ struct pcc_param {
struct s390_xts_ctx {
u8 key[32];
- u8 xts_param[16];
- struct pcc_param pcc;
+ u8 pcc_key[32];
long enc;
long dec;
int key_len;
@@ -441,30 +439,36 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return aes_set_key(tfm, in_key, key_len);
}
-static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
+static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
struct blkcipher_walk *walk)
{
+ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
int ret = blkcipher_walk_virt(desc, walk);
unsigned int nbytes = walk->nbytes;
+ struct {
+ u8 iv[AES_BLOCK_SIZE];
+ u8 key[AES_MAX_KEY_SIZE];
+ } param;
if (!nbytes)
goto out;
- memcpy(param, walk->iv, AES_BLOCK_SIZE);
+ memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
+ memcpy(param.key, sctx->key, sctx->key_len);
do {
/* only use complete blocks */
unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
- ret = crypt_s390_kmc(func, param, out, in, n);
+ ret = crypt_s390_kmc(func, &param, out, in, n);
if (ret < 0 || ret != n)
return -EIO;
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
} while ((nbytes = walk->nbytes));
- memcpy(walk->iv, param, AES_BLOCK_SIZE);
+ memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
out:
return ret;
@@ -481,7 +485,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
return fallback_blk_enc(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk);
+ return cbc_aes_crypt(desc, sctx->enc, &walk);
}
static int cbc_aes_decrypt(struct blkcipher_desc *desc,
@@ -495,7 +499,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
return fallback_blk_dec(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk);
+ return cbc_aes_crypt(desc, sctx->dec, &walk);
}
static struct crypto_alg cbc_aes_alg = {
@@ -586,7 +590,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
xts_ctx->enc = KM_XTS_128_ENCRYPT;
xts_ctx->dec = KM_XTS_128_DECRYPT;
memcpy(xts_ctx->key + 16, in_key, 16);
- memcpy(xts_ctx->pcc.key + 16, in_key + 16, 16);
+ memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
break;
case 48:
xts_ctx->enc = 0;
@@ -597,7 +601,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
xts_ctx->enc = KM_XTS_256_ENCRYPT;
xts_ctx->dec = KM_XTS_256_DECRYPT;
memcpy(xts_ctx->key, in_key, 32);
- memcpy(xts_ctx->pcc.key, in_key + 32, 32);
+ memcpy(xts_ctx->pcc_key, in_key + 32, 32);
break;
default:
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
@@ -616,29 +620,33 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
unsigned int nbytes = walk->nbytes;
unsigned int n;
u8 *in, *out;
- void *param;
+ struct pcc_param pcc_param;
+ struct {
+ u8 key[32];
+ u8 init[16];
+ } xts_param;
if (!nbytes)
goto out;
- memset(xts_ctx->pcc.block, 0, sizeof(xts_ctx->pcc.block));
- memset(xts_ctx->pcc.bit, 0, sizeof(xts_ctx->pcc.bit));
- memset(xts_ctx->pcc.xts, 0, sizeof(xts_ctx->pcc.xts));
- memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
- param = xts_ctx->pcc.key + offset;
- ret = crypt_s390_pcc(func, param);
+ memset(pcc_param.block, 0, sizeof(pcc_param.block));
+ memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
+ memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
+ memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
+ memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
+ ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
if (ret < 0)
return -EIO;
- memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
- param = xts_ctx->key + offset;
+ memcpy(xts_param.key, xts_ctx->key, 32);
+ memcpy(xts_param.init, pcc_param.xts, 16);
do {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
- ret = crypt_s390_km(func, param, out, in, n);
+ ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
if (ret < 0 || ret != n)
return -EIO;
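
The theme of the aes_s390.c rework above is that scratch state for the crypto instruction (IV, tweak, key material) moves out of the shared transform context and into a block on the caller's stack, so two requests using the same key can run concurrently without trampling each other's chaining values. A userspace sketch of the pattern:

#include <stdio.h>
#include <string.h>

struct ctx { unsigned char key[16]; };	/* shared, effectively read-only */

static void cbc_crypt_sketch(const struct ctx *c, const unsigned char *iv)
{
	struct {
		unsigned char iv[16];
		unsigned char key[16];
	} param;			/* private to this call */

	memcpy(param.iv, iv, sizeof(param.iv));
	memcpy(param.key, c->key, sizeof(param.key));
	/* ... &param would be handed to the crypto instruction; the
	 * shared ctx is never written, so concurrent calls stay
	 * independent ... */
	printf("param.iv[0]=%#x\n", param.iv[0]);
}

int main(void)
{
	struct ctx c = { .key = { 1 } };
	unsigned char iv_a[16] = { 0xaa }, iv_b[16] = { 0xbb };

	cbc_crypt_sketch(&c, iv_a);	/* request A */
	cbc_crypt_sketch(&c, iv_b);	/* request B */
	return 0;
}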
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 7a5288f3479a..8386a4a1f19a 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -3,3 +3,4 @@
generic-y += clkdev.h
generic-y += trace_clock.h
generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 316c8503a3b4..114258eeaacd 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -48,33 +48,21 @@ static inline void clear_page(void *page)
: "memory", "cc");
}
+/*
+ * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
+ * bypass caches when copying a page. Especially when copying huge pages
+ * this keeps L1 and L2 data caches alive.
+ */
static inline void copy_page(void *to, void *from)
{
- if (MACHINE_HAS_MVPG) {
- register unsigned long reg0 asm ("0") = 0;
- asm volatile(
- " mvpg %0,%1"
- : : "a" (to), "a" (from), "d" (reg0)
- : "memory", "cc");
- } else
- asm volatile(
- " mvc 0(256,%0),0(%1)\n"
- " mvc 256(256,%0),256(%1)\n"
- " mvc 512(256,%0),512(%1)\n"
- " mvc 768(256,%0),768(%1)\n"
- " mvc 1024(256,%0),1024(%1)\n"
- " mvc 1280(256,%0),1280(%1)\n"
- " mvc 1536(256,%0),1536(%1)\n"
- " mvc 1792(256,%0),1792(%1)\n"
- " mvc 2048(256,%0),2048(%1)\n"
- " mvc 2304(256,%0),2304(%1)\n"
- " mvc 2560(256,%0),2560(%1)\n"
- " mvc 2816(256,%0),2816(%1)\n"
- " mvc 3072(256,%0),3072(%1)\n"
- " mvc 3328(256,%0),3328(%1)\n"
- " mvc 3584(256,%0),3584(%1)\n"
- " mvc 3840(256,%0),3840(%1)\n"
- : : "a" (to), "a" (from) : "memory");
+ register void *reg2 asm ("2") = to;
+ register unsigned long reg3 asm ("3") = 0x1000;
+ register void *reg4 asm ("4") = from;
+ register unsigned long reg5 asm ("5") = 0xb0001000;
+ asm volatile(
+ " mvcl 2,4"
+ : "+d" (reg2), "+d" (reg3), "+d" (reg4), "+d" (reg5)
+ : : "memory", "cc");
}
#define clear_user_page(page, vaddr, pg) clear_page(page)
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 30ef748bc161..2f390956c7c1 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <asm/chpid.h>
+#include <asm/cpu.h>
#define SCLP_CHP_INFO_MASK_SIZE 32
@@ -37,7 +38,7 @@ struct sclp_cpu_info {
unsigned int standby;
unsigned int combined;
int has_cpu_type;
- struct sclp_cpu_entry cpu[255];
+ struct sclp_cpu_entry cpu[MAX_CPU_ADDRESS + 1];
};
int sclp_get_cpu_info(struct sclp_cpu_info *info);
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index a73eb2e1e918..bc9746a7d47c 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -26,8 +26,9 @@ struct vdso_data {
__u64 wtom_clock_nsec; /* 0x28 */
__u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */
__u32 tz_dsttime; /* Type of dst correction 0x34 */
- __u32 ectg_available;
- __u32 ntp_mult; /* NTP adjusted multiplier 0x3C */
+ __u32 ectg_available; /* ECTG instruction present 0x38 */
+ __u32 tk_mult; /* Mult. used for xtime_nsec 0x3c */
+ __u32 tk_shift; /* Shift used for xtime_nsec 0x40 */
};
struct vdso_per_cpu_data {
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 2416138ebd3e..e4c99a183651 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -65,12 +65,14 @@ int main(void)
DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
- DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult));
+ DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult));
+ DEFINE(__VDSO_TK_SHIFT, offsetof(struct vdso_data, tk_shift));
DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
/* constants used by the vdso */
DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
+ DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID);
DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
BLANK();
/* idle data offsets */
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 6e2442978409..95e7ba0fbb7e 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -194,7 +194,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
return -EINVAL;
/* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
- regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
+ regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
(__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 |
(__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 |
(__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE);
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
index 4a460c44e17e..813ec7260878 100644
--- a/arch/s390/kernel/pgm_check.S
+++ b/arch/s390/kernel/pgm_check.S
@@ -78,7 +78,7 @@ PGM_CHECK_DEFAULT /* 34 */
PGM_CHECK_DEFAULT /* 35 */
PGM_CHECK_DEFAULT /* 36 */
PGM_CHECK_DEFAULT /* 37 */
-PGM_CHECK_DEFAULT /* 38 */
+PGM_CHECK_64BIT(do_dat_exception) /* 38 */
PGM_CHECK_64BIT(do_dat_exception) /* 39 */
PGM_CHECK_64BIT(do_dat_exception) /* 3a */
PGM_CHECK_64BIT(do_dat_exception) /* 3b */
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index fb535874a246..d8fd508ccd1e 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -94,7 +94,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
return -EINVAL;
/* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
- regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
+ regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
(user_sregs.regs.psw.mask & (PSW_MASK_USER | PSW_MASK_RI));
/* Check for invalid user address space control. */
if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
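
Both signal paths above apply the same mask-and-merge hardening: every bit userspace is allowed to supply, now including the RI bit, is first cleared from the kernel's saved PSW mask and then taken only from the user copy, so a crafted sigreturn cannot smuggle in privileged bits. A sketch of the idiom with illustrative bit values (not the real PSW layout):

#include <stdint.h>
#include <stdio.h>

#define PSW_MASK_USER	0x00ffULL	/* illustrative */
#define PSW_MASK_RI	0x0100ULL	/* illustrative */

int main(void)
{
	uint64_t kernel_mask = 0xabcd;	/* trusted saved state */
	uint64_t user_mask = 0xffff;	/* attacker-controlled input */

	uint64_t merged =
		(kernel_mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
		(user_mask & (PSW_MASK_USER | PSW_MASK_RI));

	printf("merged=%#llx\n", (unsigned long long)merged);
	return 0;
}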
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 064c3082ab33..dd95f1631621 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -108,20 +108,10 @@ static void fixup_clock_comparator(unsigned long long delta)
set_clock_comparator(S390_lowcore.clock_comparator);
}
-static int s390_next_ktime(ktime_t expires,
+static int s390_next_event(unsigned long delta,
struct clock_event_device *evt)
{
- struct timespec ts;
- u64 nsecs;
-
- ts.tv_sec = ts.tv_nsec = 0;
- monotonic_to_bootbased(&ts);
- nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
- do_div(nsecs, 125);
- S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
- /* Program the maximum value if we have an overflow (== year 2042) */
- if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc))
- S390_lowcore.clock_comparator = -1ULL;
+ S390_lowcore.clock_comparator = get_tod_clock() + delta;
set_clock_comparator(S390_lowcore.clock_comparator);
return 0;
}
@@ -146,15 +136,14 @@ void init_cpu_timer(void)
cpu = smp_processor_id();
cd = &per_cpu(comparators, cpu);
cd->name = "comparator";
- cd->features = CLOCK_EVT_FEAT_ONESHOT |
- CLOCK_EVT_FEAT_KTIME;
+ cd->features = CLOCK_EVT_FEAT_ONESHOT;
cd->mult = 16777;
cd->shift = 12;
cd->min_delta_ns = 1;
cd->max_delta_ns = LONG_MAX;
cd->rating = 400;
cd->cpumask = cpumask_of(cpu);
- cd->set_next_ktime = s390_next_ktime;
+ cd->set_next_event = s390_next_event;
cd->set_mode = s390_set_mode;
clockevents_register_device(cd);
@@ -221,21 +210,30 @@ struct clocksource * __init clocksource_default_clock(void)
return &clocksource_tod;
}
-void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
- struct clocksource *clock, u32 mult)
+void update_vsyscall(struct timekeeper *tk)
{
- if (clock != &clocksource_tod)
+ u64 nsecps;
+
+ if (tk->clock != &clocksource_tod)
return;
/* Make userspace gettimeofday spin until we're done. */
++vdso_data->tb_update_count;
smp_wmb();
- vdso_data->xtime_tod_stamp = clock->cycle_last;
- vdso_data->xtime_clock_sec = wall_time->tv_sec;
- vdso_data->xtime_clock_nsec = wall_time->tv_nsec;
- vdso_data->wtom_clock_sec = wtm->tv_sec;
- vdso_data->wtom_clock_nsec = wtm->tv_nsec;
- vdso_data->ntp_mult = mult;
+ vdso_data->xtime_tod_stamp = tk->clock->cycle_last;
+ vdso_data->xtime_clock_sec = tk->xtime_sec;
+ vdso_data->xtime_clock_nsec = tk->xtime_nsec;
+ vdso_data->wtom_clock_sec =
+ tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
+ vdso_data->wtom_clock_nsec = tk->xtime_nsec +
+ + (tk->wall_to_monotonic.tv_nsec << tk->shift);
+ nsecps = (u64) NSEC_PER_SEC << tk->shift;
+ while (vdso_data->wtom_clock_nsec >= nsecps) {
+ vdso_data->wtom_clock_nsec -= nsecps;
+ vdso_data->wtom_clock_sec++;
+ }
+ vdso_data->tk_mult = tk->mult;
+ vdso_data->tk_shift = tk->shift;
smp_wmb();
++vdso_data->tb_update_count;
}
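
In the new update_vsyscall() above, the wall-to-monotonic nanosecond value is kept left-shifted by tk->shift (matching xtime_nsec), so one full second is NSEC_PER_SEC << shift and any overflow has to be folded into the seconds word by hand. A small standalone model of that normalisation loop:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	unsigned int shift = 8;			/* example tk->shift */
	uint64_t sec = 100;
	uint64_t nsec = (3 * NSEC_PER_SEC + 250) << shift;	/* 3.x s, shifted */
	uint64_t nsecps = NSEC_PER_SEC << shift;

	while (nsec >= nsecps) {		/* same loop as above */
		nsec -= nsecps;
		sec++;
	}
	printf("sec=%llu nsec(shifted)=%llu\n",
	       (unsigned long long)sec, (unsigned long long)nsec);
	return 0;
}

The loop runs three times, leaving sec=103 and the shifted remainder 250 << 8.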
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index a84476f2a9bb..613649096783 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -125,7 +125,7 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
psal[i] = 0x80000000;
lowcore->paste[4] = (u32)(addr_t) psal;
- psal[0] = 0x20000000;
+ psal[0] = 0x02000000;
psal[2] = (u32)(addr_t) aste;
*(unsigned long *) (aste + 2) = segment_table +
_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index b2224e0b974c..65fc3979c2f1 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -38,25 +38,21 @@ __kernel_clock_gettime:
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,2f
ahi %r0,-1
-2: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
+2: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
lr %r2,%r0
- l %r0,__VDSO_NTP_MULT(%r5)
+ l %r0,__VDSO_TK_MULT(%r5)
ltr %r1,%r1
mr %r0,%r0
jnm 3f
- a %r0,__VDSO_NTP_MULT(%r5)
+ a %r0,__VDSO_TK_MULT(%r5)
3: alr %r0,%r2
- srdl %r0,12
- al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
- al %r1,__VDSO_XTIME_NSEC+4(%r5)
- brc 12,4f
- ahi %r0,1
-4: l %r2,__VDSO_XTIME_SEC+4(%r5)
- al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */
+ al %r0,__VDSO_WTOM_NSEC(%r5)
al %r1,__VDSO_WTOM_NSEC+4(%r5)
brc 12,5f
ahi %r0,1
-5: al %r2,__VDSO_WTOM_SEC+4(%r5)
+5: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
+ srdl %r0,0(%r2) /* >> tk->shift */
+ l %r2,__VDSO_WTOM_SEC+4(%r5)
cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
jne 1b
basr %r5,0
@@ -86,20 +82,21 @@ __kernel_clock_gettime:
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,12f
ahi %r0,-1
-12: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
+12: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
lr %r2,%r0
- l %r0,__VDSO_NTP_MULT(%r5)
+ l %r0,__VDSO_TK_MULT(%r5)
ltr %r1,%r1
mr %r0,%r0
jnm 13f
- a %r0,__VDSO_NTP_MULT(%r5)
+ a %r0,__VDSO_TK_MULT(%r5)
13: alr %r0,%r2
- srdl %r0,12
- al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
+ al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
al %r1,__VDSO_XTIME_NSEC+4(%r5)
brc 12,14f
ahi %r0,1
-14: l %r2,__VDSO_XTIME_SEC+4(%r5)
+14: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
+ srdl %r0,0(%r2) /* >> tk->shift */
+ l %r2,__VDSO_XTIME_SEC+4(%r5)
cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
jne 11b
basr %r5,0
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index 2d3633175e3b..fd621a950f7c 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -35,15 +35,14 @@ __kernel_gettimeofday:
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,3f
ahi %r0,-1
-3: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
+3: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
st %r0,24(%r15)
- l %r0,__VDSO_NTP_MULT(%r5)
+ l %r0,__VDSO_TK_MULT(%r5)
ltr %r1,%r1
mr %r0,%r0
jnm 4f
- a %r0,__VDSO_NTP_MULT(%r5)
+ a %r0,__VDSO_TK_MULT(%r5)
4: al %r0,24(%r15)
- srdl %r0,12
al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
al %r1,__VDSO_XTIME_NSEC+4(%r5)
brc 12,5f
@@ -51,6 +50,8 @@ __kernel_gettimeofday:
5: mvc 24(4,%r15),__VDSO_XTIME_SEC+4(%r5)
cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
jne 1b
+ l %r4,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
+ srdl %r0,0(%r4) /* >> tk->shift */
l %r4,24(%r15) /* get tv_sec from stack */
basr %r5,0
6: ltr %r0,%r0
diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S
index 176e1f75f9aa..34deba7c7ed1 100644
--- a/arch/s390/kernel/vdso64/clock_getres.S
+++ b/arch/s390/kernel/vdso64/clock_getres.S
@@ -23,7 +23,9 @@ __kernel_clock_getres:
je 0f
cghi %r2,__CLOCK_MONOTONIC
je 0f
- cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */
+ cghi %r2,__CLOCK_THREAD_CPUTIME_ID
+ je 0f
+ cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
jne 2f
larl %r5,_vdso_data
icm %r0,15,__LC_ECTG_OK(%r5)
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index d46c95ed5f19..91940ed33a4a 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -22,7 +22,9 @@ __kernel_clock_gettime:
larl %r5,_vdso_data
cghi %r2,__CLOCK_REALTIME
je 4f
- cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */
+ cghi %r2,__CLOCK_THREAD_CPUTIME_ID
+ je 9f
+ cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
je 9f
cghi %r2,__CLOCK_MONOTONIC
jne 12f
@@ -34,14 +36,13 @@ __kernel_clock_gettime:
tmll %r4,0x0001 /* pending update ? loop */
jnz 0b
stck 48(%r15) /* Store TOD clock */
+ lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
+ lg %r0,__VDSO_WTOM_SEC(%r5)
lg %r1,48(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
- msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
- srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
- alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
- lg %r0,__VDSO_XTIME_SEC(%r5)
- alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */
- alg %r0,__VDSO_WTOM_SEC(%r5)
+ msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
+ alg %r1,__VDSO_WTOM_NSEC(%r5)
+ srlg %r1,%r1,0(%r2) /* >> tk->shift */
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
jne 0b
larl %r5,13f
@@ -62,12 +63,13 @@ __kernel_clock_gettime:
tmll %r4,0x0001 /* pending update ? loop */
jnz 5b
stck 48(%r15) /* Store TOD clock */
+ lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
lg %r1,48(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
- msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
- srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
- alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
- lg %r0,__VDSO_XTIME_SEC(%r5)
+ msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
+ alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
+ srlg %r1,%r1,0(%r2) /* >> tk->shift */
+ lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
jne 5b
larl %r5,13f
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
index 36ee674722ec..d0860d1d0ccc 100644
--- a/arch/s390/kernel/vdso64/gettimeofday.S
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -31,12 +31,13 @@ __kernel_gettimeofday:
stck 48(%r15) /* Store TOD clock */
lg %r1,48(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
- msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
- srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
- alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */
- lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */
+ msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
+ alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
+ lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
jne 0b
+ lgf %r5,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
+ srlg %r1,%r1,0(%r5) /* >> tk->shift */
larl %r5,5f
2: clg %r1,0(%r5)
jl 3f
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 97e03caf7825..dbdab3e7a1a6 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -78,11 +78,14 @@ static size_t copy_in_kernel(size_t count, void __user *to,
* contains the (negative) exception code.
*/
#ifdef CONFIG_64BIT
+
static unsigned long follow_table(struct mm_struct *mm,
unsigned long address, int write)
{
unsigned long *table = (unsigned long *)__pa(mm->pgd);
+ if (unlikely(address > mm->context.asce_limit - 1))
+ return -0x38UL;
switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
case _ASCE_TYPE_REGION1:
table = table + ((address >> 53) & 0x7ff);
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild
index f3414ade77a3..099e7ba40599 100644
--- a/arch/score/include/asm/Kbuild
+++ b/arch/score/include/asm/Kbuild
@@ -5,3 +5,5 @@ generic-y += clkdev.h
generic-y += trace_clock.h
generic-y += xor.h
generic-y += preempt.h
+generic-y += hash.h
+
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 231efbb68108..0cd7198a4524 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -35,3 +35,4 @@ generic-y += trace_clock.h
generic-y += ucontext.h
generic-y += xor.h
generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile
index 7b95f29e3174..3baff31e58cf 100644
--- a/arch/sh/lib/Makefile
+++ b/arch/sh/lib/Makefile
@@ -6,7 +6,7 @@ lib-y = delay.o memmove.o memchr.o \
checksum.o strlen.o div64.o div64-generic.o
# Extracted from libgcc
-lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
+obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \
udiv_qrnnd.o
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index bf390667657a..4b60a0c325ec 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -17,3 +17,4 @@ generic-y += trace_clock.h
generic-y += types.h
generic-y += word-at-a-time.h
generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 8358dc144959..0f9e94537eee 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -619,7 +619,7 @@ static inline unsigned long pte_present(pte_t pte)
}
#define pte_accessible pte_accessible
-static inline unsigned long pte_accessible(pte_t a)
+static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
return pte_val(a) & _PAGE_VALID;
}
@@ -847,7 +847,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
* SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
* and SUN4V pte layout, so this inline test is fine.
*/
- if (likely(mm != &init_mm) && pte_accessible(orig))
+ if (likely(mm != &init_mm) && pte_accessible(mm, orig))
tlb_batch_add(mm, addr, ptep, orig, fullmm);
}
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index 22f3bd147fa7..3793c75e45d9 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -39,3 +39,4 @@ generic-y += trace_clock.h
generic-y += types.h
generic-y += xor.h
generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 48d92bbe62e9..36e658a4291c 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -33,12 +33,11 @@ MODE_INCLUDE += -I$(srctree)/$(ARCH_DIR)/include/shared/skas
HEADER_ARCH := $(SUBARCH)
-# Additional ARCH settings for x86
-ifeq ($(SUBARCH),i386)
- HEADER_ARCH := x86
+ifneq ($(filter $(SUBARCH),x86 x86_64 i386),)
+ HEADER_ARCH := x86
endif
-ifeq ($(SUBARCH),x86_64)
- HEADER_ARCH := x86
+
+ifdef CONFIG_64BIT
KBUILD_CFLAGS += -mcmodel=large
endif
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index fdde187e6087..75de4abe4f94 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -4,3 +4,4 @@ generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
generic-y += switch_to.h clkdev.h
generic-y += trace_clock.h
generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c
index 4d6fdf68edf3..799d7e413bf5 100644
--- a/arch/um/kernel/sysrq.c
+++ b/arch/um/kernel/sysrq.c
@@ -19,7 +19,7 @@ struct stack_frame {
unsigned long return_address;
};
-static void print_stack_trace(unsigned long *sp, unsigned long bp)
+static void do_stack_trace(unsigned long *sp, unsigned long bp)
{
int reliable;
unsigned long addr;
@@ -94,5 +94,5 @@ void show_stack(struct task_struct *task, unsigned long *stack)
}
printk(KERN_CONT "\n");
- print_stack_trace(sp, bp);
+ do_stack_trace(sp, bp);
}
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index 00045cbe5c63..3ef4f9d9bf5d 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -61,3 +61,4 @@ generic-y += user.h
generic-y += vga.h
generic-y += xor.h
generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e903c71f7e69..0952ecd60eca 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -26,6 +26,7 @@ config X86
select HAVE_AOUT if X86_32
select HAVE_UNSTABLE_SCHED_CLOCK
select ARCH_SUPPORTS_NUMA_BALANCING
+ select ARCH_SUPPORTS_INT128 if X86_64
select ARCH_WANTS_PROT_NUMA_PROT_NONE
select HAVE_IDE
select HAVE_OPROFILE
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 41250fb33985..57d021507120 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -31,6 +31,9 @@ ifeq ($(CONFIG_X86_32),y)
KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return
+ # Don't autogenerate MMX or SSE instructions
+ KBUILD_CFLAGS += -mno-mmx -mno-sse
+
# Never want PIC in a 32-bit kernel, prevent breakage with GCC built
# with nonstandard options
KBUILD_CFLAGS += -fno-pic
@@ -57,8 +60,11 @@ else
KBUILD_AFLAGS += -m64
KBUILD_CFLAGS += -m64
+ # Don't autogenerate MMX or SSE instructions
+ KBUILD_CFLAGS += -mno-mmx -mno-sse
+
# Use -mpreferred-stack-boundary=3 if supported.
- KBUILD_CFLAGS += $(call cc-option,-mno-sse -mpreferred-stack-boundary=3)
+ KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
# FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index dce69a256896..d9c11956fce0 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -53,18 +53,18 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
# How to compile the 16-bit code. Note we always compile for -march=i386,
# that way we can complain to the user if the CPU is insufficient.
-KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
+KBUILD_CFLAGS := $(USERINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \
-DDISABLE_BRANCH_PROFILING \
-Wall -Wstrict-prototypes \
-march=i386 -mregparm=3 \
-include $(srctree)/$(src)/code16gcc.h \
-fno-strict-aliasing -fomit-frame-pointer -fno-pic \
+ -mno-mmx -mno-sse \
$(call cc-option, -ffreestanding) \
$(call cc-option, -fno-toplevel-reorder,\
- $(call cc-option, -fno-unit-at-a-time)) \
+ $(call cc-option, -fno-unit-at-a-time)) \
$(call cc-option, -fno-stack-protector) \
$(call cc-option, -mpreferred-stack-boundary=2)
-KBUILD_CFLAGS += $(call cc-option, -m32)
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index dcd90df10ab4..c8a6792e7842 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -13,6 +13,7 @@ KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
KBUILD_CFLAGS += $(cflags-y)
+KBUILD_CFLAGS += -mno-mmx -mno-sse
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 7d6ba9db1be9..e0fc24db234a 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -3,8 +3,9 @@
#
avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no)
+avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
+ $(comma)4)$(comma)%ymm2,yes,no)
-obj-$(CONFIG_CRYPTO_ABLK_HELPER_X86) += ablk_helper.o
obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o
obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index f80e668785c0..835488b745ee 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -34,7 +34,7 @@
#include <asm/cpu_device_id.h>
#include <asm/i387.h>
#include <asm/crypto/aes.h>
-#include <asm/crypto/ablk_helper.h>
+#include <crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
index 414fe5d7946b..4209a76fcdaa 100644
--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
+#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <crypto/ctr.h>
#include <crypto/lrw.h>
@@ -21,7 +22,6 @@
#include <asm/xcr.h>
#include <asm/xsave.h>
#include <asm/crypto/camellia.h>
-#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>
#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
index 37fd0c0a81ea..87a041a10f4a 100644
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
+#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <crypto/ctr.h>
#include <crypto/lrw.h>
@@ -21,7 +22,6 @@
#include <asm/xcr.h>
#include <asm/xsave.h>
#include <asm/crypto/camellia.h>
-#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>
#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index c6631813dc11..e6a3700489b9 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -26,13 +26,13 @@
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
+#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <crypto/cast5.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <asm/xcr.h>
#include <asm/xsave.h>
-#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>
#define CAST5_PARALLEL_BLOCKS 16
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
index 8d0dfb86a559..09f3677393e4 100644
--- a/arch/x86/crypto/cast6_avx_glue.c
+++ b/arch/x86/crypto/cast6_avx_glue.c
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
+#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <crypto/cast6.h>
#include <crypto/cryptd.h>
@@ -37,7 +38,6 @@
#include <crypto/xts.h>
#include <asm/xcr.h>
#include <asm/xsave.h>
-#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>
#define CAST6_PARALLEL_BLOCKS 8
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
index 23aabc6c20a5..2fae489b1524 100644
--- a/arch/x86/crypto/serpent_avx2_glue.c
+++ b/arch/x86/crypto/serpent_avx2_glue.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
+#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <crypto/ctr.h>
#include <crypto/lrw.h>
@@ -22,7 +23,6 @@
#include <asm/xcr.h>
#include <asm/xsave.h>
#include <asm/crypto/serpent-avx.h>
-#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>
#define SERPENT_AVX2_PARALLEL_BLOCKS 16
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index 9ae83cf8d21e..ff4870870972 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
+#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <crypto/serpent.h>
#include <crypto/cryptd.h>
@@ -38,7 +39,6 @@
#include <asm/xcr.h>
#include <asm/xsave.h>
#include <asm/crypto/serpent-avx.h>
-#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>
/* 8-way parallel cipher functions */
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index 97a356ece24d..8c95f8637306 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -34,6 +34,7 @@
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
+#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <crypto/serpent.h>
#include <crypto/cryptd.h>
@@ -42,7 +43,6 @@
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/crypto/serpent-sse2.h>
-#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>
static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index 50226c4b86ed..f248546da1ca 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -281,7 +281,7 @@ static int __init sha256_ssse3_mod_init(void)
/* allow AVX to override SSSE3, it's a little faster */
if (avx_usable()) {
#ifdef CONFIG_AS_AVX2
- if (boot_cpu_has(X86_FEATURE_AVX2))
+ if (boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_BMI2))
sha256_transform_asm = sha256_transform_rorx;
else
#endif
@@ -319,4 +319,4 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated");
MODULE_ALIAS("sha256");
-MODULE_ALIAS("sha384");
+MODULE_ALIAS("sha224");
diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
index a62ba541884e..4e3c665be129 100644
--- a/arch/x86/crypto/twofish_avx_glue.c
+++ b/arch/x86/crypto/twofish_avx_glue.c
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
+#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <crypto/twofish.h>
#include <crypto/cryptd.h>
@@ -39,7 +40,6 @@
#include <asm/xcr.h>
#include <asm/xsave.h>
#include <asm/crypto/twofish.h>
-#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>
#include <crypto/scatterwalk.h>
#include <linux/workqueue.h>
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index da31c8b8a92d..b17f4f48ecd7 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -77,7 +77,7 @@ static inline void atomic_sub(int i, atomic_t *v)
*/
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, i, "%0", "e");
+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
}
/**
@@ -141,7 +141,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
*/
static inline int atomic_add_negative(int i, atomic_t *v)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, i, "%0", "s");
+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
}
/**
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 3f065c985aee..46e9052bbd28 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -72,7 +72,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
*/
static inline int atomic64_sub_and_test(long i, atomic64_t *v)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, i, "%0", "e");
+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
}
/**
@@ -138,7 +138,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
*/
static inline int atomic64_add_negative(long i, atomic64_t *v)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, i, "%0", "s");
+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
}
/**
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 6d76d0935989..9fc1af74dc83 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -205,7 +205,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
*/
static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, nr, "%0", "c");
+ GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
}
/**
@@ -251,7 +251,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
*/
static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, nr, "%0", "c");
+ GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
}
/**
@@ -304,7 +304,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
*/
static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, nr, "%0", "c");
+ GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
}
static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
diff --git a/arch/x86/include/asm/hash.h b/arch/x86/include/asm/hash.h
new file mode 100644
index 000000000000..e8c58f88b1d4
--- /dev/null
+++ b/arch/x86/include/asm/hash.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_X86_HASH_H
+#define _ASM_X86_HASH_H
+
+struct fast_hash_ops;
+extern void setup_arch_fast_hash(struct fast_hash_ops *ops);
+
+#endif /* _ASM_X86_HASH_H */
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index 5b23e605e707..4ad6560847b1 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -52,7 +52,7 @@ static inline void local_sub(long i, local_t *l)
*/
static inline int local_sub_and_test(long i, local_t *l)
{
- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, i, "%0", "e");
+ GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
}
/**
@@ -92,7 +92,7 @@ static inline int local_inc_and_test(local_t *l)
*/
static inline int local_add_negative(long i, local_t *l)
{
- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, i, "%0", "s");
+ GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
}
/**
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 7d7443283a9d..947b5c417e83 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -15,7 +15,7 @@ struct pci_sysdata {
int domain; /* PCI domain */
int node; /* NUMA node */
#ifdef CONFIG_ACPI
- void *acpi; /* ACPI-specific data */
+ struct acpi_device *companion; /* ACPI companion device */
#endif
#ifdef CONFIG_X86_64
void *iommu; /* IOMMU private data */
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 3d1999458709..bbc8b12fa443 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -452,9 +452,16 @@ static inline int pte_present(pte_t a)
}
#define pte_accessible pte_accessible
-static inline int pte_accessible(pte_t a)
+static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
- return pte_flags(a) & _PAGE_PRESENT;
+ if (pte_flags(a) & _PAGE_PRESENT)
+ return true;
+
+ if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
+ mm_tlb_flush_pending(mm))
+ return true;
+
+ return false;
}
static inline int pte_hidden(pte_t pte)
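[Note: with the mm parameter, callers of pte_accessible() can decide whether a
cleared PTE might still be live in a remote TLB. A minimal sketch of a caller
(the helper name is hypothetical; ptep_get_and_clear() and flush_tlb_page()
are the usual primitives):

	static void my_clear_and_flush(struct vm_area_struct *vma,
				       unsigned long addr, pte_t *ptep)
	{
		pte_t pte = ptep_get_and_clear(vma->vm_mm, addr, ptep);

		/* PROTNONE/NUMA ptes count as accessible while a
		 * deferred TLB flush is pending for this mm */
		if (pte_accessible(vma->vm_mm, pte))
			flush_tlb_page(vma, addr);
	}
]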
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 8729723636fd..c8b051933b1b 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -8,6 +8,12 @@
DECLARE_PER_CPU(int, __preempt_count);
/*
+ * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
+ * that a decrement hitting 0 means we can and should reschedule.
+ */
+#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED)
+
+/*
* We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
* that think a non-zero value indicates we cannot preempt.
*/
@@ -74,6 +80,11 @@ static __always_inline void __preempt_count_sub(int val)
__this_cpu_add_4(__preempt_count, -val);
}
+/*
+ * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule,
+ * a decrement which hits zero means we have no preempt_count and should
+ * reschedule.
+ */
static __always_inline bool __preempt_count_dec_and_test(void)
{
GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
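[Note: the inverted-bit encoding is easiest to see with concrete values. A
sketch, assuming PREEMPT_NEED_RESCHED is bit 31 as on x86:

	/*
	 * 0x80000001  preempt_disable()d, no resched wanted
	 * 0x80000000  PREEMPT_ENABLED: preemptible, no resched wanted
	 * 0x00000001  preempt_disable()d, resched wanted (bit cleared)
	 * 0x00000000  "decl" just hit zero: preemptible AND resched
	 *             wanted, so __preempt_count_dec_and_test() fires
	 */
]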
diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
index 1ff990f1de8e..8f7866a5b9a4 100644
--- a/arch/x86/include/asm/rmwcc.h
+++ b/arch/x86/include/asm/rmwcc.h
@@ -16,8 +16,8 @@ cc_label: \
#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
__GEN_RMWcc(op " " arg0, var, cc)
-#define GEN_BINARY_RMWcc(op, var, val, arg0, cc) \
- __GEN_RMWcc(op " %1, " arg0, var, cc, "er" (val))
+#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
+ __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
#else /* !CC_HAVE_ASM_GOTO */
@@ -33,8 +33,8 @@ do { \
#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
__GEN_RMWcc(op " " arg0, var, cc)
-#define GEN_BINARY_RMWcc(op, var, val, arg0, cc) \
- __GEN_RMWcc(op " %2, " arg0, var, cc, "er" (val))
+#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
+ __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
#endif /* CC_HAVE_ASM_GOTO */
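[Note: the new vcon argument makes the operand constraint explicit instead of
hard-coding "er", which only suits register-width integer operands. The two
call styles now in use, taken from the atomic and bitops conversions above:

	/* integer operand: "er" = register or 32-bit sign-extended immediate */
	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");

	/* bit-number operand: "Ir" = register or small immediate */
	GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
]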
diff --git a/arch/x86/include/asm/simd.h b/arch/x86/include/asm/simd.h
new file mode 100644
index 000000000000..ee80b92f0096
--- /dev/null
+++ b/arch/x86/include/asm/simd.h
@@ -0,0 +1,11 @@
+
+#include <asm/i387.h>
+
+/*
+ * may_use_simd - whether it is allowable at this time to issue SIMD
+ * instructions or access the SIMD register file
+ */
+static __must_check inline bool may_use_simd(void)
+{
+ return irq_fpu_usable();
+}
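[Note: may_use_simd() gives portable code one predicate for gating SIMD
register use. A minimal sketch of the intended calling pattern (the function
and scalar fallback are illustrative, not part of this patch):

	static void my_xor(u8 *dst, const u8 *src, size_t len)
	{
		if (may_use_simd()) {
			kernel_fpu_begin();
			/* vectorised path using SSE/AVX registers */
			kernel_fpu_end();
		} else {
			/* scalar fallback, safe in any context */
			while (len--)
				*dst++ ^= *src++;
		}
	}
]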
diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
index 2874df24e7a4..4cab890007a7 100644
--- a/arch/x86/include/asm/trace/irq_vectors.h
+++ b/arch/x86/include/asm/trace/irq_vectors.h
@@ -72,6 +72,17 @@ DEFINE_IRQ_VECTOR_EVENT(x86_platform_ipi);
DEFINE_IRQ_VECTOR_EVENT(irq_work);
/*
+ * We must disallow sampling irq_work_exit() because perf event sampling
+ * itself can cause irq_work, which would lead to an infinite loop:
+ *
+ * 1) irq_work_exit happens
+ * 2) generates perf sample
+ * 3) generates irq_work
+ * 4) goto 1
+ */
+TRACE_EVENT_PERF_PERM(irq_work_exit, is_sampling_event(p_event) ? -EPERM : 0);
+
+/*
* call_function - called when entering/exiting a call function interrupt
* vector handler
*/
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index b93e09a0fa21..37813b5ddc37 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -147,6 +147,8 @@
#define MSR_PP1_ENERGY_STATUS 0x00000641
#define MSR_PP1_POLICY 0x00000642
+#define MSR_CORE_C1_RES 0x00000660
+
#define MSR_AMD64_MC0_MASK 0xc0010044
#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x))
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index dc1ec0dff939..ea04b342c026 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -387,7 +387,8 @@ static void init_intel(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_PEBS);
}
- if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
+ if (c->x86 == 6 && cpu_has_clflush &&
+ (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);
#ifdef CONFIG_X86_64
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index fd00bb29425d..c1a861829d81 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -262,11 +262,20 @@ struct cpu_hw_events {
__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
-#define EVENT_CONSTRAINT_END \
- EVENT_CONSTRAINT(0, 0, 0)
+/*
+ * We define the end marker as having a weight of -1
+ * to enable blacklisting of events using a counter bitmask
+ * of zero and thus a weight of zero.
+ * The end marker has a weight that cannot possibly be
+ * obtained from counting the bits in the bitmask.
+ */
+#define EVENT_CONSTRAINT_END { .weight = -1 }
+/*
+ * Check for end marker with weight == -1
+ */
#define for_each_event_constraint(e, c) \
- for ((e) = (c); (e)->weight; (e)++)
+ for ((e) = (c); (e)->weight != -1; (e)++)
/*
* Extra registers for specific events.
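[Note: with the -1 end marker, a zero-weight entry now blacklists an event
instead of silently terminating the table walk. An illustrative table (the
event codes are made up):

	static struct event_constraint my_constraints[] = {
		INTEL_EVENT_CONSTRAINT(0x3c, 0x3),  /* weight 2: schedulable */
		INTEL_EVENT_CONSTRAINT(0x48, 0x0),  /* weight 0: blacklisted */
		EVENT_CONSTRAINT_END                /* weight -1: stops the loop */
	};

	struct event_constraint *c;

	for_each_event_constraint(c, my_constraints)
		;	/* now visits both real entries, stops at the -1 marker */
]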
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 96f958d8cd45..bc4a088f9023 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -330,8 +330,8 @@ static struct pci_device_id intel_stolen_ids[] __initdata = {
INTEL_I915GM_IDS(gen3_stolen_size),
INTEL_I945G_IDS(gen3_stolen_size),
INTEL_I945GM_IDS(gen3_stolen_size),
- INTEL_VLV_M_IDS(gen3_stolen_size),
- INTEL_VLV_D_IDS(gen3_stolen_size),
+ INTEL_VLV_M_IDS(gen6_stolen_size),
+ INTEL_VLV_D_IDS(gen6_stolen_size),
INTEL_PINEVIEW_IDS(gen3_stolen_size),
INTEL_I965G_IDS(gen3_stolen_size),
INTEL_G33_IDS(gen3_stolen_size),
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index da3c599584a3..c752cb43e52f 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -558,6 +558,17 @@ void native_machine_shutdown(void)
{
/* Stop the cpus and apics */
#ifdef CONFIG_X86_IO_APIC
+ /*
+ * Disabling IO APIC before local APIC is a workaround for
+ * erratum AVR31 in "Intel Atom Processor C2000 Product Family
+ * Specification Update". In this situation, interrupts that target
+ * a Logical Processor whose Local APIC is either in the process of
+ * being hardware disabled or software disabled are neither delivered
+ * nor discarded. When this erratum occurs, the processor may hang.
+ *
+ * Even without the erratum, it still makes sense to quiet IO APIC
+ * before disabling Local APIC.
+ */
disable_IO_APIC();
#endif
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 5439117d5c4c..dec48bfaddb8 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -143,6 +143,8 @@ static inline int kvm_apic_id(struct kvm_lapic *apic)
return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
}
+#define KVM_X2APIC_CID_BITS 0
+
static void recalculate_apic_map(struct kvm *kvm)
{
struct kvm_apic_map *new, *old = NULL;
@@ -180,7 +182,8 @@ static void recalculate_apic_map(struct kvm *kvm)
if (apic_x2apic_mode(apic)) {
new->ldr_bits = 32;
new->cid_shift = 16;
- new->cid_mask = new->lid_mask = 0xffff;
+ new->cid_mask = (1 << KVM_X2APIC_CID_BITS) - 1;
+ new->lid_mask = 0xffff;
} else if (kvm_apic_sw_enabled(apic) &&
!new->cid_mask /* flat mode */ &&
kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) {
@@ -841,7 +844,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
ASSERT(apic != NULL);
/* if initial count is 0, current count should also be 0 */
- if (kvm_apic_get_reg(apic, APIC_TMICT) == 0)
+ if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 ||
+ apic->lapic_timer.period == 0)
return 0;
remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
@@ -1691,7 +1695,6 @@ static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
u32 data;
- void *vapic;
if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
@@ -1699,9 +1702,8 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
return;
- vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
- data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
- kunmap_atomic(vapic);
+ kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
+ sizeof(u32));
apic_set_tpr(vcpu->arch.apic, data & 0xff);
}
@@ -1737,7 +1739,6 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
u32 data, tpr;
int max_irr, max_isr;
struct kvm_lapic *apic = vcpu->arch.apic;
- void *vapic;
apic_sync_pv_eoi_to_guest(vcpu, apic);
@@ -1753,18 +1754,24 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
max_isr = 0;
data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
- vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
- *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
- kunmap_atomic(vapic);
+ kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
+ sizeof(u32));
}
-void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
+int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
- vcpu->arch.apic->vapic_addr = vapic_addr;
- if (vapic_addr)
+ if (vapic_addr) {
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.apic->vapic_cache,
+ vapic_addr, sizeof(u32)))
+ return -EINVAL;
__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
- else
+ } else {
__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
+ }
+
+ vcpu->arch.apic->vapic_addr = vapic_addr;
+ return 0;
}
int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index c730ac9fe801..c8b0d0d2da5c 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -34,7 +34,7 @@ struct kvm_lapic {
*/
void *regs;
gpa_t vapic_addr;
- struct page *vapic_page;
+ struct gfn_to_hva_cache vapic_cache;
unsigned long pending_events;
unsigned int sipi_vector;
};
@@ -76,7 +76,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset);
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector);
-void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
+int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index daff69e21150..1185fe7a7f47 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -296,4 +296,4 @@ static struct kernel_param_ops audit_param_ops = {
.get = param_get_bool,
};
-module_param_cb(mmu_audit, &audit_param_ops, &mmu_audit, 0644);
+arch_param_cb(mmu_audit, &audit_param_ops, &mmu_audit, 0644);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 21ef1ba184ae..5d004da1e35d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&va, argp, sizeof va))
goto out;
- r = 0;
- kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
+ r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
break;
}
case KVM_X86_SETUP_MCE: {
@@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
!kvm_event_needs_reinjection(vcpu);
}
-static int vapic_enter(struct kvm_vcpu *vcpu)
-{
- struct kvm_lapic *apic = vcpu->arch.apic;
- struct page *page;
-
- if (!apic || !apic->vapic_addr)
- return 0;
-
- page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
- if (is_error_page(page))
- return -EFAULT;
-
- vcpu->arch.apic->vapic_page = page;
- return 0;
-}
-
-static void vapic_exit(struct kvm_vcpu *vcpu)
-{
- struct kvm_lapic *apic = vcpu->arch.apic;
- int idx;
-
- if (!apic || !apic->vapic_addr)
- return;
-
- idx = srcu_read_lock(&vcpu->kvm->srcu);
- kvm_release_page_dirty(apic->vapic_page);
- mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
-}
-
static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{
int max_irr, tpr;
@@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
struct kvm *kvm = vcpu->kvm;
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
- r = vapic_enter(vcpu);
- if (r) {
- srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
- return r;
- }
r = 1;
while (r > 0) {
@@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
- vapic_exit(vcpu);
-
return r;
}
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 992d63bb154f..eabcb6e6a900 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -24,7 +24,7 @@ lib-$(CONFIG_SMP) += rwlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
-obj-y += msr.o msr-reg.o msr-reg-export.o
+obj-y += msr.o msr-reg.o msr-reg-export.o hash.o
ifeq ($(CONFIG_X86_32),y)
obj-y += atomic64_32.o
diff --git a/arch/x86/lib/hash.c b/arch/x86/lib/hash.c
new file mode 100644
index 000000000000..3056702e81fb
--- /dev/null
+++ b/arch/x86/lib/hash.c
@@ -0,0 +1,88 @@
+/*
+ * Some portions derived from code covered by the following notice:
+ *
+ * Copyright (c) 2010-2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/hash.h>
+
+#include <asm/processor.h>
+#include <asm/cpufeature.h>
+#include <asm/hash.h>
+
+static inline u32 crc32_u32(u32 crc, u32 val)
+{
+ asm ("crc32l %1,%0\n" : "+r" (crc) : "rm" (val));
+ return crc;
+}
+
+static u32 intel_crc4_2_hash(const void *data, u32 len, u32 seed)
+{
+ const u32 *p32 = (const u32 *) data;
+ u32 i, tmp = 0;
+
+ for (i = 0; i < len / 4; i++)
+ seed = crc32_u32(seed, *p32++);
+
+ switch (3 - (len & 0x03)) {
+ case 0:
+ tmp |= *((const u8 *) p32 + 2) << 16;
+ /* fallthrough */
+ case 1:
+ tmp |= *((const u8 *) p32 + 1) << 8;
+ /* fallthrough */
+ case 2:
+ tmp |= *((const u8 *) p32);
+ seed = crc32_u32(seed, tmp);
+ default:
+ break;
+ }
+
+ return seed;
+}
+
+static u32 intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed)
+{
+ const u32 *p32 = (const u32 *) data;
+ u32 i;
+
+ for (i = 0; i < len; i++)
+ seed = crc32_u32(seed, *p32++);
+
+ return seed;
+}
+
+void setup_arch_fast_hash(struct fast_hash_ops *ops)
+{
+ if (cpu_has_xmm4_2) {
+ ops->hash = intel_crc4_2_hash;
+ ops->hash2 = intel_crc4_2_hash2;
+ }
+}
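[Note: architectures providing setup_arch_fast_hash() can override the
generic hash ops at boot. A sketch of how a consumer is expected to use it
(names follow the fast_hash_ops declaration in <asm/hash.h>; the caller shown
is illustrative):

	struct fast_hash_ops ops = {
		/* generic defaults would be installed here */
	};

	setup_arch_fast_hash(&ops);	/* SSE4.2 CPUs swap in CRC32C ops */
	hash = ops.hash(buf, len, seed);	/* len in bytes */
	hash = ops.hash2(words, n, seed);	/* n in 32-bit words */
]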
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index dd74e46828c0..0596e8e0cc19 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -83,6 +83,12 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
pte_t pte = gup_get_pte(ptep);
struct page *page;
+ /* Similar to the PMD case, NUMA hinting must take slow path */
+ if (pte_numa(pte)) {
+ pte_unmap(ptep);
+ return 0;
+ }
+
if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) {
pte_unmap(ptep);
return 0;
@@ -167,6 +173,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
if (pmd_none(pmd) || pmd_trans_splitting(pmd))
return 0;
if (unlikely(pmd_large(pmd))) {
+ /*
+ * NUMA hinting faults need to be handled in the GUP
+ * slowpath for accounting purposes and so that they
+ * can be serialised against THP migration.
+ */
+ if (pmd_numa(pmd))
+ return 0;
if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
return 0;
} else {
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index a7cccb6d7fec..c96314abd144 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -61,6 +61,7 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
#if PAGETABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
+ struct page *page = virt_to_page(pmd);
paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
/*
* NOTE! For PAE, any changes to the top page-directory-pointer-table
@@ -69,7 +70,8 @@ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
#ifdef CONFIG_X86_PAE
tlb->need_flush_all = 1;
#endif
- tlb_remove_page(tlb, virt_to_page(pmd));
+ pgtable_pmd_page_dtor(page);
+ tlb_remove_page(tlb, page);
}
#if PAGETABLE_LEVELS > 3
@@ -209,7 +211,7 @@ static int preallocate_pmds(pmd_t *pmds[])
if (!pmd)
failed = true;
if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
- free_page((unsigned long)pmds[i]);
+ free_page((unsigned long)pmd);
pmd = NULL;
failed = true;
}
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 7fb24e53d4c8..4f25ec077552 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -518,7 +518,7 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
sd = &info->sd;
sd->domain = domain;
sd->node = node;
- sd->acpi = device->handle;
+ sd->companion = device;
/*
* Maybe the desired pci bus has been already scanned. In such case
* it is unnecessary to scan the pci bus with the given domain,busnum.
@@ -589,7 +589,7 @@ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
struct pci_sysdata *sd = bridge->bus->sysdata;
- ACPI_HANDLE_SET(&bridge->dev, sd->acpi);
+ ACPI_COMPANION_SET(&bridge->dev, sd->companion);
return 0;
}
diff --git a/arch/x86/platform/efi/early_printk.c b/arch/x86/platform/efi/early_printk.c
index 6599a0027b76..81b506d5befd 100644
--- a/arch/x86/platform/efi/early_printk.c
+++ b/arch/x86/platform/efi/early_printk.c
@@ -142,7 +142,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num)
efi_y += font->height;
}
- if (efi_y + font->height >= si->lfb_height) {
+ if (efi_y + font->height > si->lfb_height) {
u32 i;
efi_y -= font->height;
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 92c02344a060..cceb813044ef 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -690,13 +690,6 @@ void __init efi_init(void)
set_bit(EFI_MEMMAP, &x86_efi_facility);
-#ifdef CONFIG_X86_32
- if (efi_is_native()) {
- x86_platform.get_wallclock = efi_get_time;
- x86_platform.set_wallclock = efi_set_rtc_mmss;
- }
-#endif
-
#if EFI_DEBUG
print_efi_memmap();
#endif
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 0f92173a12b6..efe4d7220397 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1070,12 +1070,13 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
unsigned long status;
bcp = &per_cpu(bau_control, cpu);
- stat = bcp->statp;
- stat->s_enters++;
if (bcp->nobau)
return cpumask;
+ stat = bcp->statp;
+ stat->s_enters++;
+
if (bcp->busy) {
descriptor_status =
read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0);
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index 88692871823f..9cac82588cbc 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -73,9 +73,10 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
-march=i386 -mregparm=3 \
-include $(srctree)/$(src)/../../boot/code16gcc.h \
-fno-strict-aliasing -fomit-frame-pointer -fno-pic \
+ -mno-mmx -mno-sse \
$(call cc-option, -ffreestanding) \
$(call cc-option, -fno-toplevel-reorder,\
- $(call cc-option, -fno-unit-at-a-time)) \
+ $(call cc-option, -fno-unit-at-a-time)) \
$(call cc-option, -fno-stack-protector) \
$(call cc-option, -mpreferred-stack-boundary=2)
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 228d6aee3a16..d7efa1502101 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -29,3 +29,4 @@ generic-y += topology.h
generic-y += trace_clock.h
generic-y += xor.h
generic-y += preempt.h
+generic-y += hash.h
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 1610b22edf09..86154eab9523 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -435,9 +435,9 @@ static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
uint64_t v;
do {
- start = u64_stats_fetch_begin(&stat->syncp);
+ start = u64_stats_fetch_begin_bh(&stat->syncp);
v = stat->cnt;
- } while (u64_stats_fetch_retry(&stat->syncp, start));
+ } while (u64_stats_fetch_retry_bh(&stat->syncp, start));
return v;
}
@@ -508,9 +508,9 @@ static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
struct blkg_rwstat tmp;
do {
- start = u64_stats_fetch_begin(&rwstat->syncp);
+ start = u64_stats_fetch_begin_bh(&rwstat->syncp);
tmp = *rwstat;
- } while (u64_stats_fetch_retry(&rwstat->syncp, start));
+ } while (u64_stats_fetch_retry_bh(&rwstat->syncp, start));
return tmp;
}
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 331e627301ea..fb6f3c0ffa49 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -502,15 +502,6 @@ void blk_abort_flushes(struct request_queue *q)
}
}
-static void bio_end_flush(struct bio *bio, int err)
-{
- if (err)
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
- if (bio->bi_private)
- complete(bio->bi_private);
- bio_put(bio);
-}
-
/**
* blkdev_issue_flush - queue a flush
* @bdev: blockdev to issue flush for
@@ -526,7 +517,6 @@ static void bio_end_flush(struct bio *bio, int err)
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
sector_t *error_sector)
{
- DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q;
struct bio *bio;
int ret = 0;
@@ -548,13 +538,9 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
return -ENXIO;
bio = bio_alloc(gfp_mask, 0);
- bio->bi_end_io = bio_end_flush;
bio->bi_bdev = bdev;
- bio->bi_private = &wait;
- bio_get(bio);
- submit_bio(WRITE_FLUSH, bio);
- wait_for_completion_io(&wait);
+ ret = submit_bio_wait(WRITE_FLUSH, bio);
/*
* The driver must store the error location in ->bi_sector, if
@@ -564,9 +550,6 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
if (error_sector)
*error_sector = bio->bi_sector;
- if (!bio_flagged(bio, BIO_UPTODATE))
- ret = -EIO;
-
bio_put(bio);
return ret;
}
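[Note: submit_bio_wait() folds the open-coded bio_get()/completion/bio_put()
dance into one call that sleeps until the bio completes and returns 0 or a
negative error. The resulting pattern, as used above:

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_bdev = bdev;
	ret = submit_bio_wait(WRITE_FLUSH, bio);	/* sleeps; -EIO on failure */
	bio_put(bio);
]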
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index ba6cf8e9aa0a..b91ce75bd35d 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -335,9 +335,22 @@ static struct kobj_type blk_mq_hw_ktype = {
void blk_mq_unregister_disk(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx;
+ int i, j;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ hctx_for_each_ctx(hctx, ctx, j) {
+ kobject_del(&ctx->kobj);
+ kobject_put(&ctx->kobj);
+ }
+ kobject_del(&hctx->kobj);
+ kobject_put(&hctx->kobj);
+ }
kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
kobject_del(&q->mq_kobj);
+ kobject_put(&q->mq_kobj);
kobject_put(&disk_to_dev(disk)->kobj);
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 862f458d4760..c79126e11030 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -171,9 +171,12 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
}
EXPORT_SYMBOL(blk_mq_can_queue);
-static void blk_mq_rq_ctx_init(struct blk_mq_ctx *ctx, struct request *rq,
- unsigned int rw_flags)
+static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
+ struct request *rq, unsigned int rw_flags)
{
+ if (blk_queue_io_stat(q))
+ rw_flags |= REQ_IO_STAT;
+
rq->mq_ctx = ctx;
rq->cmd_flags = rw_flags;
ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
@@ -197,12 +200,14 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved);
if (rq) {
- blk_mq_rq_ctx_init(ctx, rq, rw);
- break;
- } else if (!(gfp & __GFP_WAIT))
+ blk_mq_rq_ctx_init(q, ctx, rq, rw);
break;
+ }
blk_mq_put_ctx(ctx);
+ if (!(gfp & __GFP_WAIT))
+ break;
+
__blk_mq_run_hw_queue(hctx);
blk_mq_wait_for_tags(hctx->tags);
} while (1);
@@ -219,7 +224,8 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
return NULL;
rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
- blk_mq_put_ctx(rq->mq_ctx);
+ if (rq)
+ blk_mq_put_ctx(rq->mq_ctx);
return rq;
}
@@ -232,7 +238,8 @@ struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw,
return NULL;
rq = blk_mq_alloc_request_pinned(q, rw, gfp, true);
- blk_mq_put_ctx(rq->mq_ctx);
+ if (rq)
+ blk_mq_put_ctx(rq->mq_ctx);
return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_reserved_request);
@@ -305,12 +312,12 @@ void blk_mq_complete_request(struct request *rq, int error)
blk_account_io_completion(rq, bytes);
+ blk_account_io_done(rq);
+
if (rq->end_io)
rq->end_io(rq, error);
else
blk_mq_free_request(rq);
-
- blk_account_io_done(rq);
}
void __blk_mq_end_io(struct request *rq, int error)
@@ -718,6 +725,8 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
+ trace_block_rq_insert(hctx->queue, rq);
+
list_add_tail(&rq->queuelist, &ctx->rq_list);
blk_mq_hctx_mark_pending(hctx, ctx);
@@ -921,7 +930,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
trace_block_getrq(q, bio, rw);
rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
if (likely(rq))
- blk_mq_rq_ctx_init(ctx, rq, rw);
+ blk_mq_rq_ctx_init(q, ctx, rq, rw);
else {
blk_mq_put_ctx(ctx);
trace_block_sleeprq(q, bio, rw);
@@ -1377,6 +1386,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
q->queue_hw_ctx = hctxs;
q->mq_ops = reg->ops;
+ q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
blk_queue_make_request(q, blk_mq_make_request);
blk_queue_rq_timed_out(q, reg->ops->timeout);
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index a8287b49d062..dc51f467a560 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -96,6 +96,7 @@
* - Code works, detects all the partitions.
*
************************************************************/
+#include <linux/kernel.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/math64.h>
@@ -715,8 +716,8 @@ int efi_partition(struct parsed_partitions *state)
efi_guid_unparse(&ptes[i].unique_partition_guid, info->uuid);
/* Naively convert UTF16-LE to 7 bits. */
- label_max = min(sizeof(info->volname) - 1,
- sizeof(ptes[i].partition_name));
+ label_max = min(ARRAY_SIZE(info->volname) - 1,
+ ARRAY_SIZE(ptes[i].partition_name));
info->volname[label_max] = 0;
while (label_count < label_max) {
u8 c = ptes[i].partition_name[label_count] & 0xff;
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 71f337aefa39..7bcb70d216e1 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -174,9 +174,8 @@ config CRYPTO_TEST
help
Quick & dirty crypto test module.
-config CRYPTO_ABLK_HELPER_X86
+config CRYPTO_ABLK_HELPER
tristate
- depends on X86
select CRYPTO_CRYPTD
config CRYPTO_GLUE_HELPER_X86
@@ -695,7 +694,7 @@ config CRYPTO_AES_NI_INTEL
select CRYPTO_AES_X86_64 if 64BIT
select CRYPTO_AES_586 if !64BIT
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER_X86
+ select CRYPTO_ABLK_HELPER
select CRYPTO_ALGAPI
select CRYPTO_GLUE_HELPER_X86 if 64BIT
select CRYPTO_LRW
@@ -895,7 +894,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
depends on CRYPTO
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER_X86
+ select CRYPTO_ABLK_HELPER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_CAMELLIA_X86_64
select CRYPTO_LRW
@@ -917,7 +916,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64
depends on CRYPTO
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER_X86
+ select CRYPTO_ABLK_HELPER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_CAMELLIA_X86_64
select CRYPTO_CAMELLIA_AESNI_AVX_X86_64
@@ -969,7 +968,7 @@ config CRYPTO_CAST5_AVX_X86_64
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER_X86
+ select CRYPTO_ABLK_HELPER
select CRYPTO_CAST_COMMON
select CRYPTO_CAST5
help
@@ -992,7 +991,7 @@ config CRYPTO_CAST6_AVX_X86_64
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER_X86
+ select CRYPTO_ABLK_HELPER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_CAST_COMMON
select CRYPTO_CAST6
@@ -1110,7 +1109,7 @@ config CRYPTO_SERPENT_SSE2_X86_64
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER_X86
+ select CRYPTO_ABLK_HELPER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
select CRYPTO_LRW
@@ -1132,7 +1131,7 @@ config CRYPTO_SERPENT_SSE2_586
depends on X86 && !64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER_X86
+ select CRYPTO_ABLK_HELPER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
select CRYPTO_LRW
@@ -1154,7 +1153,7 @@ config CRYPTO_SERPENT_AVX_X86_64
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER_X86
+ select CRYPTO_ABLK_HELPER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
select CRYPTO_LRW
@@ -1176,7 +1175,7 @@ config CRYPTO_SERPENT_AVX2_X86_64
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER_X86
+ select CRYPTO_ABLK_HELPER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
select CRYPTO_SERPENT_AVX_X86_64
@@ -1292,7 +1291,7 @@ config CRYPTO_TWOFISH_AVX_X86_64
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER_X86
+ select CRYPTO_ABLK_HELPER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_TWOFISH_COMMON
select CRYPTO_TWOFISH_X86_64
@@ -1402,6 +1401,9 @@ config CRYPTO_USER_API_SKCIPHER
This option enables the user-spaces interface for symmetric
key cipher algorithms.
+config CRYPTO_HASH_INFO
+ bool
+
source "drivers/crypto/Kconfig"
source crypto/asymmetric_keys/Kconfig
diff --git a/crypto/Makefile b/crypto/Makefile
index 80019ba8da3a..989c510da8cc 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -2,8 +2,13 @@
# Cryptographic API
#
+# memneq MUST be built with -Os or -O0 to prevent early-return optimizations
+# that would defeat memneq's purpose of preventing timing attacks.
+CFLAGS_REMOVE_memneq.o := -O1 -O2 -O3
+CFLAGS_memneq.o := -Os
+
obj-$(CONFIG_CRYPTO) += crypto.o
-crypto-y := api.o cipher.o compress.o
+crypto-y := api.o cipher.o compress.o memneq.o
obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
@@ -104,3 +109,5 @@ obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
obj-$(CONFIG_XOR_BLOCKS) += xor.o
obj-$(CONFIG_ASYNC_CORE) += async_tx/
obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/
+obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o
+obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o
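[Note: the -Os requirement exists because an optimizing compiler may turn a
byte-wise comparison back into an early-exit loop. The difference, in sketch
form (crypto_memneq's real implementation is word-optimised; this shows only
the shape):

	/* leaks timing: exits at the first mismatching byte */
	if (memcmp(a, b, n) != 0)
		return -EBADMSG;

	/* constant time: always touches all n bytes */
	static unsigned long ct_neq(const u8 *a, const u8 *b, size_t n)
	{
		unsigned long neq = 0;

		while (n--)
			neq |= *a++ ^ *b++;
		return neq;	/* non-zero iff the buffers differ */
	}
]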
diff --git a/arch/x86/crypto/ablk_helper.c b/crypto/ablk_helper.c
index 43282fe04a8b..ffe7278d4bd8 100644
--- a/arch/x86/crypto/ablk_helper.c
+++ b/crypto/ablk_helper.c
@@ -28,10 +28,11 @@
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/hardirq.h>
#include <crypto/algapi.h>
#include <crypto/cryptd.h>
-#include <asm/i387.h>
-#include <asm/crypto/ablk_helper.h>
+#include <crypto/ablk_helper.h>
+#include <asm/simd.h>
int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
@@ -70,11 +71,11 @@ int ablk_encrypt(struct ablkcipher_request *req)
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- if (!irq_fpu_usable()) {
+ if (!may_use_simd()) {
struct ablkcipher_request *cryptd_req =
ablkcipher_request_ctx(req);
- memcpy(cryptd_req, req, sizeof(*req));
+ *cryptd_req = *req;
ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
return crypto_ablkcipher_encrypt(cryptd_req);
@@ -89,11 +90,11 @@ int ablk_decrypt(struct ablkcipher_request *req)
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- if (!irq_fpu_usable()) {
+ if (!may_use_simd()) {
struct ablkcipher_request *cryptd_req =
ablkcipher_request_ctx(req);
- memcpy(cryptd_req, req, sizeof(*req));
+ *cryptd_req = *req;
ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
return crypto_ablkcipher_decrypt(cryptd_req);
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index 7d4a8d28277e..40886c489903 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -16,9 +16,7 @@
#include <crypto/internal/skcipher.h>
#include <linux/cpumask.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -30,8 +28,6 @@
#include "internal.h"
-static const char *skcipher_default_geniv __read_mostly;
-
struct ablkcipher_buffer {
struct list_head entry;
struct scatter_walk dst;
@@ -527,8 +523,7 @@ const char *crypto_default_geniv(const struct crypto_alg *alg)
alg->cra_blocksize)
return "chainiv";
- return alg->cra_flags & CRYPTO_ALG_ASYNC ?
- "eseqiv" : skcipher_default_geniv;
+ return "eseqiv";
}
static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
@@ -709,17 +704,3 @@ err:
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
-
-static int __init skcipher_module_init(void)
-{
- skcipher_default_geniv = num_possible_cpus() > 1 ?
- "eseqiv" : "chainiv";
- return 0;
-}
-
-static void skcipher_module_exit(void)
-{
-}
-
-module_init(skcipher_module_init);
-module_exit(skcipher_module_exit);
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index ef5356cd280a..850246206b12 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -114,6 +114,9 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
struct hash_ctx *ctx = ask->private;
int err;
+ if (flags & MSG_SENDPAGE_NOTLAST)
+ flags |= MSG_MORE;
+
lock_sock(sk);
sg_init_table(ctx->sgl.sg, 1);
sg_set_page(ctx->sgl.sg, page, size, offset);
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 6a6dfc062d2a..a19c027b29bd 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -378,6 +378,9 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
struct skcipher_sg_list *sgl;
int err = -EINVAL;
+ if (flags & MSG_SENDPAGE_NOTLAST)
+ flags |= MSG_MORE;
+
lock_sock(sk);
if (!ctx->more && ctx->used)
goto unlock;
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index c0bb3778f1ae..666f1962a160 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -230,11 +230,11 @@ remainder:
*/
if (byte_count < DEFAULT_BLK_SZ) {
empty_rbuf:
- for (; ctx->rand_data_valid < DEFAULT_BLK_SZ;
- ctx->rand_data_valid++) {
+ while (ctx->rand_data_valid < DEFAULT_BLK_SZ) {
*ptr = ctx->rand_data[ctx->rand_data_valid];
ptr++;
byte_count--;
+ ctx->rand_data_valid++;
if (byte_count == 0)
goto done;
}
diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
index 6d2c2ea12559..03a6eb95ab50 100644
--- a/crypto/asymmetric_keys/Kconfig
+++ b/crypto/asymmetric_keys/Kconfig
@@ -12,6 +12,8 @@ if ASYMMETRIC_KEY_TYPE
config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
tristate "Asymmetric public-key crypto algorithm subtype"
select MPILIB
+ select PUBLIC_KEY_ALGO_RSA
+ select CRYPTO_HASH_INFO
help
This option provides support for asymmetric public key type handling.
If signature generation and/or verification are to be used,
@@ -20,8 +22,8 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
config PUBLIC_KEY_ALGO_RSA
tristate "RSA public-key algorithm"
- depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE
select MPILIB_EXTRA
+ select MPILIB
help
This option enables support for the RSA algorithm (PKCS#1, RFC3447).
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
index cf807654d221..b77eb5304788 100644
--- a/crypto/asymmetric_keys/asymmetric_type.c
+++ b/crypto/asymmetric_keys/asymmetric_type.c
@@ -209,6 +209,7 @@ struct key_type key_type_asymmetric = {
.match = asymmetric_key_match,
.destroy = asymmetric_key_destroy,
.describe = asymmetric_key_describe,
+ .def_lookup_type = KEYRING_SEARCH_LOOKUP_ITERATE,
};
EXPORT_SYMBOL_GPL(key_type_asymmetric);
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index cb2e29180a87..97eb001960b9 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -22,29 +22,25 @@
MODULE_LICENSE("GPL");
-const char *const pkey_algo[PKEY_ALGO__LAST] = {
+const char *const pkey_algo_name[PKEY_ALGO__LAST] = {
[PKEY_ALGO_DSA] = "DSA",
[PKEY_ALGO_RSA] = "RSA",
};
-EXPORT_SYMBOL_GPL(pkey_algo);
+EXPORT_SYMBOL_GPL(pkey_algo_name);
-const char *const pkey_hash_algo[PKEY_HASH__LAST] = {
- [PKEY_HASH_MD4] = "md4",
- [PKEY_HASH_MD5] = "md5",
- [PKEY_HASH_SHA1] = "sha1",
- [PKEY_HASH_RIPE_MD_160] = "rmd160",
- [PKEY_HASH_SHA256] = "sha256",
- [PKEY_HASH_SHA384] = "sha384",
- [PKEY_HASH_SHA512] = "sha512",
- [PKEY_HASH_SHA224] = "sha224",
+const struct public_key_algorithm *pkey_algo[PKEY_ALGO__LAST] = {
+#if defined(CONFIG_PUBLIC_KEY_ALGO_RSA) || \
+ defined(CONFIG_PUBLIC_KEY_ALGO_RSA_MODULE)
+ [PKEY_ALGO_RSA] = &RSA_public_key_algorithm,
+#endif
};
-EXPORT_SYMBOL_GPL(pkey_hash_algo);
+EXPORT_SYMBOL_GPL(pkey_algo);
-const char *const pkey_id_type[PKEY_ID_TYPE__LAST] = {
+const char *const pkey_id_type_name[PKEY_ID_TYPE__LAST] = {
[PKEY_ID_PGP] = "PGP",
[PKEY_ID_X509] = "X509",
};
-EXPORT_SYMBOL_GPL(pkey_id_type);
+EXPORT_SYMBOL_GPL(pkey_id_type_name);
/*
* Provide a part of a description of the key for /proc/keys.
@@ -56,7 +52,7 @@ static void public_key_describe(const struct key *asymmetric_key,
if (key)
seq_printf(m, "%s.%s",
- pkey_id_type[key->id_type], key->algo->name);
+ pkey_id_type_name[key->id_type], key->algo->name);
}
/*
@@ -78,21 +74,45 @@ EXPORT_SYMBOL_GPL(public_key_destroy);
/*
* Verify a signature using a public key.
*/
-static int public_key_verify_signature(const struct key *key,
- const struct public_key_signature *sig)
+int public_key_verify_signature(const struct public_key *pk,
+ const struct public_key_signature *sig)
{
- const struct public_key *pk = key->payload.data;
+ const struct public_key_algorithm *algo;
+
+ BUG_ON(!pk);
+ BUG_ON(!pk->mpi[0]);
+ BUG_ON(!pk->mpi[1]);
+ BUG_ON(!sig);
+ BUG_ON(!sig->digest);
+ BUG_ON(!sig->mpi[0]);
+
+ algo = pk->algo;
+ if (!algo) {
+ if (pk->pkey_algo >= PKEY_ALGO__LAST)
+ return -ENOPKG;
+ algo = pkey_algo[pk->pkey_algo];
+ if (!algo)
+ return -ENOPKG;
+ }
- if (!pk->algo->verify_signature)
+ if (!algo->verify_signature)
return -ENOTSUPP;
- if (sig->nr_mpi != pk->algo->n_sig_mpi) {
+ if (sig->nr_mpi != algo->n_sig_mpi) {
pr_debug("Signature has %u MPI not %u\n",
- sig->nr_mpi, pk->algo->n_sig_mpi);
+ sig->nr_mpi, algo->n_sig_mpi);
return -EINVAL;
}
- return pk->algo->verify_signature(pk, sig);
+ return algo->verify_signature(pk, sig);
+}
+EXPORT_SYMBOL_GPL(public_key_verify_signature);
+
+static int public_key_verify_signature_2(const struct key *key,
+ const struct public_key_signature *sig)
+{
+ const struct public_key *pk = key->payload.data;
+ return public_key_verify_signature(pk, sig);
}
/*
@@ -103,6 +123,6 @@ struct asymmetric_key_subtype public_key_subtype = {
.name = "public_key",
.describe = public_key_describe,
.destroy = public_key_destroy,
- .verify_signature = public_key_verify_signature,
+ .verify_signature = public_key_verify_signature_2,
};
EXPORT_SYMBOL_GPL(public_key_subtype);
diff --git a/crypto/asymmetric_keys/public_key.h b/crypto/asymmetric_keys/public_key.h
index 5e5e35626899..5c37a22a0637 100644
--- a/crypto/asymmetric_keys/public_key.h
+++ b/crypto/asymmetric_keys/public_key.h
@@ -28,3 +28,9 @@ struct public_key_algorithm {
};
extern const struct public_key_algorithm RSA_public_key_algorithm;
+
+/*
+ * public_key.c
+ */
+extern int public_key_verify_signature(const struct public_key *pk,
+ const struct public_key_signature *sig);
diff --git a/crypto/asymmetric_keys/rsa.c b/crypto/asymmetric_keys/rsa.c
index 4a6a0696f8a3..459cf97a75e2 100644
--- a/crypto/asymmetric_keys/rsa.c
+++ b/crypto/asymmetric_keys/rsa.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <crypto/algapi.h>
#include "public_key.h"
MODULE_LICENSE("GPL");
@@ -73,13 +74,13 @@ static const struct {
size_t size;
} RSA_ASN1_templates[PKEY_HASH__LAST] = {
#define _(X) { RSA_digest_info_##X, sizeof(RSA_digest_info_##X) }
- [PKEY_HASH_MD5] = _(MD5),
- [PKEY_HASH_SHA1] = _(SHA1),
- [PKEY_HASH_RIPE_MD_160] = _(RIPE_MD_160),
- [PKEY_HASH_SHA256] = _(SHA256),
- [PKEY_HASH_SHA384] = _(SHA384),
- [PKEY_HASH_SHA512] = _(SHA512),
- [PKEY_HASH_SHA224] = _(SHA224),
+ [HASH_ALGO_MD5] = _(MD5),
+ [HASH_ALGO_SHA1] = _(SHA1),
+ [HASH_ALGO_RIPE_MD_160] = _(RIPE_MD_160),
+ [HASH_ALGO_SHA256] = _(SHA256),
+ [HASH_ALGO_SHA384] = _(SHA384),
+ [HASH_ALGO_SHA512] = _(SHA512),
+ [HASH_ALGO_SHA224] = _(SHA224),
#undef _
};
@@ -189,12 +190,12 @@ static int RSA_verify(const u8 *H, const u8 *EM, size_t k, size_t hash_size,
}
}
- if (memcmp(asn1_template, EM + T_offset, asn1_size) != 0) {
+ if (crypto_memneq(asn1_template, EM + T_offset, asn1_size) != 0) {
kleave(" = -EBADMSG [EM[T] ASN.1 mismatch]");
return -EBADMSG;
}
- if (memcmp(H, EM + T_offset + asn1_size, hash_size) != 0) {
+ if (crypto_memneq(H, EM + T_offset + asn1_size, hash_size) != 0) {
kleave(" = -EKEYREJECTED [EM[T] hash mismatch]");
return -EKEYREJECTED;
}
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index facbf26bc6bb..29893162497c 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -47,6 +47,8 @@ void x509_free_certificate(struct x509_certificate *cert)
kfree(cert->subject);
kfree(cert->fingerprint);
kfree(cert->authority);
+ kfree(cert->sig.digest);
+ mpi_free(cert->sig.rsa.s);
kfree(cert);
}
}
@@ -152,33 +154,33 @@ int x509_note_pkey_algo(void *context, size_t hdrlen,
return -ENOPKG; /* Unsupported combination */
case OID_md4WithRSAEncryption:
- ctx->cert->sig_hash_algo = PKEY_HASH_MD5;
- ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->sig.pkey_hash_algo = HASH_ALGO_MD5;
+ ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
break;
case OID_sha1WithRSAEncryption:
- ctx->cert->sig_hash_algo = PKEY_HASH_SHA1;
- ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA1;
+ ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
break;
case OID_sha256WithRSAEncryption:
- ctx->cert->sig_hash_algo = PKEY_HASH_SHA256;
- ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA256;
+ ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
break;
case OID_sha384WithRSAEncryption:
- ctx->cert->sig_hash_algo = PKEY_HASH_SHA384;
- ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA384;
+ ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
break;
case OID_sha512WithRSAEncryption:
- ctx->cert->sig_hash_algo = PKEY_HASH_SHA512;
- ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA512;
+ ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
break;
case OID_sha224WithRSAEncryption:
- ctx->cert->sig_hash_algo = PKEY_HASH_SHA224;
- ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA224;
+ ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;
break;
}
@@ -203,8 +205,8 @@ int x509_note_signature(void *context, size_t hdrlen,
return -EINVAL;
}
- ctx->cert->sig = value;
- ctx->cert->sig_size = vlen;
+ ctx->cert->raw_sig = value;
+ ctx->cert->raw_sig_size = vlen;
return 0;
}
@@ -343,8 +345,9 @@ int x509_extract_key_data(void *context, size_t hdrlen,
if (ctx->last_oid != OID_rsaEncryption)
return -ENOPKG;
- /* There seems to be an extraneous 0 byte on the front of the data */
- ctx->cert->pkey_algo = PKEY_ALGO_RSA;
+ ctx->cert->pub->pkey_algo = PKEY_ALGO_RSA;
+
+ /* Discard the BIT STRING metadata */
ctx->key = value + 1;
ctx->key_size = vlen - 1;
return 0;
diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h
index f86dc5fcc4ad..87d9cc26f630 100644
--- a/crypto/asymmetric_keys/x509_parser.h
+++ b/crypto/asymmetric_keys/x509_parser.h
@@ -9,6 +9,7 @@
* 2 of the Licence, or (at your option) any later version.
*/
+#include <linux/time.h>
#include <crypto/public_key.h>
struct x509_certificate {
@@ -20,13 +21,11 @@ struct x509_certificate {
char *authority; /* Authority key fingerprint as hex */
struct tm valid_from;
struct tm valid_to;
- enum pkey_algo pkey_algo : 8; /* Public key algorithm */
- enum pkey_algo sig_pkey_algo : 8; /* Signature public key algorithm */
- enum pkey_hash_algo sig_hash_algo : 8; /* Signature hash algorithm */
const void *tbs; /* Signed data */
- size_t tbs_size; /* Size of signed data */
- const void *sig; /* Signature data */
- size_t sig_size; /* Size of sigature */
+ unsigned tbs_size; /* Size of signed data */
+ unsigned raw_sig_size; /* Size of signature */
+ const void *raw_sig; /* Signature data */
+ struct public_key_signature sig; /* Signature parameters */
};
/*
@@ -34,3 +33,10 @@ struct x509_certificate {
*/
extern void x509_free_certificate(struct x509_certificate *cert);
extern struct x509_certificate *x509_cert_parse(const void *data, size_t datalen);
+
+/*
+ * x509_public_key.c
+ */
+extern int x509_get_sig_params(struct x509_certificate *cert);
+extern int x509_check_signature(const struct public_key *pub,
+ struct x509_certificate *cert);
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 06007f0e880c..382ef0d2ff2e 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -23,82 +23,84 @@
#include "public_key.h"
#include "x509_parser.h"
-static const
-struct public_key_algorithm *x509_public_key_algorithms[PKEY_ALGO__LAST] = {
- [PKEY_ALGO_DSA] = NULL,
-#if defined(CONFIG_PUBLIC_KEY_ALGO_RSA) || \
- defined(CONFIG_PUBLIC_KEY_ALGO_RSA_MODULE)
- [PKEY_ALGO_RSA] = &RSA_public_key_algorithm,
-#endif
-};
-
/*
- * Check the signature on a certificate using the provided public key
+ * Set up the signature parameters in an X.509 certificate. This involves
+ * digesting the signed data and extracting the signature.
*/
-static int x509_check_signature(const struct public_key *pub,
- const struct x509_certificate *cert)
+int x509_get_sig_params(struct x509_certificate *cert)
{
- struct public_key_signature *sig;
struct crypto_shash *tfm;
struct shash_desc *desc;
size_t digest_size, desc_size;
+ void *digest;
int ret;
pr_devel("==>%s()\n", __func__);
-
+
+ if (cert->sig.rsa.s)
+ return 0;
+
+ cert->sig.rsa.s = mpi_read_raw_data(cert->raw_sig, cert->raw_sig_size);
+ if (!cert->sig.rsa.s)
+ return -ENOMEM;
+ cert->sig.nr_mpi = 1;
+
/* Allocate the hashing algorithm we're going to need and find out how
* big the hash operational data will be.
*/
- tfm = crypto_alloc_shash(pkey_hash_algo[cert->sig_hash_algo], 0, 0);
+ tfm = crypto_alloc_shash(hash_algo_name[cert->sig.pkey_hash_algo], 0, 0);
if (IS_ERR(tfm))
return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm);
desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
digest_size = crypto_shash_digestsize(tfm);
- /* We allocate the hash operational data storage on the end of our
- * context data.
+ /* We allocate the hash operational data storage on the end of the
+ * digest storage space.
*/
ret = -ENOMEM;
- sig = kzalloc(sizeof(*sig) + desc_size + digest_size, GFP_KERNEL);
- if (!sig)
- goto error_no_sig;
+ digest = kzalloc(digest_size + desc_size, GFP_KERNEL);
+ if (!digest)
+ goto error;
- sig->pkey_hash_algo = cert->sig_hash_algo;
- sig->digest = (u8 *)sig + sizeof(*sig) + desc_size;
- sig->digest_size = digest_size;
+ cert->sig.digest = digest;
+ cert->sig.digest_size = digest_size;
- desc = (void *)sig + sizeof(*sig);
- desc->tfm = tfm;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ desc = digest + digest_size;
+ desc->tfm = tfm;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
ret = crypto_shash_init(desc);
if (ret < 0)
goto error;
+ might_sleep();
+ ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, digest);
+error:
+ crypto_free_shash(tfm);
+ pr_devel("<==%s() = %d\n", __func__, ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(x509_get_sig_params);
- ret = -ENOMEM;
- sig->rsa.s = mpi_read_raw_data(cert->sig, cert->sig_size);
- if (!sig->rsa.s)
- goto error;
+/*
+ * Check the signature on a certificate using the provided public key
+ */
+int x509_check_signature(const struct public_key *pub,
+ struct x509_certificate *cert)
+{
+ int ret;
- ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, sig->digest);
- if (ret < 0)
- goto error_mpi;
+ pr_devel("==>%s()\n", __func__);
- ret = pub->algo->verify_signature(pub, sig);
+ ret = x509_get_sig_params(cert);
+ if (ret < 0)
+ return ret;
+ ret = public_key_verify_signature(pub, &cert->sig);
pr_debug("Cert Verification: %d\n", ret);
-
-error_mpi:
- mpi_free(sig->rsa.s);
-error:
- kfree(sig);
-error_no_sig:
- crypto_free_shash(tfm);
-
- pr_devel("<==%s() = %d\n", __func__, ret);
return ret;
}
+EXPORT_SYMBOL_GPL(x509_check_signature);
/*
* Attempt to parse a data blob for a key as an X509 certificate.
@@ -106,7 +108,6 @@ error_no_sig:
static int x509_key_preparse(struct key_preparsed_payload *prep)
{
struct x509_certificate *cert;
- struct tm now;
size_t srlen, sulen;
char *desc = NULL;
int ret;
@@ -117,7 +118,18 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
pr_devel("Cert Issuer: %s\n", cert->issuer);
pr_devel("Cert Subject: %s\n", cert->subject);
- pr_devel("Cert Key Algo: %s\n", pkey_algo[cert->pkey_algo]);
+
+ if (cert->pub->pkey_algo >= PKEY_ALGO__LAST ||
+ cert->sig.pkey_algo >= PKEY_ALGO__LAST ||
+ cert->sig.pkey_hash_algo >= PKEY_HASH__LAST ||
+ !pkey_algo[cert->pub->pkey_algo] ||
+ !pkey_algo[cert->sig.pkey_algo] ||
+ !hash_algo_name[cert->sig.pkey_hash_algo]) {
+ ret = -ENOPKG;
+ goto error_free_cert;
+ }
+
+ pr_devel("Cert Key Algo: %s\n", pkey_algo_name[cert->pub->pkey_algo]);
pr_devel("Cert Valid From: %04ld-%02d-%02d %02d:%02d:%02d\n",
cert->valid_from.tm_year + 1900, cert->valid_from.tm_mon + 1,
cert->valid_from.tm_mday, cert->valid_from.tm_hour,
@@ -127,58 +139,22 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
cert->valid_to.tm_mday, cert->valid_to.tm_hour,
cert->valid_to.tm_min, cert->valid_to.tm_sec);
pr_devel("Cert Signature: %s + %s\n",
- pkey_algo[cert->sig_pkey_algo],
- pkey_hash_algo[cert->sig_hash_algo]);
+ pkey_algo_name[cert->sig.pkey_algo],
+ hash_algo_name[cert->sig.pkey_hash_algo]);
- if (!cert->fingerprint || !cert->authority) {
- pr_warn("Cert for '%s' must have SubjKeyId and AuthKeyId extensions\n",
+ if (!cert->fingerprint) {
+ pr_warn("Cert for '%s' must have a SubjKeyId extension\n",
cert->subject);
ret = -EKEYREJECTED;
goto error_free_cert;
}
- time_to_tm(CURRENT_TIME.tv_sec, 0, &now);
- pr_devel("Now: %04ld-%02d-%02d %02d:%02d:%02d\n",
- now.tm_year + 1900, now.tm_mon + 1, now.tm_mday,
- now.tm_hour, now.tm_min, now.tm_sec);
- if (now.tm_year < cert->valid_from.tm_year ||
- (now.tm_year == cert->valid_from.tm_year &&
- (now.tm_mon < cert->valid_from.tm_mon ||
- (now.tm_mon == cert->valid_from.tm_mon &&
- (now.tm_mday < cert->valid_from.tm_mday ||
- (now.tm_mday == cert->valid_from.tm_mday &&
- (now.tm_hour < cert->valid_from.tm_hour ||
- (now.tm_hour == cert->valid_from.tm_hour &&
- (now.tm_min < cert->valid_from.tm_min ||
- (now.tm_min == cert->valid_from.tm_min &&
- (now.tm_sec < cert->valid_from.tm_sec
- ))))))))))) {
- pr_warn("Cert %s is not yet valid\n", cert->fingerprint);
- ret = -EKEYREJECTED;
- goto error_free_cert;
- }
- if (now.tm_year > cert->valid_to.tm_year ||
- (now.tm_year == cert->valid_to.tm_year &&
- (now.tm_mon > cert->valid_to.tm_mon ||
- (now.tm_mon == cert->valid_to.tm_mon &&
- (now.tm_mday > cert->valid_to.tm_mday ||
- (now.tm_mday == cert->valid_to.tm_mday &&
- (now.tm_hour > cert->valid_to.tm_hour ||
- (now.tm_hour == cert->valid_to.tm_hour &&
- (now.tm_min > cert->valid_to.tm_min ||
- (now.tm_min == cert->valid_to.tm_min &&
- (now.tm_sec > cert->valid_to.tm_sec
- ))))))))))) {
- pr_warn("Cert %s has expired\n", cert->fingerprint);
- ret = -EKEYEXPIRED;
- goto error_free_cert;
- }
-
- cert->pub->algo = x509_public_key_algorithms[cert->pkey_algo];
+ cert->pub->algo = pkey_algo[cert->pub->pkey_algo];
cert->pub->id_type = PKEY_ID_X509;
- /* Check the signature on the key */
- if (strcmp(cert->fingerprint, cert->authority) == 0) {
+ /* Check the signature on the key if it appears to be self-signed */
+ if (!cert->authority ||
+ strcmp(cert->fingerprint, cert->authority) == 0) {
ret = x509_check_signature(cert->pub, cert);
if (ret < 0)
goto error_free_cert;
@@ -237,3 +213,6 @@ static void __exit x509_key_exit(void)
module_init(x509_key_init);
module_exit(x509_key_exit);
+
+MODULE_DESCRIPTION("X.509 certificate parser");
+MODULE_LICENSE("GPL");
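The hunk above replaces unchecked table lookups with explicit range and NULL checks before indexing pkey_algo[], pkey_algo_name[] and hash_algo_name[], since the indices come from parsed (untrusted) certificate data. A minimal sketch of that defensive pattern, with illustrative names standing in for the kernel's tables:

    enum algo { ALGO_RSA, ALGO_DSA, ALGO__LAST };

    /* A NULL slot models an algorithm that the enum knows about but
     * that is not built in; the kernel returns -ENOPKG both for that
     * case and for an out-of-range index.
     */
    static const char *const algo_name[ALGO__LAST] = {
            [ALGO_RSA] = "rsa",
    };

    static const char *lookup_algo_name(unsigned int idx)
    {
            if (idx >= ALGO__LAST)          /* reject out-of-range input */
                    return NULL;
            return algo_name[idx];          /* may still be NULL: no provider */
    }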
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 9e62feffb374..f8c0b8dbeb75 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -50,33 +50,36 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
&dest, 1, &src, 1, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
+ struct dmaengine_unmap_data *unmap = NULL;
- if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
- dma_addr_t dma_dest, dma_src;
+ if (device)
+ unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
+
+ if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
unsigned long dma_prep_flags = 0;
if (submit->cb_fn)
dma_prep_flags |= DMA_PREP_INTERRUPT;
if (submit->flags & ASYNC_TX_FENCE)
dma_prep_flags |= DMA_PREP_FENCE;
- dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
- DMA_FROM_DEVICE);
-
- dma_src = dma_map_page(device->dev, src, src_offset, len,
- DMA_TO_DEVICE);
-
- tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
- len, dma_prep_flags);
- if (!tx) {
- dma_unmap_page(device->dev, dma_dest, len,
- DMA_FROM_DEVICE);
- dma_unmap_page(device->dev, dma_src, len,
- DMA_TO_DEVICE);
- }
+
+ unmap->to_cnt = 1;
+ unmap->addr[0] = dma_map_page(device->dev, src, src_offset, len,
+ DMA_TO_DEVICE);
+ unmap->from_cnt = 1;
+ unmap->addr[1] = dma_map_page(device->dev, dest, dest_offset, len,
+ DMA_FROM_DEVICE);
+ unmap->len = len;
+
+ tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],
+ unmap->addr[0], len,
+ dma_prep_flags);
}
if (tx) {
pr_debug("%s: (async) len: %zu\n", __func__, len);
+
+ dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
} else {
void *dest_buf, *src_buf;
@@ -96,6 +99,8 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
async_tx_sync_epilog(submit);
}
+ dmaengine_unmap_put(unmap);
+
return tx;
}
EXPORT_SYMBOL_GPL(async_memcpy);
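This file, and the async_tx files that follow, move DMA unmapping out of the helpers and into a refcounted struct dmaengine_unmap_data that travels with the descriptor. The lifecycle is the same everywhere; a condensed sketch, assuming chan, device, the pages and len are in scope as in the hunk above:

    struct dmaengine_unmap_data *unmap;

    unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO); /* ref = 1 */
    if (unmap) {
            unmap->to_cnt = 1;      /* addr[0] is a DMA_TO_DEVICE mapping */
            unmap->addr[0] = dma_map_page(device->dev, src, src_offset,
                                          len, DMA_TO_DEVICE);
            unmap->from_cnt = 1;    /* addr[1] is a DMA_FROM_DEVICE mapping */
            unmap->addr[1] = dma_map_page(device->dev, dest, dest_offset,
                                          len, DMA_FROM_DEVICE);
            unmap->len = len;

            tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],
                                                unmap->addr[0], len, flags);
            if (tx)
                    dma_set_unmap(tx, unmap);  /* descriptor takes a ref */
    }
    dmaengine_unmap_put(unmap);  /* drop our ref; NULL-safe, unmaps on last put */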
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 91d5d385899e..d05327caf69d 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -46,49 +46,24 @@ static struct page *pq_scribble_page;
* do_async_gen_syndrome - asynchronously calculate P and/or Q
*/
static __async_inline struct dma_async_tx_descriptor *
-do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
- const unsigned char *scfs, unsigned int offset, int disks,
- size_t len, dma_addr_t *dma_src,
+do_async_gen_syndrome(struct dma_chan *chan,
+ const unsigned char *scfs, int disks,
+ struct dmaengine_unmap_data *unmap,
+ enum dma_ctrl_flags dma_flags,
struct async_submit_ctl *submit)
{
struct dma_async_tx_descriptor *tx = NULL;
struct dma_device *dma = chan->device;
- enum dma_ctrl_flags dma_flags = 0;
enum async_tx_flags flags_orig = submit->flags;
dma_async_tx_callback cb_fn_orig = submit->cb_fn;
dma_async_tx_callback cb_param_orig = submit->cb_param;
int src_cnt = disks - 2;
- unsigned char coefs[src_cnt];
unsigned short pq_src_cnt;
dma_addr_t dma_dest[2];
int src_off = 0;
- int idx;
- int i;
- /* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */
- if (P(blocks, disks))
- dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset,
- len, DMA_BIDIRECTIONAL);
- else
- dma_flags |= DMA_PREP_PQ_DISABLE_P;
- if (Q(blocks, disks))
- dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset,
- len, DMA_BIDIRECTIONAL);
- else
- dma_flags |= DMA_PREP_PQ_DISABLE_Q;
-
- /* convert source addresses being careful to collapse 'empty'
- * sources and update the coefficients accordingly
- */
- for (i = 0, idx = 0; i < src_cnt; i++) {
- if (blocks[i] == NULL)
- continue;
- dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
- DMA_TO_DEVICE);
- coefs[idx] = scfs[i];
- idx++;
- }
- src_cnt = idx;
+ if (submit->flags & ASYNC_TX_FENCE)
+ dma_flags |= DMA_PREP_FENCE;
while (src_cnt > 0) {
submit->flags = flags_orig;
@@ -100,28 +75,25 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
if (src_cnt > pq_src_cnt) {
submit->flags &= ~ASYNC_TX_ACK;
submit->flags |= ASYNC_TX_FENCE;
- dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP;
submit->cb_fn = NULL;
submit->cb_param = NULL;
} else {
- dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP;
submit->cb_fn = cb_fn_orig;
submit->cb_param = cb_param_orig;
if (cb_fn_orig)
dma_flags |= DMA_PREP_INTERRUPT;
}
- if (submit->flags & ASYNC_TX_FENCE)
- dma_flags |= DMA_PREP_FENCE;
- /* Since we have clobbered the src_list we are committed
- * to doing this asynchronously. Drivers force forward
- * progress in case they can not provide a descriptor
+ /* Drivers force forward progress in case they can not provide
+ * a descriptor
*/
for (;;) {
+ dma_dest[0] = unmap->addr[disks - 2];
+ dma_dest[1] = unmap->addr[disks - 1];
tx = dma->device_prep_dma_pq(chan, dma_dest,
- &dma_src[src_off],
+ &unmap->addr[src_off],
pq_src_cnt,
- &coefs[src_off], len,
+ &scfs[src_off], unmap->len,
dma_flags);
if (likely(tx))
break;
@@ -129,6 +101,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
dma_async_issue_pending(chan);
}
+ dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
submit->depend_tx = tx;
@@ -188,10 +161,6 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
* set to NULL those buffers will be replaced with the raid6_zero_page
* in the synchronous path and omitted in the hardware-asynchronous
* path.
- *
- * 'blocks' note: if submit->scribble is NULL then the contents of
- * 'blocks' may be overwritten to perform address conversions
- * (dma_map_page() or page_address()).
*/
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
@@ -202,26 +171,69 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
&P(blocks, disks), 2,
blocks, src_cnt, len);
struct dma_device *device = chan ? chan->device : NULL;
- dma_addr_t *dma_src = NULL;
+ struct dmaengine_unmap_data *unmap = NULL;
BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
- if (submit->scribble)
- dma_src = submit->scribble;
- else if (sizeof(dma_addr_t) <= sizeof(struct page *))
- dma_src = (dma_addr_t *) blocks;
+ if (device)
+ unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
- if (dma_src && device &&
+ if (unmap &&
(src_cnt <= dma_maxpq(device, 0) ||
dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
is_dma_pq_aligned(device, offset, 0, len)) {
+ struct dma_async_tx_descriptor *tx;
+ enum dma_ctrl_flags dma_flags = 0;
+ unsigned char coefs[src_cnt];
+ int i, j;
+
/* run the p+q asynchronously */
pr_debug("%s: (async) disks: %d len: %zu\n",
__func__, disks, len);
- return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset,
- disks, len, dma_src, submit);
+
+ /* convert source addresses being careful to collapse 'empty'
+ * sources and update the coefficients accordingly
+ */
+ unmap->len = len;
+ for (i = 0, j = 0; i < src_cnt; i++) {
+ if (blocks[i] == NULL)
+ continue;
+ unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset,
+ len, DMA_TO_DEVICE);
+ coefs[j] = raid6_gfexp[i];
+ unmap->to_cnt++;
+ j++;
+ }
+
+ /*
+ * DMAs use destinations as sources,
+ * so use BIDIRECTIONAL mapping
+ */
+ unmap->bidi_cnt++;
+ if (P(blocks, disks))
+ unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
+ offset, len, DMA_BIDIRECTIONAL);
+ else {
+ unmap->addr[j++] = 0;
+ dma_flags |= DMA_PREP_PQ_DISABLE_P;
+ }
+
+ unmap->bidi_cnt++;
+ if (Q(blocks, disks))
+ unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
+ offset, len, DMA_BIDIRECTIONAL);
+ else {
+ unmap->addr[j++] = 0;
+ dma_flags |= DMA_PREP_PQ_DISABLE_Q;
+ }
+
+ tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
+ dmaengine_unmap_put(unmap);
+ return tx;
}
+ dmaengine_unmap_put(unmap);
+
/* run the pq synchronously */
pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);
@@ -277,50 +289,60 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
struct dma_async_tx_descriptor *tx;
unsigned char coefs[disks-2];
enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
- dma_addr_t *dma_src = NULL;
- int src_cnt = 0;
+ struct dmaengine_unmap_data *unmap = NULL;
BUG_ON(disks < 4);
- if (submit->scribble)
- dma_src = submit->scribble;
- else if (sizeof(dma_addr_t) <= sizeof(struct page *))
- dma_src = (dma_addr_t *) blocks;
+ if (device)
+ unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
- if (dma_src && device && disks <= dma_maxpq(device, 0) &&
+ if (unmap && disks <= dma_maxpq(device, 0) &&
is_dma_pq_aligned(device, offset, 0, len)) {
struct device *dev = device->dev;
- dma_addr_t *pq = &dma_src[disks-2];
- int i;
+ dma_addr_t pq[2];
+ int i, j = 0, src_cnt = 0;
pr_debug("%s: (async) disks: %d len: %zu\n",
__func__, disks, len);
- if (!P(blocks, disks))
+
+ unmap->len = len;
+ for (i = 0; i < disks-2; i++)
+ if (likely(blocks[i])) {
+ unmap->addr[j] = dma_map_page(dev, blocks[i],
+ offset, len,
+ DMA_TO_DEVICE);
+ coefs[j] = raid6_gfexp[i];
+ unmap->to_cnt++;
+ src_cnt++;
+ j++;
+ }
+
+ if (!P(blocks, disks)) {
+ pq[0] = 0;
dma_flags |= DMA_PREP_PQ_DISABLE_P;
- else
+ } else {
pq[0] = dma_map_page(dev, P(blocks, disks),
offset, len,
DMA_TO_DEVICE);
- if (!Q(blocks, disks))
+ unmap->addr[j++] = pq[0];
+ unmap->to_cnt++;
+ }
+ if (!Q(blocks, disks)) {
+ pq[1] = 0;
dma_flags |= DMA_PREP_PQ_DISABLE_Q;
- else
+ } else {
pq[1] = dma_map_page(dev, Q(blocks, disks),
offset, len,
DMA_TO_DEVICE);
+ unmap->addr[j++] = pq[1];
+ unmap->to_cnt++;
+ }
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
- for (i = 0; i < disks-2; i++)
- if (likely(blocks[i])) {
- dma_src[src_cnt] = dma_map_page(dev, blocks[i],
- offset, len,
- DMA_TO_DEVICE);
- coefs[src_cnt] = raid6_gfexp[i];
- src_cnt++;
- }
-
for (;;) {
- tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
+ tx = device->device_prep_dma_pq_val(chan, pq,
+ unmap->addr,
src_cnt,
coefs,
len, pqres,
@@ -330,6 +352,8 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
async_tx_quiesce(&submit->depend_tx);
dma_async_issue_pending(chan);
}
+
+ dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
return tx;
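With the scribble/clobber scheme gone, async_gen_syndrome() now builds the whole operand set in unmap->addr[] and do_async_gen_syndrome() just slices it. The expected layout, sketched for reference (here 'disks' counts the data blocks plus P and Q, as above):

    /*
     *   unmap->addr[0 .. disks-3]  collapsed source blocks, DMA_TO_DEVICE
     *   unmap->addr[disks-2]       P destination, DMA_BIDIRECTIONAL,
     *                              or 0 with DMA_PREP_PQ_DISABLE_P
     *   unmap->addr[disks-1]       Q destination, DMA_BIDIRECTIONAL,
     *                              or 0 with DMA_PREP_PQ_DISABLE_Q
     */
    dma_dest[0] = unmap->addr[disks - 2];   /* P */
    dma_dest[1] = unmap->addr[disks - 1];   /* Q */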
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index a9f08a6a582e..934a84981495 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -26,6 +26,7 @@
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
+#include <linux/dmaengine.h>
static struct dma_async_tx_descriptor *
async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
@@ -34,35 +35,45 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
&dest, 1, srcs, 2, len);
struct dma_device *dma = chan ? chan->device : NULL;
+ struct dmaengine_unmap_data *unmap = NULL;
const u8 *amul, *bmul;
u8 ax, bx;
u8 *a, *b, *c;
- if (dma) {
- dma_addr_t dma_dest[2];
- dma_addr_t dma_src[2];
+ if (dma)
+ unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+
+ if (unmap) {
struct device *dev = dma->dev;
+ dma_addr_t pq[2];
struct dma_async_tx_descriptor *tx;
enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
- dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
- dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE);
- dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE);
- tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef,
+ unmap->addr[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE);
+ unmap->addr[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE);
+ unmap->to_cnt = 2;
+
+ unmap->addr[2] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
+ unmap->bidi_cnt = 1;
+ /* engine only looks at Q, but expects it to follow P */
+ pq[1] = unmap->addr[2];
+
+ unmap->len = len;
+ tx = dma->device_prep_dma_pq(chan, pq, unmap->addr, 2, coef,
len, dma_flags);
if (tx) {
+ dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
+ dmaengine_unmap_put(unmap);
return tx;
}
/* could not get a descriptor, unmap and fall through to
* the synchronous path
*/
- dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL);
- dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
- dma_unmap_page(dev, dma_src[1], len, DMA_TO_DEVICE);
+ dmaengine_unmap_put(unmap);
}
/* run the operation synchronously */
@@ -89,23 +100,38 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
&dest, 1, &src, 1, len);
struct dma_device *dma = chan ? chan->device : NULL;
+ struct dmaengine_unmap_data *unmap = NULL;
const u8 *qmul; /* Q multiplier table */
u8 *d, *s;
- if (dma) {
+ if (dma)
+ unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+
+ if (unmap) {
dma_addr_t dma_dest[2];
- dma_addr_t dma_src[1];
struct device *dev = dma->dev;
struct dma_async_tx_descriptor *tx;
enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
- dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
- dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
- tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef,
- len, dma_flags);
+ unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
+ unmap->to_cnt++;
+ unmap->addr[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
+ dma_dest[1] = unmap->addr[1];
+ unmap->bidi_cnt++;
+ unmap->len = len;
+
+ /* this looks funny, but the engine looks for Q at
+ * dma_dest[1] and ignores dma_dest[0] as a dest
+ * due to DMA_PREP_PQ_DISABLE_P
+ */
+ tx = dma->device_prep_dma_pq(chan, dma_dest, unmap->addr,
+ 1, &coef, len, dma_flags);
+
if (tx) {
+ dma_set_unmap(tx, unmap);
+ dmaengine_unmap_put(unmap);
async_tx_submit(chan, tx, submit);
return tx;
}
@@ -113,8 +139,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
/* could not get a descriptor, unmap and fall through to
* the synchronous path
*/
- dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL);
- dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
+ dmaengine_unmap_put(unmap);
}
/* no channel available, or failed to allocate a descriptor, so
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 7be34248b450..39ea4791a3c9 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -128,7 +128,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
}
device->device_issue_pending(chan);
} else {
- if (dma_wait_for_async_tx(depend_tx) != DMA_SUCCESS)
+ if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE)
panic("%s: DMA error waiting for depend_tx\n",
__func__);
tx->tx_submit(tx);
@@ -280,7 +280,7 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
* we are referring to the correct operation
*/
BUG_ON(async_tx_test_ack(*tx));
- if (dma_wait_for_async_tx(*tx) != DMA_SUCCESS)
+ if (dma_wait_for_async_tx(*tx) != DMA_COMPLETE)
panic("%s: DMA error waiting for transaction\n",
__func__);
async_tx_ack(*tx);
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 8ade0a0481c6..3c562f5a60bb 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -33,48 +33,31 @@
/* do_async_xor - dma map the pages and perform the xor with an engine */
static __async_inline struct dma_async_tx_descriptor *
-do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
- unsigned int offset, int src_cnt, size_t len, dma_addr_t *dma_src,
+do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
struct async_submit_ctl *submit)
{
struct dma_device *dma = chan->device;
struct dma_async_tx_descriptor *tx = NULL;
- int src_off = 0;
- int i;
dma_async_tx_callback cb_fn_orig = submit->cb_fn;
void *cb_param_orig = submit->cb_param;
enum async_tx_flags flags_orig = submit->flags;
- enum dma_ctrl_flags dma_flags;
- int xor_src_cnt = 0;
- dma_addr_t dma_dest;
-
- /* map the dest bidrectional in case it is re-used as a source */
- dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
- for (i = 0; i < src_cnt; i++) {
- /* only map the dest once */
- if (!src_list[i])
- continue;
- if (unlikely(src_list[i] == dest)) {
- dma_src[xor_src_cnt++] = dma_dest;
- continue;
- }
- dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset,
- len, DMA_TO_DEVICE);
- }
- src_cnt = xor_src_cnt;
+ enum dma_ctrl_flags dma_flags = 0;
+ int src_cnt = unmap->to_cnt;
+ int xor_src_cnt;
+ dma_addr_t dma_dest = unmap->addr[unmap->to_cnt];
+ dma_addr_t *src_list = unmap->addr;
while (src_cnt) {
+ dma_addr_t tmp;
+
submit->flags = flags_orig;
- dma_flags = 0;
xor_src_cnt = min(src_cnt, (int)dma->max_xor);
- /* if we are submitting additional xors, leave the chain open,
- * clear the callback parameters, and leave the destination
- * buffer mapped
+ /* if we are submitting additional xors, leave the chain open
+ * and clear the callback parameters
*/
if (src_cnt > xor_src_cnt) {
submit->flags &= ~ASYNC_TX_ACK;
submit->flags |= ASYNC_TX_FENCE;
- dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
submit->cb_fn = NULL;
submit->cb_param = NULL;
} else {
@@ -85,12 +68,18 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
dma_flags |= DMA_PREP_INTERRUPT;
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
- /* Since we have clobbered the src_list we are committed
- * to doing this asynchronously. Drivers force forward progress
- * in case they can not provide a descriptor
+
+ /* Drivers force forward progress in case they can not provide a
+ * descriptor
*/
- tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off],
- xor_src_cnt, len, dma_flags);
+ tmp = src_list[0];
+ if (src_list > unmap->addr)
+ src_list[0] = dma_dest;
+ tx = dma->device_prep_dma_xor(chan, dma_dest, src_list,
+ xor_src_cnt, unmap->len,
+ dma_flags);
+ src_list[0] = tmp;
+
if (unlikely(!tx))
async_tx_quiesce(&submit->depend_tx);
@@ -99,22 +88,21 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
while (unlikely(!tx)) {
dma_async_issue_pending(chan);
tx = dma->device_prep_dma_xor(chan, dma_dest,
- &dma_src[src_off],
- xor_src_cnt, len,
+ src_list,
+ xor_src_cnt, unmap->len,
dma_flags);
}
+ dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
submit->depend_tx = tx;
if (src_cnt > xor_src_cnt) {
/* drop completed sources */
src_cnt -= xor_src_cnt;
- src_off += xor_src_cnt;
-
/* use the intermediate result a source */
- dma_src[--src_off] = dma_dest;
src_cnt++;
+ src_list += xor_src_cnt - 1;
} else
break;
}
@@ -189,22 +177,40 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
&dest, 1, src_list,
src_cnt, len);
- dma_addr_t *dma_src = NULL;
+ struct dma_device *device = chan ? chan->device : NULL;
+ struct dmaengine_unmap_data *unmap = NULL;
BUG_ON(src_cnt <= 1);
- if (submit->scribble)
- dma_src = submit->scribble;
- else if (sizeof(dma_addr_t) <= sizeof(struct page *))
- dma_src = (dma_addr_t *) src_list;
+ if (device)
+ unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO);
+
+ if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {
+ struct dma_async_tx_descriptor *tx;
+ int i, j;
- if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) {
/* run the xor asynchronously */
pr_debug("%s (async): len: %zu\n", __func__, len);
- return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
- dma_src, submit);
+ unmap->len = len;
+ for (i = 0, j = 0; i < src_cnt; i++) {
+ if (!src_list[i])
+ continue;
+ unmap->to_cnt++;
+ unmap->addr[j++] = dma_map_page(device->dev, src_list[i],
+ offset, len, DMA_TO_DEVICE);
+ }
+
+ /* map it bidirectional as it may be re-used as a source */
+ unmap->addr[j] = dma_map_page(device->dev, dest, offset, len,
+ DMA_BIDIRECTIONAL);
+ unmap->bidi_cnt = 1;
+
+ tx = do_async_xor(chan, unmap, submit);
+ dmaengine_unmap_put(unmap);
+ return tx;
} else {
+ dmaengine_unmap_put(unmap);
/* run the xor synchronously */
pr_debug("%s (sync): len: %zu\n", __func__, len);
WARN_ONCE(chan, "%s: no space for dma address conversion\n",
@@ -268,16 +274,14 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
- dma_addr_t *dma_src = NULL;
+ struct dmaengine_unmap_data *unmap = NULL;
BUG_ON(src_cnt <= 1);
- if (submit->scribble)
- dma_src = submit->scribble;
- else if (sizeof(dma_addr_t) <= sizeof(struct page *))
- dma_src = (dma_addr_t *) src_list;
+ if (device)
+ unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO);
- if (dma_src && device && src_cnt <= device->max_xor &&
+ if (unmap && src_cnt <= device->max_xor &&
is_dma_xor_aligned(device, offset, 0, len)) {
unsigned long dma_prep_flags = 0;
int i;
@@ -288,11 +292,15 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
dma_prep_flags |= DMA_PREP_INTERRUPT;
if (submit->flags & ASYNC_TX_FENCE)
dma_prep_flags |= DMA_PREP_FENCE;
- for (i = 0; i < src_cnt; i++)
- dma_src[i] = dma_map_page(device->dev, src_list[i],
- offset, len, DMA_TO_DEVICE);
- tx = device->device_prep_dma_xor_val(chan, dma_src, src_cnt,
+ for (i = 0; i < src_cnt; i++) {
+ unmap->addr[i] = dma_map_page(device->dev, src_list[i],
+ offset, len, DMA_TO_DEVICE);
+ unmap->to_cnt++;
+ }
+ unmap->len = len;
+
+ tx = device->device_prep_dma_xor_val(chan, unmap->addr, src_cnt,
len, result,
dma_prep_flags);
if (unlikely(!tx)) {
@@ -301,11 +309,11 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
while (!tx) {
dma_async_issue_pending(chan);
tx = device->device_prep_dma_xor_val(chan,
- dma_src, src_cnt, len, result,
+ unmap->addr, src_cnt, len, result,
dma_prep_flags);
}
}
-
+ dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
} else {
enum async_tx_flags flags_orig = submit->flags;
@@ -327,6 +335,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
async_tx_sync_epilog(submit);
submit->flags = flags_orig;
}
+ dmaengine_unmap_put(unmap);
return tx;
}
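The src_list juggling in do_async_xor() deserves a note: when src_cnt exceeds the engine's max_xor, the intermediate destination is re-injected as a source for the next chunk by advancing src_list and temporarily writing dma_dest into its first slot, then restoring the saved address so unmap->addr[] stays intact for the eventual unmap. A stripped-down sketch of just that bookkeeping, with names as in the hunk:

    while (src_cnt) {
            int xor_src_cnt = min(src_cnt, (int)dma->max_xor);
            dma_addr_t tmp = src_list[0];

            if (src_list > unmap->addr)     /* not the first chunk */
                    src_list[0] = dma_dest; /* chain the previous result in */

            /* ... prep and submit xor over src_list[0..xor_src_cnt-1] ... */

            src_list[0] = tmp;              /* keep unmap->addr[] valid */

            if (src_cnt > xor_src_cnt) {
                    src_cnt -= xor_src_cnt;
                    src_cnt++;                      /* result is now a source */
                    src_list += xor_src_cnt - 1;    /* slot 0 = result next time */
            } else {
                    break;
            }
    }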
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c
index 4a92bac744dc..dad95f45b88f 100644
--- a/crypto/async_tx/raid6test.c
+++ b/crypto/async_tx/raid6test.c
@@ -28,7 +28,7 @@
#undef pr
#define pr(fmt, args...) pr_info("raid6test: " fmt, ##args)
-#define NDISKS 16 /* Including P and Q */
+#define NDISKS 64 /* Including P and Q */
static struct page *dataptrs[NDISKS];
static addr_conv_t addr_conv[NDISKS];
@@ -219,6 +219,14 @@ static int raid6_test(void)
err += test(11, &tests);
err += test(12, &tests);
}
+
+ /* the 24 disk case is special for ioatdma as it is the boundary point
+ * at which it needs to switch from 8-source ops to 16-source
+ * ops for continuation (assumes DMA_HAS_PQ_CONTINUE is not set)
+ */
+ if (NDISKS > 24)
+ err += test(24, &tests);
+
err += test(NDISKS, &tests);
pr("\n");
diff --git a/crypto/authenc.c b/crypto/authenc.c
index ffce19de05cf..e1223559d5df 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -52,40 +52,52 @@ static void authenc_request_complete(struct aead_request *req, int err)
aead_request_complete(req, err);
}
-static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
- unsigned int keylen)
+int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
+ unsigned int keylen)
{
- unsigned int authkeylen;
- unsigned int enckeylen;
- struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- struct crypto_ahash *auth = ctx->auth;
- struct crypto_ablkcipher *enc = ctx->enc;
- struct rtattr *rta = (void *)key;
+ struct rtattr *rta = (struct rtattr *)key;
struct crypto_authenc_key_param *param;
- int err = -EINVAL;
if (!RTA_OK(rta, keylen))
- goto badkey;
+ return -EINVAL;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- goto badkey;
+ return -EINVAL;
if (RTA_PAYLOAD(rta) < sizeof(*param))
- goto badkey;
+ return -EINVAL;
param = RTA_DATA(rta);
- enckeylen = be32_to_cpu(param->enckeylen);
+ keys->enckeylen = be32_to_cpu(param->enckeylen);
key += RTA_ALIGN(rta->rta_len);
keylen -= RTA_ALIGN(rta->rta_len);
- if (keylen < enckeylen)
- goto badkey;
+ if (keylen < keys->enckeylen)
+ return -EINVAL;
- authkeylen = keylen - enckeylen;
+ keys->authkeylen = keylen - keys->enckeylen;
+ keys->authkey = key;
+ keys->enckey = key + keys->authkeylen;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_authenc_extractkeys);
+
+static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+ struct crypto_ahash *auth = ctx->auth;
+ struct crypto_ablkcipher *enc = ctx->enc;
+ struct crypto_authenc_keys keys;
+ int err = -EINVAL;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_ahash_setkey(auth, key, authkeylen);
+ err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) &
CRYPTO_TFM_RES_MASK);
@@ -95,7 +107,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen);
+ err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen);
crypto_aead_set_flags(authenc, crypto_ablkcipher_get_flags(enc) &
CRYPTO_TFM_RES_MASK);
@@ -188,7 +200,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
- err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+ err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
@@ -227,7 +239,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
- err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+ err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
@@ -368,9 +380,10 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
if (!err) {
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- struct ablkcipher_request *abreq = aead_request_ctx(areq);
- u8 *iv = (u8 *)(abreq + 1) +
- crypto_ablkcipher_reqsize(ctx->enc);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(areq);
+ struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
+ + ctx->reqoff);
+ u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(ctx->enc);
err = crypto_authenc_genicv(areq, iv, 0);
}
@@ -462,7 +475,7 @@ static int crypto_authenc_verify(struct aead_request *req,
ihash = ohash + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
- return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0;
+ return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
}
static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
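crypto_authenc_extractkeys() is exported so that authencesn (next file) can share the parsing. For reference, the blob it parses is the netlink-attribute layout used for authenc keys; a hedged sketch of building one, with bounds and error checks omitted:

    #include <linux/rtnetlink.h>
    #include <linux/string.h>
    #include <crypto/authenc.h>

    static void build_authenc_key(u8 *blob, const u8 *authkey,
                                  unsigned int authkeylen,
                                  const u8 *enckey, unsigned int enckeylen)
    {
            struct rtattr *rta = (struct rtattr *)blob;
            struct crypto_authenc_key_param *param;

            rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
            rta->rta_len = RTA_LENGTH(sizeof(*param));
            param = RTA_DATA(rta);
            param->enckeylen = cpu_to_be32(enckeylen);  /* big-endian on the wire */

            /* auth key immediately follows the attribute, cipher key after it */
            memcpy(blob + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
            memcpy(blob + RTA_SPACE(sizeof(*param)) + authkeylen,
                   enckey, enckeylen);
    }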
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index ab53762fc309..4be0dd4373a9 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -59,37 +59,19 @@ static void authenc_esn_request_complete(struct aead_request *req, int err)
static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key,
unsigned int keylen)
{
- unsigned int authkeylen;
- unsigned int enckeylen;
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct crypto_ahash *auth = ctx->auth;
struct crypto_ablkcipher *enc = ctx->enc;
- struct rtattr *rta = (void *)key;
- struct crypto_authenc_key_param *param;
+ struct crypto_authenc_keys keys;
int err = -EINVAL;
- if (!RTA_OK(rta, keylen))
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- goto badkey;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
- goto badkey;
-
- param = RTA_DATA(rta);
- enckeylen = be32_to_cpu(param->enckeylen);
-
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
-
- if (keylen < enckeylen)
- goto badkey;
-
- authkeylen = keylen - enckeylen;
crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_ahash_setkey(auth, key, authkeylen);
+ err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) &
CRYPTO_TFM_RES_MASK);
@@ -99,7 +81,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *
crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen);
+ err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen);
crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) &
CRYPTO_TFM_RES_MASK);
@@ -247,7 +229,7 @@ static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *ar
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
- err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+ err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
@@ -296,7 +278,7 @@ static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *a
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
- err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+ err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
@@ -336,7 +318,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
- err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+ err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
@@ -568,7 +550,7 @@ static int crypto_authenc_esn_verify(struct aead_request *req)
ihash = ohash + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
- return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0;
+ return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
}
static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 499c91717d93..1df84217f7c9 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -271,7 +271,8 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
}
/* compute plaintext into mac */
- get_data_to_compute(cipher, pctx, plain, cryptlen);
+ if (cryptlen)
+ get_data_to_compute(cipher, pctx, plain, cryptlen);
out:
return err;
@@ -363,7 +364,7 @@ static void crypto_ccm_decrypt_done(struct crypto_async_request *areq,
if (!err) {
err = crypto_ccm_auth(req, req->dst, cryptlen);
- if (!err && memcmp(pctx->auth_tag, pctx->odata, authsize))
+ if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize))
err = -EBADMSG;
}
aead_request_complete(req, err);
@@ -422,7 +423,7 @@ static int crypto_ccm_decrypt(struct aead_request *req)
return err;
/* verify */
- if (memcmp(authtag, odata, authsize))
+ if (crypto_memneq(authtag, odata, authsize))
return -EBADMSG;
return err;
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 43e1fb05ea54..b4f017939004 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -582,7 +582,7 @@ static int crypto_gcm_verify(struct aead_request *req,
crypto_xor(auth_tag, iauth_tag, 16);
scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0);
- return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
+ return crypto_memneq(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
}
static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
diff --git a/crypto/hash_info.c b/crypto/hash_info.c
new file mode 100644
index 000000000000..3e7ff46f26e8
--- /dev/null
+++ b/crypto/hash_info.c
@@ -0,0 +1,56 @@
+/*
+ * Hash Info: Hash algorithms information
+ *
+ * Copyright (c) 2013 Dmitry Kasatkin <d.kasatkin@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <linux/export.h>
+#include <crypto/hash_info.h>
+
+const char *const hash_algo_name[HASH_ALGO__LAST] = {
+ [HASH_ALGO_MD4] = "md4",
+ [HASH_ALGO_MD5] = "md5",
+ [HASH_ALGO_SHA1] = "sha1",
+ [HASH_ALGO_RIPE_MD_160] = "rmd160",
+ [HASH_ALGO_SHA256] = "sha256",
+ [HASH_ALGO_SHA384] = "sha384",
+ [HASH_ALGO_SHA512] = "sha512",
+ [HASH_ALGO_SHA224] = "sha224",
+ [HASH_ALGO_RIPE_MD_128] = "rmd128",
+ [HASH_ALGO_RIPE_MD_256] = "rmd256",
+ [HASH_ALGO_RIPE_MD_320] = "rmd320",
+ [HASH_ALGO_WP_256] = "wp256",
+ [HASH_ALGO_WP_384] = "wp384",
+ [HASH_ALGO_WP_512] = "wp512",
+ [HASH_ALGO_TGR_128] = "tgr128",
+ [HASH_ALGO_TGR_160] = "tgr160",
+ [HASH_ALGO_TGR_192] = "tgr192",
+};
+EXPORT_SYMBOL_GPL(hash_algo_name);
+
+const int hash_digest_size[HASH_ALGO__LAST] = {
+ [HASH_ALGO_MD4] = MD5_DIGEST_SIZE,
+ [HASH_ALGO_MD5] = MD5_DIGEST_SIZE,
+ [HASH_ALGO_SHA1] = SHA1_DIGEST_SIZE,
+ [HASH_ALGO_RIPE_MD_160] = RMD160_DIGEST_SIZE,
+ [HASH_ALGO_SHA256] = SHA256_DIGEST_SIZE,
+ [HASH_ALGO_SHA384] = SHA384_DIGEST_SIZE,
+ [HASH_ALGO_SHA512] = SHA512_DIGEST_SIZE,
+ [HASH_ALGO_SHA224] = SHA224_DIGEST_SIZE,
+ [HASH_ALGO_RIPE_MD_128] = RMD128_DIGEST_SIZE,
+ [HASH_ALGO_RIPE_MD_256] = RMD256_DIGEST_SIZE,
+ [HASH_ALGO_RIPE_MD_320] = RMD320_DIGEST_SIZE,
+ [HASH_ALGO_WP_256] = WP256_DIGEST_SIZE,
+ [HASH_ALGO_WP_384] = WP384_DIGEST_SIZE,
+ [HASH_ALGO_WP_512] = WP512_DIGEST_SIZE,
+ [HASH_ALGO_TGR_128] = TGR128_DIGEST_SIZE,
+ [HASH_ALGO_TGR_160] = TGR160_DIGEST_SIZE,
+ [HASH_ALGO_TGR_192] = TGR192_DIGEST_SIZE,
+};
+EXPORT_SYMBOL_GPL(hash_digest_size);
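A consumer maps a HASH_ALGO_* value to its name or digest size through these tables; as with the x509 hunk earlier in this diff, the index should be range-checked first. A minimal sketch:

    #include <linux/errno.h>
    #include <linux/printk.h>
    #include <crypto/hash_info.h>

    static int describe_hash(enum hash_algo algo)
    {
            if (algo >= HASH_ALGO__LAST)
                    return -EINVAL;
            pr_info("%s: %d-byte digest\n",
                    hash_algo_name[algo], hash_digest_size[algo]);
            return 0;
    }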
diff --git a/crypto/memneq.c b/crypto/memneq.c
new file mode 100644
index 000000000000..cd0162221c14
--- /dev/null
+++ b/crypto/memneq.c
@@ -0,0 +1,138 @@
+/*
+ * Constant-time equality testing of memory regions.
+ *
+ * Authors:
+ *
+ * James Yonan <james@openvpn.net>
+ * Daniel Borkmann <dborkman@redhat.com>
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of OpenVPN Technologies nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <crypto/algapi.h>
+
+#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ
+
+/* Generic path for arbitrary size */
+static inline unsigned long
+__crypto_memneq_generic(const void *a, const void *b, size_t size)
+{
+ unsigned long neq = 0;
+
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ while (size >= sizeof(unsigned long)) {
+ neq |= *(unsigned long *)a ^ *(unsigned long *)b;
+ a += sizeof(unsigned long);
+ b += sizeof(unsigned long);
+ size -= sizeof(unsigned long);
+ }
+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+ while (size > 0) {
+ neq |= *(unsigned char *)a ^ *(unsigned char *)b;
+ a += 1;
+ b += 1;
+ size -= 1;
+ }
+ return neq;
+}
+
+/* Loop-free fast-path for frequently used 16-byte size */
+static inline unsigned long __crypto_memneq_16(const void *a, const void *b)
+{
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (sizeof(unsigned long) == 8)
+ return ((*(unsigned long *)(a) ^ *(unsigned long *)(b))
+ | (*(unsigned long *)(a+8) ^ *(unsigned long *)(b+8)));
+ else if (sizeof(unsigned int) == 4)
+ return ((*(unsigned int *)(a) ^ *(unsigned int *)(b))
+ | (*(unsigned int *)(a+4) ^ *(unsigned int *)(b+4))
+ | (*(unsigned int *)(a+8) ^ *(unsigned int *)(b+8))
+ | (*(unsigned int *)(a+12) ^ *(unsigned int *)(b+12)));
+ else
+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+ return ((*(unsigned char *)(a) ^ *(unsigned char *)(b))
+ | (*(unsigned char *)(a+1) ^ *(unsigned char *)(b+1))
+ | (*(unsigned char *)(a+2) ^ *(unsigned char *)(b+2))
+ | (*(unsigned char *)(a+3) ^ *(unsigned char *)(b+3))
+ | (*(unsigned char *)(a+4) ^ *(unsigned char *)(b+4))
+ | (*(unsigned char *)(a+5) ^ *(unsigned char *)(b+5))
+ | (*(unsigned char *)(a+6) ^ *(unsigned char *)(b+6))
+ | (*(unsigned char *)(a+7) ^ *(unsigned char *)(b+7))
+ | (*(unsigned char *)(a+8) ^ *(unsigned char *)(b+8))
+ | (*(unsigned char *)(a+9) ^ *(unsigned char *)(b+9))
+ | (*(unsigned char *)(a+10) ^ *(unsigned char *)(b+10))
+ | (*(unsigned char *)(a+11) ^ *(unsigned char *)(b+11))
+ | (*(unsigned char *)(a+12) ^ *(unsigned char *)(b+12))
+ | (*(unsigned char *)(a+13) ^ *(unsigned char *)(b+13))
+ | (*(unsigned char *)(a+14) ^ *(unsigned char *)(b+14))
+ | (*(unsigned char *)(a+15) ^ *(unsigned char *)(b+15)));
+}
+
+/* Compare two areas of memory without leaking timing information,
+ * and with special optimizations for common sizes. Users should
+ * not call this function directly, but should instead use
+ * crypto_memneq defined in crypto/algapi.h.
+ */
+noinline unsigned long __crypto_memneq(const void *a, const void *b,
+ size_t size)
+{
+ switch (size) {
+ case 16:
+ return __crypto_memneq_16(a, b);
+ default:
+ return __crypto_memneq_generic(a, b, size);
+ }
+}
+EXPORT_SYMBOL(__crypto_memneq);
+
+#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */
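The memcmp()-to-crypto_memneq() conversions in the files that follow all take the same shape: an authentication-tag comparison must not reveal, through early exit, how many leading bytes matched. A sketch of the call-site pattern:

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <crypto/algapi.h>      /* declares crypto_memneq() */

    static int verify_tag(const u8 *calc, const u8 *recv, unsigned int len)
    {
            /* nonzero iff the regions differ; runtime is independent of
             * where (or whether) the first difference occurs
             */
            return crypto_memneq(calc, recv, len) ? -EBADMSG : 0;
    }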
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 1ab8258fcf56..001f07cdb828 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1242,6 +1242,10 @@ static int do_test(int m)
ret += tcrypt_test("cmac(des3_ede)");
break;
+ case 155:
+ ret += tcrypt_test("authenc(hmac(sha1),cbc(aes))");
+ break;
+
case 200:
test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 432afc03e7c3..77955507f6f1 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -503,16 +503,16 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
goto out;
}
- sg_init_one(&sg[0], input,
- template[i].ilen + (enc ? authsize : 0));
-
if (diff_dst) {
output = xoutbuf[0];
output += align_offset;
+ sg_init_one(&sg[0], input, template[i].ilen);
sg_init_one(&sgout[0], output,
+ template[i].rlen);
+ } else {
+ sg_init_one(&sg[0], input,
template[i].ilen +
(enc ? authsize : 0));
- } else {
output = input;
}
@@ -612,12 +612,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
memcpy(q, template[i].input + temp,
template[i].tap[k]);
- n = template[i].tap[k];
- if (k == template[i].np - 1 && enc)
- n += authsize;
- if (offset_in_page(q) + n < PAGE_SIZE)
- q[n] = 0;
-
sg_set_buf(&sg[k], q, template[i].tap[k]);
if (diff_dst) {
@@ -625,13 +619,17 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
offset_in_page(IDX[k]);
memset(q, 0, template[i].tap[k]);
- if (offset_in_page(q) + n < PAGE_SIZE)
- q[n] = 0;
sg_set_buf(&sgout[k], q,
template[i].tap[k]);
}
+ n = template[i].tap[k];
+ if (k == template[i].np - 1 && enc)
+ n += authsize;
+ if (offset_in_page(q) + n < PAGE_SIZE)
+ q[n] = 0;
+
temp += template[i].tap[k];
}
@@ -650,10 +648,10 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
goto out;
}
- sg[k - 1].length += authsize;
-
if (diff_dst)
sgout[k - 1].length += authsize;
+ else
+ sg[k - 1].length += authsize;
}
sg_init_table(asg, template[i].anp);
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index c95df0b8c880..4770de5707b9 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -235,17 +235,6 @@ config ACPI_INITRD_TABLE_OVERRIDE
initrd, therefore it's safe to say Y.
See Documentation/acpi/initrd_table_override.txt for details
-config ACPI_BLACKLIST_YEAR
- int "Disable ACPI for systems before Jan 1st this year" if X86_32
- default 0
- help
- Enter a 4-digit year, e.g., 2001, to disable ACPI by default
- on platforms with DMI BIOS date before January 1st that year.
- "acpi=force" can be used to override this mechanism.
-
- Enter 0 to disable this mechanism and allow ACPI to
- run by default no matter what the year. (default)
-
config ACPI_DEBUG
bool "Debug Statements"
default n
@@ -359,7 +348,6 @@ source "drivers/acpi/apei/Kconfig"
config ACPI_EXTLOG
tristate "Extended Error Log support"
depends on X86_MCE && X86_LOCAL_APIC
- select EFI
select UEFI_CPER
default n
help
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index b9f0d5f4bba5..8711e3797165 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -56,7 +56,6 @@ static int ac_sleep_before_get_state_ms;
struct acpi_ac {
struct power_supply charger;
- struct acpi_device *adev;
struct platform_device *pdev;
unsigned long long state;
};
@@ -70,8 +69,9 @@ struct acpi_ac {
static int acpi_ac_get_state(struct acpi_ac *ac)
{
acpi_status status;
+ acpi_handle handle = ACPI_HANDLE(&ac->pdev->dev);
- status = acpi_evaluate_integer(ac->adev->handle, "_PSR", NULL,
+ status = acpi_evaluate_integer(handle, "_PSR", NULL,
&ac->state);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
@@ -119,6 +119,7 @@ static enum power_supply_property ac_props[] = {
static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
{
struct acpi_ac *ac = data;
+ struct acpi_device *adev;
if (!ac)
return;
@@ -141,10 +142,11 @@ static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
msleep(ac_sleep_before_get_state_ms);
acpi_ac_get_state(ac);
- acpi_bus_generate_netlink_event(ac->adev->pnp.device_class,
+ adev = ACPI_COMPANION(&ac->pdev->dev);
+ acpi_bus_generate_netlink_event(adev->pnp.device_class,
dev_name(&ac->pdev->dev),
event, (u32) ac->state);
- acpi_notifier_call_chain(ac->adev, event, (u32) ac->state);
+ acpi_notifier_call_chain(adev, event, (u32) ac->state);
kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
}
@@ -178,8 +180,8 @@ static int acpi_ac_probe(struct platform_device *pdev)
if (!pdev)
return -EINVAL;
- result = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev);
- if (result)
+ adev = ACPI_COMPANION(&pdev->dev);
+ if (!adev)
return -ENODEV;
ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL);
@@ -188,7 +190,6 @@ static int acpi_ac_probe(struct platform_device *pdev)
strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME);
strcpy(acpi_device_class(adev), ACPI_AC_CLASS);
- ac->adev = adev;
ac->pdev = pdev;
platform_set_drvdata(pdev, ac);
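The acpi_ac changes ride on the new ACPI companion infrastructure: instead of caching a struct acpi_device pointer at probe time, the driver derives it from the platform device on demand. The resulting probe-time pattern, sketched with an illustrative driver:

    #include <linux/acpi.h>
    #include <linux/errno.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);

            if (!adev)
                    return -ENODEV; /* no ACPI companion bound to this device */

            /* ACPI_HANDLE(&pdev->dev) now resolves through the same
             * companion, so no handle needs to be cached either.
             */
            return 0;
    }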
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index d3961014aad7..e60390597372 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -162,6 +162,16 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
{ "80860F14", (unsigned long)&byt_sdio_dev_desc },
{ "80860F41", (unsigned long)&byt_i2c_dev_desc },
{ "INT33B2", },
+ { "INT33FC", },
+
+ { "INT3430", (unsigned long)&lpt_dev_desc },
+ { "INT3431", (unsigned long)&lpt_dev_desc },
+ { "INT3432", (unsigned long)&lpt_dev_desc },
+ { "INT3433", (unsigned long)&lpt_dev_desc },
+ { "INT3434", (unsigned long)&lpt_uart_dev_desc },
+ { "INT3435", (unsigned long)&lpt_uart_dev_desc },
+ { "INT3436", (unsigned long)&lpt_sdio_dev_desc },
+ { "INT3437", },
{ }
};
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 8a4cfc7e71f0..dbfe49e5fd63 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -111,7 +111,7 @@ int acpi_create_platform_device(struct acpi_device *adev,
pdevinfo.id = -1;
pdevinfo.res = resources;
pdevinfo.num_res = count;
- pdevinfo.acpi_node.handle = adev->handle;
+ pdevinfo.acpi_node.companion = adev;
pdev = platform_device_register_full(&pdevinfo);
if (IS_ERR(pdev)) {
dev_err(&adev->dev, "platform device creation failed: %ld\n",
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index f691d0e4d9fa..ff97430455cb 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -184,7 +184,7 @@ acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
struct acpi_buffer *output_buffer);
acpi_status
-acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
+acpi_rs_create_aml_resources(struct acpi_buffer *resource_list,
struct acpi_buffer *output_buffer);
acpi_status
@@ -227,8 +227,8 @@ acpi_rs_get_list_length(u8 * aml_buffer,
u32 aml_buffer_length, acpi_size * size_needed);
acpi_status
-acpi_rs_get_aml_length(struct acpi_resource *linked_list_buffer,
- acpi_size * size_needed);
+acpi_rs_get_aml_length(struct acpi_resource *resource_list,
+ acpi_size resource_list_size, acpi_size * size_needed);
acpi_status
acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 243737363fb8..fd1ff54cda19 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -106,6 +106,7 @@ struct acpi_namespace_node *acpi_ns_create_node(u32 name)
void acpi_ns_delete_node(struct acpi_namespace_node *node)
{
union acpi_operand_object *obj_desc;
+ union acpi_operand_object *next_desc;
ACPI_FUNCTION_NAME(ns_delete_node);
@@ -114,12 +115,13 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
acpi_ns_detach_object(node);
/*
- * Delete an attached data object if present (an object that was created
- * and attached via acpi_attach_data). Note: After any normal object is
- * detached above, the only possible remaining object is a data object.
+ * Delete an attached data object list if present (objects that were
+ * attached via acpi_attach_data). Note: After any normal object is
+ * detached above, the only possible remaining object(s) are data
+ * objects, in a linked list.
*/
obj_desc = node->object;
- if (obj_desc && (obj_desc->common.type == ACPI_TYPE_LOCAL_DATA)) {
+ while (obj_desc && (obj_desc->common.type == ACPI_TYPE_LOCAL_DATA)) {
/* Invoke the attached data deletion handler if present */
@@ -127,7 +129,15 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
obj_desc->data.handler(node, obj_desc->data.pointer);
}
+ next_desc = obj_desc->common.next_object;
acpi_ut_remove_reference(obj_desc);
+ obj_desc = next_desc;
+ }
+
+ /* Special case for the statically allocated root node */
+
+ if (node == acpi_gbl_root_node) {
+ return;
}
/* Now we can delete the node */
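acpi_ns_delete_node() now walks a list of attached data objects rather than assuming at most one; because acpi_ut_remove_reference() may free the object, the next pointer is captured first. The generic safe-iteration shape, as used in the hunk above:

    while (obj_desc && obj_desc->common.type == ACPI_TYPE_LOCAL_DATA) {
            union acpi_operand_object *next = obj_desc->common.next_object;

            acpi_ut_remove_reference(obj_desc);     /* may free obj_desc */
            obj_desc = next;                        /* saved before the free */
    }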
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index cc2fea94c5f0..4a0665b6bcc1 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -593,24 +593,26 @@ struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle)
void acpi_ns_terminate(void)
{
- union acpi_operand_object *obj_desc;
+ acpi_status status;
ACPI_FUNCTION_TRACE(ns_terminate);
/*
- * 1) Free the entire namespace -- all nodes and objects
- *
- * Delete all object descriptors attached to namepsace nodes
+ * Free the entire namespace -- all nodes and all objects
+ * attached to the nodes
*/
acpi_ns_delete_namespace_subtree(acpi_gbl_root_node);
- /* Detach any objects attached to the root */
+ /* Delete any objects attached to the root node */
- obj_desc = acpi_ns_get_attached_object(acpi_gbl_root_node);
- if (obj_desc) {
- acpi_ns_detach_object(acpi_gbl_root_node);
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
}
+ acpi_ns_delete_node(acpi_gbl_root_node);
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace freed\n"));
return_VOID;
}
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index b62a0f4f4f9b..b60c9cf82862 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -174,6 +174,7 @@ acpi_rs_stream_option_length(u32 resource_length,
* FUNCTION: acpi_rs_get_aml_length
*
* PARAMETERS: resource - Pointer to the resource linked list
+ * resource_list_size - Size of the resource linked list
* size_needed - Where the required size is returned
*
* RETURN: Status
@@ -185,16 +186,20 @@ acpi_rs_stream_option_length(u32 resource_length,
******************************************************************************/
acpi_status
-acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed)
+acpi_rs_get_aml_length(struct acpi_resource *resource,
+ acpi_size resource_list_size, acpi_size * size_needed)
{
acpi_size aml_size_needed = 0;
+ struct acpi_resource *resource_end;
acpi_rs_length total_size;
ACPI_FUNCTION_TRACE(rs_get_aml_length);
/* Traverse entire list of internal resource descriptors */
- while (resource) {
+ resource_end =
+ ACPI_ADD_PTR(struct acpi_resource, resource, resource_list_size);
+ while (resource < resource_end) {
/* Validate the descriptor type */
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 65f3e1c5b598..3a2ace93e62c 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -418,22 +418,21 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
*
* FUNCTION: acpi_rs_create_aml_resources
*
- * PARAMETERS: linked_list_buffer - Pointer to the resource linked list
- * output_buffer - Pointer to the user's buffer
+ * PARAMETERS: resource_list - Pointer to the resource list buffer
+ * output_buffer - Where the AML buffer is returned
*
* RETURN: Status AE_OK if okay, else a valid acpi_status code.
* If the output_buffer is too small, the error will be
* AE_BUFFER_OVERFLOW and output_buffer->Length will point
* to the size buffer needed.
*
- * DESCRIPTION: Takes the linked list of device resources and
- * creates a bytestream to be used as input for the
- * _SRS control method.
+ * DESCRIPTION: Converts a list of device resources to an AML bytestream
+ * to be used as input for the _SRS control method.
*
******************************************************************************/
acpi_status
-acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
+acpi_rs_create_aml_resources(struct acpi_buffer *resource_list,
struct acpi_buffer *output_buffer)
{
acpi_status status;
@@ -441,16 +440,16 @@ acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
ACPI_FUNCTION_TRACE(rs_create_aml_resources);
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "LinkedListBuffer = %p\n",
- linked_list_buffer));
+ /* Params already validated, no need to re-validate here */
- /*
- * Params already validated, so we don't re-validate here
- *
- * Pass the linked_list_buffer into a module that calculates
- * the buffer size needed for the byte stream.
- */
- status = acpi_rs_get_aml_length(linked_list_buffer, &aml_size_needed);
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ResourceList Buffer = %p\n",
+ resource_list->pointer));
+
+ /* Get the buffer size needed for the AML byte stream */
+
+ status = acpi_rs_get_aml_length(resource_list->pointer,
+ resource_list->length,
+ &aml_size_needed);
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlSizeNeeded=%X, %s\n",
(u32)aml_size_needed, acpi_format_exception(status)));
@@ -467,10 +466,9 @@ acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
/* Do the conversion */
- status =
- acpi_rs_convert_resources_to_aml(linked_list_buffer,
- aml_size_needed,
- output_buffer->pointer);
+ status = acpi_rs_convert_resources_to_aml(resource_list->pointer,
+ aml_size_needed,
+ output_buffer->pointer);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index aef303d56d86..14a7982c9961 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -753,7 +753,7 @@ acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
* Convert the linked list into a byte stream
*/
buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
- status = acpi_rs_create_aml_resources(in_buffer->pointer, &buffer);
+ status = acpi_rs_create_aml_resources(in_buffer, &buffer);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 1a67b3944b3b..03ae8affe48f 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -185,6 +185,7 @@ acpi_debug_print(u32 requested_debug_level,
}
acpi_gbl_prev_thread_id = thread_id;
+ acpi_gbl_nesting_level = 0;
}
/*
@@ -193,13 +194,21 @@ acpi_debug_print(u32 requested_debug_level,
*/
acpi_os_printf("%9s-%04ld ", module_name, line_number);
+#ifdef ACPI_EXEC_APP
+ /*
+ * For acpi_exec only, emit the thread ID and nesting level.
+ * Note: nesting level is really only useful during a single-thread
+ * execution. Otherwise, multiple threads will keep resetting the
+ * level.
+ */
if (ACPI_LV_THREADS & acpi_dbg_level) {
acpi_os_printf("[%u] ", (u32)thread_id);
}
- acpi_os_printf("[%02ld] %-22.22s: ",
- acpi_gbl_nesting_level,
- acpi_ut_trim_function_name(function_name));
+ acpi_os_printf("[%02ld] ", acpi_gbl_nesting_level);
+#endif
+
+ acpi_os_printf("%-22.22s: ", acpi_ut_trim_function_name(function_name));
va_start(args, format);
acpi_os_vprintf(format, args);
@@ -420,7 +429,9 @@ acpi_ut_exit(u32 line_number,
component_id, "%s\n", acpi_gbl_fn_exit_str);
}
- acpi_gbl_nesting_level--;
+ if (acpi_gbl_nesting_level) {
+ acpi_gbl_nesting_level--;
+ }
}
ACPI_EXPORT_SYMBOL(acpi_ut_exit)
@@ -467,7 +478,9 @@ acpi_ut_status_exit(u32 line_number,
}
}
- acpi_gbl_nesting_level--;
+ if (acpi_gbl_nesting_level) {
+ acpi_gbl_nesting_level--;
+ }
}
ACPI_EXPORT_SYMBOL(acpi_ut_status_exit)
@@ -504,7 +517,9 @@ acpi_ut_value_exit(u32 line_number,
ACPI_FORMAT_UINT64(value));
}
- acpi_gbl_nesting_level--;
+ if (acpi_gbl_nesting_level) {
+ acpi_gbl_nesting_level--;
+ }
}
ACPI_EXPORT_SYMBOL(acpi_ut_value_exit)
@@ -540,7 +555,9 @@ acpi_ut_ptr_exit(u32 line_number,
ptr);
}
- acpi_gbl_nesting_level--;
+ if (acpi_gbl_nesting_level) {
+ acpi_gbl_nesting_level--;
+ }
}
#endif
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index 786294bb682c..3650b2183227 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -2,7 +2,6 @@ config ACPI_APEI
bool "ACPI Platform Error Interface (APEI)"
select MISC_FILESYSTEMS
select PSTORE
- select EFI
select UEFI_CPER
depends on X86
help
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 26311f23c824..cb1d557fc22c 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -942,6 +942,7 @@ static int erst_clearer(enum pstore_type_id type, u64 id, int count,
static struct pstore_info erst_info = {
.owner = THIS_MODULE,
.name = "erst",
+ .flags = PSTORE_FLAGS_FRAGILE,
.open = erst_open_pstore,
.close = erst_close_pstore,
.read = erst_reader,
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index fb848378d582..078c4f7fe2dd 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -75,39 +75,6 @@ static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
{""}
};
-#if CONFIG_ACPI_BLACKLIST_YEAR
-
-static int __init blacklist_by_year(void)
-{
- int year;
-
- /* Doesn't exist? Likely an old system */
- if (!dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL)) {
- printk(KERN_ERR PREFIX "no DMI BIOS year, "
- "acpi=force is required to enable ACPI\n" );
- return 1;
- }
- /* 0? Likely a buggy new BIOS */
- if (year == 0) {
- printk(KERN_ERR PREFIX "DMI BIOS year==0, "
- "assuming ACPI-capable machine\n" );
- return 0;
- }
- if (year < CONFIG_ACPI_BLACKLIST_YEAR) {
- printk(KERN_ERR PREFIX "BIOS age (%d) fails cutoff (%d), "
- "acpi=force is required to enable ACPI\n",
- year, CONFIG_ACPI_BLACKLIST_YEAR);
- return 1;
- }
- return 0;
-}
-#else
-static inline int blacklist_by_year(void)
-{
- return 0;
-}
-#endif
-
int __init acpi_blacklisted(void)
{
int i = 0;
@@ -166,8 +133,6 @@ int __init acpi_blacklisted(void)
}
}
- blacklisted += blacklist_by_year();
-
dmi_check_system(acpi_osi_dmi_table);
return blacklisted;
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index d42b2fb5a7e9..b3480cf7db1a 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -22,16 +22,12 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-#include <linux/device.h>
+#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
-#include <acpi/acpi.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-
#include "internal.h"
#define _COMPONENT ACPI_POWER_COMPONENT
@@ -548,7 +544,7 @@ static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev,
*/
int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
{
- acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
+ acpi_handle handle = ACPI_HANDLE(dev);
struct acpi_device *adev;
int ret, d_min, d_max;
@@ -656,7 +652,7 @@ int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
if (!device_run_wake(phys_dev))
return -EINVAL;
- handle = DEVICE_ACPI_HANDLE(phys_dev);
+ handle = ACPI_HANDLE(phys_dev);
if (!handle || acpi_bus_get_device(handle, &adev)) {
dev_dbg(phys_dev, "ACPI handle without context in %s!\n",
__func__);
@@ -700,7 +696,7 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
if (!device_can_wakeup(dev))
return -EINVAL;
- handle = DEVICE_ACPI_HANDLE(dev);
+ handle = ACPI_HANDLE(dev);
if (!handle || acpi_bus_get_device(handle, &adev)) {
dev_dbg(dev, "ACPI handle without context in %s!\n", __func__);
return -ENODEV;
@@ -722,7 +718,7 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
*/
struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
{
- acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
+ acpi_handle handle = ACPI_HANDLE(dev);
struct acpi_device *adev;
return handle && !acpi_bus_get_device(handle, &adev) ? adev : NULL;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index d5309fd49458..ba5b56db9d27 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -173,9 +173,10 @@ static void start_transaction(struct acpi_ec *ec)
static void advance_transaction(struct acpi_ec *ec, u8 status)
{
unsigned long flags;
- struct transaction *t = ec->curr;
+ struct transaction *t;
spin_lock_irqsave(&ec->lock, flags);
+ t = ec->curr;
if (!t)
goto unlock;
if (t->wlen > t->wi) {
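The advance_transaction() change above closes a check-then-use race: ec->curr was loaded before ec->lock was taken, so another CPU could replace or free the transaction in the window between the load and the lock. A userspace analogue of the corrected pattern (pthreads; the names are invented for the sketch):

    #include <pthread.h>
    #include <stddef.h>

    struct transaction { int wlen, wi; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct transaction *curr;  /* shared with writer threads */

    static void advance(void)
    {
        struct transaction *t;

        pthread_mutex_lock(&lock);
        t = curr;                 /* load the pointer only under the lock */
        if (!t)
            goto unlock;
        t->wi++;                  /* safe: curr cannot change under us */
    unlock:
        pthread_mutex_unlock(&lock);
    }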
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 10f0f40587bb..a22a295edb69 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -197,30 +197,28 @@ static void acpi_physnode_link_name(char *buf, unsigned int node_id)
int acpi_bind_one(struct device *dev, acpi_handle handle)
{
- struct acpi_device *acpi_dev;
- acpi_status status;
+ struct acpi_device *acpi_dev = NULL;
struct acpi_device_physical_node *physical_node, *pn;
char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
struct list_head *physnode_list;
unsigned int node_id;
int retval = -EINVAL;
- if (ACPI_HANDLE(dev)) {
+ if (ACPI_COMPANION(dev)) {
if (handle) {
- dev_warn(dev, "ACPI handle is already set\n");
+ dev_warn(dev, "ACPI companion already set\n");
return -EINVAL;
} else {
- handle = ACPI_HANDLE(dev);
+ acpi_dev = ACPI_COMPANION(dev);
}
+ } else {
+ acpi_bus_get_device(handle, &acpi_dev);
}
- if (!handle)
+ if (!acpi_dev)
return -EINVAL;
+ get_device(&acpi_dev->dev);
get_device(dev);
- status = acpi_bus_get_device(handle, &acpi_dev);
- if (ACPI_FAILURE(status))
- goto err;
-
physical_node = kzalloc(sizeof(*physical_node), GFP_KERNEL);
if (!physical_node) {
retval = -ENOMEM;
@@ -242,10 +240,11 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
dev_warn(dev, "Already associated with ACPI node\n");
kfree(physical_node);
- if (ACPI_HANDLE(dev) != handle)
+ if (ACPI_COMPANION(dev) != acpi_dev)
goto err;
put_device(dev);
+ put_device(&acpi_dev->dev);
return 0;
}
if (pn->node_id == node_id) {
@@ -259,8 +258,8 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
list_add(&physical_node->node, physnode_list);
acpi_dev->physical_node_count++;
- if (!ACPI_HANDLE(dev))
- ACPI_HANDLE_SET(dev, acpi_dev->handle);
+ if (!ACPI_COMPANION(dev))
+ ACPI_COMPANION_SET(dev, acpi_dev);
acpi_physnode_link_name(physical_node_name, node_id);
retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
@@ -283,27 +282,21 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
return 0;
err:
- ACPI_HANDLE_SET(dev, NULL);
+ ACPI_COMPANION_SET(dev, NULL);
put_device(dev);
+ put_device(&acpi_dev->dev);
return retval;
}
EXPORT_SYMBOL_GPL(acpi_bind_one);
int acpi_unbind_one(struct device *dev)
{
+ struct acpi_device *acpi_dev = ACPI_COMPANION(dev);
struct acpi_device_physical_node *entry;
- struct acpi_device *acpi_dev;
- acpi_status status;
- if (!ACPI_HANDLE(dev))
+ if (!acpi_dev)
return 0;
- status = acpi_bus_get_device(ACPI_HANDLE(dev), &acpi_dev);
- if (ACPI_FAILURE(status)) {
- dev_err(dev, "Oops, ACPI handle corrupt in %s()\n", __func__);
- return -EINVAL;
- }
-
mutex_lock(&acpi_dev->physical_node_lock);
list_for_each_entry(entry, &acpi_dev->physical_node_list, node)
@@ -316,9 +309,10 @@ int acpi_unbind_one(struct device *dev)
acpi_physnode_link_name(physnode_name, entry->node_id);
sysfs_remove_link(&acpi_dev->dev.kobj, physnode_name);
sysfs_remove_link(&dev->kobj, "firmware_node");
- ACPI_HANDLE_SET(dev, NULL);
- /* acpi_bind_one() increase refcnt by one. */
+ ACPI_COMPANION_SET(dev, NULL);
+ /* Drop references taken by acpi_bind_one(). */
put_device(dev);
+ put_device(&acpi_dev->dev);
kfree(entry);
break;
}
@@ -328,6 +322,15 @@ int acpi_unbind_one(struct device *dev)
}
EXPORT_SYMBOL_GPL(acpi_unbind_one);
+void acpi_preset_companion(struct device *dev, acpi_handle parent, u64 addr)
+{
+ struct acpi_device *adev;
+
+ if (!acpi_bus_get_device(acpi_get_child(parent, addr), &adev))
+ ACPI_COMPANION_SET(dev, adev);
+}
+EXPORT_SYMBOL_GPL(acpi_preset_companion);
+
static int acpi_platform_notify(struct device *dev)
{
struct acpi_bus_type *type = acpi_get_bus_type(dev);
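The new acpi_preset_companion() helper above folds the acpi_get_child() lookup and ACPI_COMPANION_SET() into a single call, so callers no longer store raw handles on the physical device. A hedged sketch of the intended call-site conversion (mirroring the libata changes later in this diff; the surrounding driver fragment is hypothetical):

    #include <linux/acpi.h>
    #include <linux/device.h>

    static void example_bind(struct device *dev, acpi_handle parent, u64 addr)
    {
        /* old idiom: ACPI_HANDLE_SET(dev, acpi_get_child(parent, addr)); */
        acpi_preset_companion(dev, parent, addr);  /* no-op if no such child */
    }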
diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c
index 266bc58ce0ce..386a9fe497b4 100644
--- a/drivers/acpi/nvs.c
+++ b/drivers/acpi/nvs.c
@@ -13,7 +13,6 @@
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_io.h>
-#include <acpi/acpiosxf.h>
/* ACPI NVS regions, APEI may use it */
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 56f05869b08d..20360e480bd8 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -65,6 +65,9 @@ static struct acpi_scan_handler pci_root_handler = {
.ids = root_device_ids,
.attach = acpi_pci_root_add,
.detach = acpi_pci_root_remove,
+ .hotplug = {
+ .ignore = true,
+ },
};
static DEFINE_MUTEX(osc_lock);
@@ -575,6 +578,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
dev_err(&device->dev,
"Bus %04x:%02x not present in PCI namespace\n",
root->segment, (unsigned int)root->secondary.start);
+ device->driver_data = NULL;
result = -ENODEV;
goto end;
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 55f9dedbbf9f..fd39459926b1 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -289,24 +289,17 @@ void acpi_bus_device_eject(void *data, u32 ost_src)
{
struct acpi_device *device = data;
acpi_handle handle = device->handle;
- struct acpi_scan_handler *handler;
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
int error;
lock_device_hotplug();
mutex_lock(&acpi_scan_lock);
- handler = device->handler;
- if (!handler || !handler->hotplug.enabled) {
- put_device(&device->dev);
- goto err_support;
- }
-
if (ost_src == ACPI_NOTIFY_EJECT_REQUEST)
acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST,
ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
- if (handler->hotplug.mode == AHM_CONTAINER)
+ if (device->handler && device->handler->hotplug.mode == AHM_CONTAINER)
kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
error = acpi_scan_hot_remove(device);
@@ -411,8 +404,7 @@ static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
break;
case ACPI_NOTIFY_EJECT_REQUEST:
acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
- status = acpi_bus_get_device(handle, &adev);
- if (ACPI_FAILURE(status))
+ if (acpi_bus_get_device(handle, &adev))
goto err_out;
get_device(&adev->dev);
@@ -1780,7 +1772,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type)
*/
list_for_each_entry(hwid, &pnp.ids, list) {
handler = acpi_scan_match_handler(hwid->id, NULL);
- if (handler) {
+ if (handler && !handler->hotplug.ignore) {
acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
acpi_hotplug_notify_cb, handler);
break;
@@ -1997,6 +1989,7 @@ static int acpi_bus_scan_fixed(void)
if (result)
return result;
+ device->flags.match_driver = true;
result = device_attach(&device->dev);
if (result < 0)
return result;
@@ -2013,6 +2006,7 @@ static int acpi_bus_scan_fixed(void)
if (result)
return result;
+ device->flags.match_driver = true;
result = device_attach(&device->dev);
}
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 14df30580e15..721e949e606e 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -525,7 +525,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
* generate wakeup events.
*/
if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
- acpi_event_status pwr_btn_status;
+ acpi_event_status pwr_btn_status = ACPI_EVENT_FLAG_DISABLED;
acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index db5293650f62..6dbc3ca45223 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -309,7 +309,7 @@ static void acpi_table_attr_init(struct acpi_table_attr *table_attr,
sprintf(table_attr->name + ACPI_NAME_SIZE, "%d",
table_attr->instance);
- table_attr->attr.size = 0;
+ table_attr->attr.size = table_header->length;
table_attr->attr.read = acpi_table_show;
table_attr->attr.attr.name = table_attr->name;
table_attr->attr.attr.mode = 0400;
@@ -354,8 +354,9 @@ static int acpi_tables_sysfs_init(void)
{
struct acpi_table_attr *table_attr;
struct acpi_table_header *table_header = NULL;
- int table_index = 0;
- int result;
+ int table_index;
+ acpi_status status;
+ int ret;
tables_kobj = kobject_create_and_add("tables", acpi_kobj);
if (!tables_kobj)
@@ -365,33 +366,34 @@ static int acpi_tables_sysfs_init(void)
if (!dynamic_tables_kobj)
goto err_dynamic_tables;
- do {
- result = acpi_get_table_by_index(table_index, &table_header);
- if (!result) {
- table_index++;
- table_attr = NULL;
- table_attr =
- kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL);
- if (!table_attr)
- return -ENOMEM;
-
- acpi_table_attr_init(table_attr, table_header);
- result =
- sysfs_create_bin_file(tables_kobj,
- &table_attr->attr);
- if (result) {
- kfree(table_attr);
- return result;
- } else
- list_add_tail(&table_attr->node,
- &acpi_table_attr_list);
+ for (table_index = 0;; table_index++) {
+ status = acpi_get_table_by_index(table_index, &table_header);
+
+ if (status == AE_BAD_PARAMETER)
+ break;
+
+ if (ACPI_FAILURE(status))
+ continue;
+
+ table_attr = NULL;
+ table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
+ if (!table_attr)
+ return -ENOMEM;
+
+ acpi_table_attr_init(table_attr, table_header);
+ ret = sysfs_create_bin_file(tables_kobj, &table_attr->attr);
+ if (ret) {
+ kfree(table_attr);
+ return ret;
}
- } while (!result);
+ list_add_tail(&table_attr->node, &acpi_table_attr_list);
+ }
+
kobject_uevent(tables_kobj, KOBJ_ADD);
kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);
- result = acpi_install_table_handler(acpi_sysfs_table_handler, NULL);
+ status = acpi_install_table_handler(acpi_sysfs_table_handler, NULL);
- return result == AE_OK ? 0 : -EINVAL;
+ return ACPI_FAILURE(status) ? -EINVAL : 0;
err_dynamic_tables:
kobject_put(tables_kobj);
err:
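The rewritten loop above encodes the acpi_get_table_by_index() contract: an out-of-range index returns AE_BAD_PARAMETER (stop iterating), while any other failure means only that slot is unusable (skip it and continue). A standalone sketch of that probe-until-out-of-range shape (plain C, invented status codes):

    #include <stdio.h>

    enum status { OK, BAD_SLOT, OUT_OF_RANGE };

    /* Stand-in for acpi_get_table_by_index(): five slots, slot 2 is bad. */
    static enum status get_by_index(int i)
    {
        if (i >= 5)
            return OUT_OF_RANGE;
        return (i == 2) ? BAD_SLOT : OK;
    }

    int main(void)
    {
        int i;

        for (i = 0;; i++) {
            enum status s = get_by_index(i);

            if (s == OUT_OF_RANGE)
                break;            /* end of the list */
            if (s != OK)
                continue;         /* skip the bad slot, keep scanning */
            printf("registered slot %d\n", i);
        }
        return 0;
    }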
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 18dbdff4656e..995e91bcb97b 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -82,13 +82,6 @@ static bool allow_duplicates;
module_param(allow_duplicates, bool, 0644);
/*
- * Some BIOSes claim they use minimum backlight at boot,
- * and this may bring dimming screen after boot
- */
-static bool use_bios_initial_backlight = 1;
-module_param(use_bios_initial_backlight, bool, 0644);
-
-/*
* For Windows 8 systems: if set true and the GPU driver has
* registered a backlight interface, skip registering ACPI video's.
*/
@@ -406,12 +399,6 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d)
return 0;
}
-static int video_ignore_initial_backlight(const struct dmi_system_id *d)
-{
- use_bios_initial_backlight = 0;
- return 0;
-}
-
static struct dmi_system_id video_dmi_table[] __initdata = {
/*
* Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
@@ -456,54 +443,6 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
},
},
- {
- .callback = video_ignore_initial_backlight,
- .ident = "HP Folio 13-2000",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"),
- },
- },
- {
- .callback = video_ignore_initial_backlight,
- .ident = "Fujitsu E753",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E753"),
- },
- },
- {
- .callback = video_ignore_initial_backlight,
- .ident = "HP Pavilion dm4",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"),
- },
- },
- {
- .callback = video_ignore_initial_backlight,
- .ident = "HP Pavilion g6 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"),
- },
- },
- {
- .callback = video_ignore_initial_backlight,
- .ident = "HP 1000 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"),
- },
- },
- {
- .callback = video_ignore_initial_backlight,
- .ident = "HP Pavilion m4",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion m4 Notebook PC"),
- },
- },
{}
};
@@ -839,20 +778,18 @@ acpi_video_init_brightness(struct acpi_video_device *device)
if (!device->cap._BQC)
goto set_level;
- if (use_bios_initial_backlight) {
- level = acpi_video_bqc_value_to_level(device, level_old);
- /*
- * On some buggy laptops, _BQC returns an uninitialized
- * value when invoked for the first time, i.e.
- * level_old is invalid (no matter whether it's a level
- * or an index). Set the backlight to max_level in this case.
- */
- for (i = 2; i < br->count; i++)
- if (level == br->levels[i])
- break;
- if (i == br->count || !level)
- level = max_level;
- }
+ level = acpi_video_bqc_value_to_level(device, level_old);
+ /*
+ * On some buggy laptops, _BQC returns an uninitialized
+ * value when invoked for the first time, i.e.
+ * level_old is invalid (no matter whether it's a level
+ * or an index). Set the backlight to max_level in this case.
+ */
+ for (i = 2; i < br->count; i++)
+ if (level == br->levels[i])
+ break;
+ if (i == br->count || !level)
+ level = max_level;
set_level:
result = acpi_video_device_lcd_set_level(device, level);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index e2903d03180e..c0ed4f273cf2 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -435,6 +435,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
.driver_data = board_ahci_yes_fbs },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
+ .driver_data = board_ahci_yes_fbs },
/* Promise */
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
@@ -1236,15 +1238,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- /* AHCI controllers often implement SFF compatible interface.
- * Grab all PCI BARs just in case.
- */
- rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
- if (rc == -EBUSY)
- pcim_pin_device(pdev);
- if (rc)
- return rc;
-
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == 0x2652 || pdev->device == 0x2653)) {
u8 map;
@@ -1261,6 +1254,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
+ /* AHCI controllers often implement SFF compatible interface.
+ * Grab all PCI BARs just in case.
+ */
+ rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
+ if (rc == -EBUSY)
+ pcim_pin_device(pdev);
+ if (rc)
+ return rc;
+
hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv)
return -ENOMEM;
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index ae2d73fe321e..3e23e9941dad 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -113,7 +113,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
/*
* set PHY Parameters, two steps to configure the GPR13,
* one write for rest of parameters, mask of first write
- * is 0x07fffffd, and the other one write for setting
+ * is 0x07ffffff, and the other one write for setting
* the mpll_clk_en.
*/
regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK
@@ -124,6 +124,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
| IMX6Q_GPR13_SATA_TX_ATTEN_MASK
| IMX6Q_GPR13_SATA_TX_BOOST_MASK
| IMX6Q_GPR13_SATA_TX_LVL_MASK
+ | IMX6Q_GPR13_SATA_MPLL_CLK_EN
| IMX6Q_GPR13_SATA_TX_EDGE_RATE
, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB
| IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index f9554318504f..4b231baceb09 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -329,6 +329,7 @@ static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_suspend, ahci_resume);
static const struct of_device_id ahci_of_match[] = {
{ .compatible = "snps,spear-ahci", },
{ .compatible = "snps,exynos5440-ahci", },
+ { .compatible = "ibm,476gtr-ahci", },
{},
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index ab714d2ad978..4372cfa883c9 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -185,7 +185,7 @@ void ata_acpi_bind_port(struct ata_port *ap)
if (libata_noacpi || ap->flags & ATA_FLAG_ACPI_SATA || !host_handle)
return;
- ACPI_HANDLE_SET(&ap->tdev, acpi_get_child(host_handle, ap->port_no));
+ acpi_preset_companion(&ap->tdev, host_handle, ap->port_no);
if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0)
ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
@@ -222,7 +222,7 @@ void ata_acpi_bind_dev(struct ata_device *dev)
parent_handle = port_handle;
}
- ACPI_HANDLE_SET(&dev->tdev, acpi_get_child(parent_handle, adr));
+ acpi_preset_companion(&dev->tdev, parent_handle, adr);
register_hotplug_dock_device(ata_dev_acpi_handle(dev),
&ata_acpi_dev_dock_ops, dev, NULL, NULL);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 81a94a3919db..1393a5890ed5 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2149,9 +2149,16 @@ static int ata_dev_config_ncq(struct ata_device *dev,
"failed to get NCQ Send/Recv Log Emask 0x%x\n",
err_mask);
} else {
+ u8 *cmds = dev->ncq_send_recv_cmds;
+
dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
- memcpy(dev->ncq_send_recv_cmds, ap->sector_buf,
- ATA_LOG_NCQ_SEND_RECV_SIZE);
+ memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
+
+ if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
+ ata_dev_dbg(dev, "disabling queued TRIM support\n");
+ cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
+ ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
+ }
}
}
@@ -4156,6 +4163,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
+ /* Seagate Momentus SpinPoint M8 seems to have FPDMA_AA issues */
+ { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+
/* Blacklist entries taken from Silicon Image 3124/3132
Windows driver .inf file - also several Linux problem reports */
{ "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
@@ -4202,6 +4212,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
+ /* devices that don't properly handle queued TRIM commands */
+ { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+ { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+
/* End Marker */
{ }
};
@@ -6304,10 +6318,9 @@ static void ata_port_detach(struct ata_port *ap)
for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
ata_tlink_delete(&ap->pmp_link[i]);
}
- ata_tport_delete(ap);
-
/* remove the associated SCSI host */
scsi_remove_host(ap->scsi_host);
+ ata_tport_delete(ap);
}
/**
@@ -6520,6 +6533,7 @@ static int __init ata_parse_force_one(char **cur,
{ "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
{ "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
{ "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
+ { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
};
char *start = *cur, *p = *cur;
char *id, *val, *endp;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index db6dfcfa3e2e..377eb889f555 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3625,6 +3625,7 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
shost->max_lun = 1;
shost->max_channel = 1;
shost->max_cmd_len = 16;
+ shost->no_write_same = 1;
/* Schedule policy is determined by ->qc_defer()
* callback and it needs to see every deferred qc.
@@ -3871,6 +3872,27 @@ void ata_scsi_hotplug(struct work_struct *work)
return;
}
+ /*
+ * XXX - UGLY HACK
+ *
+ * The block layer suspend/resume path is fundamentally broken due
+ * to freezable kthreads and workqueues and may deadlock if a block
+ * device gets removed while resume is in progress. I don't know
+ * what the solution is short of removing freezable kthreads and
+ * workqueues altogether.
+ *
+ * The following is an ugly hack to avoid kicking off device
+ * removal while freezer is active. This is a joke but does avoid
+ * this particular deadlock scenario.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=62801
+ * http://marc.info/?l=linux-kernel&m=138695698516487
+ */
+#ifdef CONFIG_FREEZER
+ while (pm_freezing)
+ msleep(10);
+#endif
+
DPRINTK("ENTER\n");
mutex_lock(&ap->scsi_scan_mutex);
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index 68f9e3293e9c..88949c6d55dd 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -88,15 +88,13 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
static bool odd_can_poweroff(struct ata_device *ata_dev)
{
acpi_handle handle;
- acpi_status status;
struct acpi_device *acpi_dev;
handle = ata_dev_acpi_handle(ata_dev);
if (!handle)
return false;
- status = acpi_bus_get_device(handle, &acpi_dev);
- if (ACPI_FAILURE(status))
+ if (acpi_bus_get_device(handle, &acpi_dev))
return false;
return acpi_device_can_poweroff(acpi_dev);
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 853f610af28f..73492dd4a4bc 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -319,6 +319,7 @@ static int cf_init(struct arasan_cf_dev *acdev)
ret = clk_set_rate(acdev->clk, 166000000);
if (ret) {
dev_warn(acdev->host->dev, "clock set rate failed");
+ clk_disable_unprepare(acdev->clk);
return ret;
}
@@ -396,8 +397,7 @@ dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len)
struct dma_async_tx_descriptor *tx;
struct dma_chan *chan = acdev->dma_chan;
dma_cookie_t cookie;
- unsigned long flags = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP |
- DMA_COMPL_SKIP_DEST_UNMAP;
+ unsigned long flags = DMA_PREP_INTERRUPT;
int ret = 0;
tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags);
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 8557adcd34ee..aa6be2698669 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -419,7 +419,6 @@ static void he_remove_one(struct pci_dev *pci_dev)
atm_dev_deregister(atm_dev);
kfree(he_dev);
- pci_set_drvdata(pci_dev, NULL);
pci_disable_device(pci_dev);
}
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 5aca5f4c5458..9587e959ce1a 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -52,6 +52,7 @@
#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>
+#include <linux/etherdevice.h>
#include "nicstar.h"
#ifdef CONFIG_ATM_NICSTAR_USE_SUNI
#include "suni.h"
@@ -781,8 +782,7 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
if (mac[i] == NULL || !mac_pton(mac[i], card->atmdev->esi)) {
nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
card->atmdev->esi, 6);
- if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) ==
- 0) {
+ if (ether_addr_equal(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00")) {
nicstar_read_eprom(card->membase,
NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT,
card->atmdev->esi, 6);
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 32784d18d1f7..e3fb496c7163 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -1335,7 +1335,6 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
out_unmap_both:
kfree(card->dma_bounce);
- pci_set_drvdata(dev, NULL);
pci_iounmap(dev, card->buffers);
out_unmap_config:
pci_iounmap(dev, card->config_regs);
@@ -1457,7 +1456,6 @@ static void fpga_remove(struct pci_dev *dev)
pci_release_regions(dev);
pci_disable_device(dev);
- pci_set_drvdata(dev, NULL);
kfree(card);
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 47051cd25113..3a94b799f166 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -432,7 +432,7 @@ struct platform_device *platform_device_register_full(
goto err_alloc;
pdev->dev.parent = pdevinfo->parent;
- ACPI_HANDLE_SET(&pdev->dev, pdevinfo->acpi_node.handle);
+ ACPI_COMPANION_SET(&pdev->dev, pdevinfo->acpi_node.companion);
if (pdevinfo->dma_mask) {
/*
@@ -463,7 +463,7 @@ struct platform_device *platform_device_register_full(
ret = platform_device_add(pdev);
if (ret) {
err:
- ACPI_HANDLE_SET(&pdev->dev, NULL);
+ ACPI_COMPANION_SET(&pdev->dev, NULL);
kfree(pdev->dev.dma_mask);
err_alloc:
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c12e9b9556be..1b41fca3d65a 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1350,6 +1350,9 @@ static int device_prepare(struct device *dev, pm_message_t state)
device_unlock(dev);
+ if (error)
+ pm_runtime_put(dev);
+
return error;
}
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 98745dd77e8c..81f977510775 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -40,7 +40,7 @@ static int regmap_mmio_gather_write(void *context,
BUG_ON(reg_size != 4);
- if (ctx->clk) {
+ if (!IS_ERR(ctx->clk)) {
ret = clk_enable(ctx->clk);
if (ret < 0)
return ret;
@@ -73,7 +73,7 @@ static int regmap_mmio_gather_write(void *context,
offset += ctx->val_bytes;
}
- if (ctx->clk)
+ if (!IS_ERR(ctx->clk))
clk_disable(ctx->clk);
return 0;
@@ -96,7 +96,7 @@ static int regmap_mmio_read(void *context,
BUG_ON(reg_size != 4);
- if (ctx->clk) {
+ if (!IS_ERR(ctx->clk)) {
ret = clk_enable(ctx->clk);
if (ret < 0)
return ret;
@@ -129,7 +129,7 @@ static int regmap_mmio_read(void *context,
offset += ctx->val_bytes;
}
- if (ctx->clk)
+ if (!IS_ERR(ctx->clk))
clk_disable(ctx->clk);
return 0;
@@ -139,7 +139,7 @@ static void regmap_mmio_free_context(void *context)
{
struct regmap_mmio_context *ctx = context;
- if (ctx->clk) {
+ if (!IS_ERR(ctx->clk)) {
clk_unprepare(ctx->clk);
clk_put(ctx->clk);
}
@@ -209,6 +209,7 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
ctx->regs = regs;
ctx->val_bytes = config->val_bits / 8;
+ ctx->clk = ERR_PTR(-ENODEV);
if (clk_id == NULL)
return ctx;
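The regmap-mmio hunks above replace a NULL test on the optional clock with an ERR_PTR sentinel: clk_get() returns an error pointer on failure, and the context now records ERR_PTR(-ENODEV) when no clock was requested, so IS_ERR() is the only correct "do we have a clock" check. A self-contained sketch of the sentinel convention (simplified stand-ins for the kernel's ERR_PTR/IS_ERR helpers, illustrative only):

    #include <stdio.h>

    #define MAX_ERRNO    4095
    #define ERR_PTR(err) ((void *)(long)(err))
    #define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    int main(void)
    {
        void *clk = ERR_PTR(-19);  /* -ENODEV: no optional clock */

        if (!IS_ERR(clk))
            puts("enable the clock");  /* skipped for the sentinel */
        else
            puts("no optional clock, carry on");
        return 0;
    }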
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 9c021d9cace0..c2e002100949 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1549,7 +1549,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
val + (i * val_bytes),
val_bytes);
if (ret != 0)
- return ret;
+ goto out;
}
} else {
ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
@@ -1743,7 +1743,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
/**
* regmap_read(): Read a value from a single register
*
- * @map: Register map to write to
+ * @map: Register map to read from
* @reg: Register to be read from
* @val: Pointer to store read value
*
@@ -1770,7 +1770,7 @@ EXPORT_SYMBOL_GPL(regmap_read);
/**
* regmap_raw_read(): Read raw data from the device
*
- * @map: Register map to write to
+ * @map: Register map to read from
* @reg: First register to be read from
* @val: Pointer to store read value
* @val_len: Size of data to read
@@ -1882,7 +1882,7 @@ EXPORT_SYMBOL_GPL(regmap_fields_read);
/**
* regmap_bulk_read(): Read multiple registers from the device
*
- * @map: Register map to write to
+ * @map: Register map to read from
* @reg: First register to be read from
* @val: Pointer to store read value, in native register size for device
* @val_count: Number of registers to read
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 0215f9ad755c..09b632ad0fe2 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -33,8 +33,6 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
int bcma_bus_suspend(struct bcma_bus *bus);
int bcma_bus_resume(struct bcma_bus *bus);
#endif
-struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
- u8 unit);
/* scan.c */
int bcma_bus_scan(struct bcma_bus *bus);
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 5a9f6bdc88f1..34ea4c588d36 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -78,18 +78,6 @@ static u16 bcma_cc_core_id(struct bcma_bus *bus)
return BCMA_CORE_CHIPCOMMON;
}
-struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid)
-{
- struct bcma_device *core;
-
- list_for_each_entry(core, &bus->cores, list) {
- if (core->id.id == coreid)
- return core;
- }
- return NULL;
-}
-EXPORT_SYMBOL_GPL(bcma_find_core);
-
struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
u8 unit)
{
@@ -101,6 +89,7 @@ struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
}
return NULL;
}
+EXPORT_SYMBOL_GPL(bcma_find_core_unit);
bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
int timeout)
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index b5d842370cc9..a2e69d26266d 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -1,4 +1,5 @@
#include <linux/module.h>
+
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
@@ -65,7 +66,7 @@ enum {
NULL_Q_MQ = 2,
};
-static int submit_queues = 1;
+static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");
@@ -101,9 +102,9 @@ static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
-static bool use_per_node_hctx = true;
+static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
-MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: true");
+MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
@@ -223,7 +224,7 @@ static void null_softirq_done_fn(struct request *rq)
blk_end_request_all(rq, 0);
}
-#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+#ifdef CONFIG_SMP
static void null_ipi_cmd_end_io(void *data)
{
@@ -260,7 +261,7 @@ static void null_cmd_end_ipi(struct nullb_cmd *cmd)
put_cpu();
}
-#endif /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
+#endif /* CONFIG_SMP */
static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
@@ -270,7 +271,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd)
end_cmd(cmd);
break;
case NULL_IRQ_SOFTIRQ:
-#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+#ifdef CONFIG_SMP
null_cmd_end_ipi(cmd);
#else
end_cmd(cmd);
@@ -346,8 +347,37 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index)
{
- return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL,
- hctx_index);
+ int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes);
+ int tip = (reg->nr_hw_queues % nr_online_nodes);
+ int node = 0, i, n;
+
+ /*
+ * Split submit queues evenly across the online nodes. If the split is
+ * uneven, give the first buckets one extra queue each until the
+ * remainder is used up (see the worked example after this diff).
+ */
+ for (i = 0, n = 1; i < hctx_index; i++, n++) {
+ if (n % b_size == 0) {
+ n = 0;
+ node++;
+
+ tip--;
+ if (!tip)
+ b_size = reg->nr_hw_queues / nr_online_nodes;
+ }
+ }
+
+ /*
+ * A node might not be online, so map the relative node id to the
+ * real node id.
+ */
+ for_each_online_node(n) {
+ if (!node)
+ break;
+ node--;
+ }
+
+ return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n);
}
static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
@@ -355,16 +385,24 @@ static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
kfree(hctx);
}
+static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
+{
+ BUG_ON(!nullb);
+ BUG_ON(!nq);
+
+ init_waitqueue_head(&nq->wait);
+ nq->queue_depth = nullb->queue_depth;
+}
+
static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int index)
{
struct nullb *nullb = data;
struct nullb_queue *nq = &nullb->queues[index];
- init_waitqueue_head(&nq->wait);
- nq->queue_depth = nullb->queue_depth;
- nullb->nr_queues++;
hctx->driver_data = nq;
+ null_init_queue(nullb, nq);
+ nullb->nr_queues++;
return 0;
}
@@ -417,13 +455,13 @@ static int setup_commands(struct nullb_queue *nq)
nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
if (!nq->cmds)
- return 1;
+ return -ENOMEM;
tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
if (!nq->tag_map) {
kfree(nq->cmds);
- return 1;
+ return -ENOMEM;
}
for (i = 0; i < nq->queue_depth; i++) {
@@ -454,33 +492,37 @@ static void cleanup_queues(struct nullb *nullb)
static int setup_queues(struct nullb *nullb)
{
- struct nullb_queue *nq;
- int i;
-
- nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL);
+ nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
+ GFP_KERNEL);
if (!nullb->queues)
- return 1;
+ return -ENOMEM;
nullb->nr_queues = 0;
nullb->queue_depth = hw_queue_depth;
- if (queue_mode == NULL_Q_MQ)
- return 0;
+ return 0;
+}
+
+static int init_driver_queues(struct nullb *nullb)
+{
+ struct nullb_queue *nq;
+ int i, ret = 0;
for (i = 0; i < submit_queues; i++) {
nq = &nullb->queues[i];
- init_waitqueue_head(&nq->wait);
- nq->queue_depth = hw_queue_depth;
- if (setup_commands(nq))
- break;
+
+ null_init_queue(nullb, nq);
+
+ ret = setup_commands(nq);
+ if (ret)
+ goto err_queue;
nullb->nr_queues++;
}
- if (i == submit_queues)
- return 0;
-
+ return 0;
+err_queue:
cleanup_queues(nullb);
- return 1;
+ return ret;
}
static int null_add_dev(void)
@@ -495,34 +537,36 @@ static int null_add_dev(void)
spin_lock_init(&nullb->lock);
+ if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
+ submit_queues = nr_online_nodes;
+
if (setup_queues(nullb))
goto err;
if (queue_mode == NULL_Q_MQ) {
null_mq_reg.numa_node = home_node;
null_mq_reg.queue_depth = hw_queue_depth;
+ null_mq_reg.nr_hw_queues = submit_queues;
if (use_per_node_hctx) {
null_mq_reg.ops->alloc_hctx = null_alloc_hctx;
null_mq_reg.ops->free_hctx = null_free_hctx;
-
- null_mq_reg.nr_hw_queues = nr_online_nodes;
} else {
null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue;
null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue;
-
- null_mq_reg.nr_hw_queues = submit_queues;
}
nullb->q = blk_mq_init_queue(&null_mq_reg, nullb);
} else if (queue_mode == NULL_Q_BIO) {
nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
blk_queue_make_request(nullb->q, null_queue_bio);
+ init_driver_queues(nullb);
} else {
nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
if (nullb->q)
blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+ init_driver_queues(nullb);
}
if (!nullb->q)
@@ -571,7 +615,7 @@ static int __init null_init(void)
{
unsigned int i;
-#if !defined(CONFIG_SMP) || !defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+#if !defined(CONFIG_SMP)
if (irqmode == NULL_IRQ_SOFTIRQ) {
pr_warn("null_blk: softirq completions not available.\n");
pr_warn("null_blk: using direct completions.\n");
@@ -579,7 +623,13 @@ static int __init null_init(void)
}
#endif
- if (submit_queues > nr_cpu_ids)
+ if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
+ if (submit_queues < nr_online_nodes) {
+ pr_warn("null_blk: submit_queues param is set to %u.",
+ nr_online_nodes);
+ submit_queues = nr_online_nodes;
+ }
+ } else if (submit_queues > nr_cpu_ids)
submit_queues = nr_cpu_ids;
else if (!submit_queues)
submit_queues = 1;
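The queue-to-node split in null_alloc_hctx() above is easiest to see with numbers: for 6 hardware queues on 4 online nodes, DIV_ROUND_UP gives buckets of 2, the remainder of 2 means only the first two nodes keep the extra queue, and the resulting index-to-node map is 0 0 1 1 2 3. A standalone sketch of the same walk (plain C, same arithmetic, invented names):

    #include <stdio.h>

    static int hctx_to_node(int nr_hw_queues, int nr_nodes, int hctx_index)
    {
        int b_size = (nr_hw_queues + nr_nodes - 1) / nr_nodes; /* DIV_ROUND_UP */
        int tip = nr_hw_queues % nr_nodes;
        int node = 0, i, n;

        for (i = 0, n = 1; i < hctx_index; i++, n++) {
            if (n % b_size == 0) {   /* bucket full: move to the next node */
                n = 0;
                node++;
                tip--;
                if (!tip)            /* extras used up: shrink the buckets */
                    b_size = nr_hw_queues / nr_nodes;
            }
        }
        return node;
    }

    int main(void)
    {
        int i;

        for (i = 0; i < 6; i++)      /* prints nodes 0 0 1 1 2 3 */
            printf("hctx %d -> node %d\n", i, hctx_to_node(6, 4, i));
        return 0;
    }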
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 9199c93be926..eb6e1e0e8db2 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -5269,7 +5269,7 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state)
}
}
-const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
+static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
{
switch (state) {
case SKD_MSG_STATE_IDLE:
@@ -5281,7 +5281,7 @@ const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
}
}
-const char *skd_skreq_state_to_str(enum skd_req_state state)
+static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
switch (state) {
case SKD_REQ_STATE_IDLE:
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 588479d58f52..6a680d4de7f1 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -199,15 +199,16 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
spin_lock_irqsave(&vblk->vq_lock, flags);
if (__virtblk_add_req(vblk->vq, vbr, vbr->sg, num) < 0) {
+ virtqueue_kick(vblk->vq);
spin_unlock_irqrestore(&vblk->vq_lock, flags);
blk_mq_stop_hw_queue(hctx);
- virtqueue_kick(vblk->vq);
return BLK_MQ_RQ_QUEUE_BUSY;
}
- spin_unlock_irqrestore(&vblk->vq_lock, flags);
if (last)
virtqueue_kick(vblk->vq);
+
+ spin_unlock_irqrestore(&vblk->vq_lock, flags);
return BLK_MQ_RQ_QUEUE_OK;
}
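The virtio-blk reordering above is about lock coverage rather than style: virtqueue_kick() reads ring state that __virtblk_add_req() updates, so kicking after vq_lock is dropped can race with another CPU queuing the next request. A minimal sketch of the invariant (pthread stand-ins, names invented):

    #include <pthread.h>

    static pthread_mutex_t vq_lock = PTHREAD_MUTEX_INITIALIZER;

    static void add_request(void) { /* writes ring descriptors */ }
    static void kick_host(void)   { /* reads ring indices: same lock */ }

    static void queue_rq(void)
    {
        pthread_mutex_lock(&vq_lock);
        add_request();
        kick_host();    /* must not move past the unlock below */
        pthread_mutex_unlock(&vq_lock);
    }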
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 432db1b59b00..c4a4c9006288 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -489,7 +489,7 @@ static int blkif_queue_request(struct request *req)
if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
(i % SEGS_PER_INDIRECT_FRAME == 0)) {
- unsigned long pfn;
+ unsigned long uninitialized_var(pfn);
if (segments)
kunmap_atomic(segments);
@@ -2011,6 +2011,10 @@ static void blkif_release(struct gendisk *disk, fmode_t mode)
bdev = bdget_disk(disk, 0);
+ if (!bdev) {
+ WARN(1, "Block device %s yanked out from us!\n", disk->disk_name);
+ goto out_mutex;
+ }
if (bdev->bd_openers)
goto out;
@@ -2041,6 +2045,7 @@ static void blkif_release(struct gendisk *disk, fmode_t mode)
out:
bdput(bdev);
+out_mutex:
mutex_unlock(&blkfront_mutex);
}
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index c206de2951f2..2f2b08457c67 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -165,6 +165,19 @@ config HW_RANDOM_OMAP
If unsure, say Y.
+config HW_RANDOM_OMAP3_ROM
+ tristate "OMAP3 ROM Random Number Generator support"
+ depends on HW_RANDOM && ARCH_OMAP3
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on OMAP34xx processors.
+
+ To compile this driver as a module, choose M here: the
+ module will be called omap3-rom-rng.
+
+ If unsure, say Y.
+
config HW_RANDOM_OCTEON
tristate "Octeon Random Number Generator support"
depends on HW_RANDOM && CAVIUM_OCTEON_SOC
@@ -327,3 +340,15 @@ config HW_RANDOM_TPM
module will be called tpm-rng.
If unsure, say Y.
+
+config HW_RANDOM_MSM
+ tristate "Qualcomm MSM Random Number Generator support"
+ depends on HW_RANDOM && ARCH_MSM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Qualcomm MSM SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called msm-rng.
+
+ If unsure, say Y.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index d7d2435ff7fa..3ae7755a52e7 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -15,6 +15,7 @@ n2-rng-y := n2-drv.o n2-asm.o
obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o
obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
+obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o
obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
@@ -28,3 +29,4 @@ obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o
obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
+obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
diff --git a/drivers/char/hw_random/msm-rng.c b/drivers/char/hw_random/msm-rng.c
new file mode 100644
index 000000000000..148521e51dc6
--- /dev/null
+++ b/drivers/char/hw_random/msm-rng.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+/* Device specific register offsets */
+#define PRNG_DATA_OUT 0x0000
+#define PRNG_STATUS 0x0004
+#define PRNG_LFSR_CFG 0x0100
+#define PRNG_CONFIG 0x0104
+
+/* Device specific register masks and config values */
+#define PRNG_LFSR_CFG_MASK 0x0000ffff
+#define PRNG_LFSR_CFG_CLOCKS 0x0000dddd
+#define PRNG_CONFIG_HW_ENABLE BIT(1)
+#define PRNG_STATUS_DATA_AVAIL BIT(0)
+
+#define MAX_HW_FIFO_DEPTH 16
+#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4)
+#define WORD_SZ 4
+
+struct msm_rng {
+ void __iomem *base;
+ struct clk *clk;
+ struct hwrng hwrng;
+};
+
+#define to_msm_rng(p) container_of(p, struct msm_rng, hwrng)
+
+static int msm_rng_enable(struct hwrng *hwrng, int enable)
+{
+ struct msm_rng *rng = to_msm_rng(hwrng);
+ u32 val;
+ int ret;
+
+ ret = clk_prepare_enable(rng->clk);
+ if (ret)
+ return ret;
+
+ if (enable) {
+ /* Enable PRNG only if it is not already enabled */
+ val = readl_relaxed(rng->base + PRNG_CONFIG);
+ if (val & PRNG_CONFIG_HW_ENABLE)
+ goto already_enabled;
+
+ val = readl_relaxed(rng->base + PRNG_LFSR_CFG);
+ val &= ~PRNG_LFSR_CFG_MASK;
+ val |= PRNG_LFSR_CFG_CLOCKS;
+ writel(val, rng->base + PRNG_LFSR_CFG);
+
+ val = readl_relaxed(rng->base + PRNG_CONFIG);
+ val |= PRNG_CONFIG_HW_ENABLE;
+ writel(val, rng->base + PRNG_CONFIG);
+ } else {
+ val = readl_relaxed(rng->base + PRNG_CONFIG);
+ val &= ~PRNG_CONFIG_HW_ENABLE;
+ writel(val, rng->base + PRNG_CONFIG);
+ }
+
+already_enabled:
+ clk_disable_unprepare(rng->clk);
+ return 0;
+}
+
+static int msm_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait)
+{
+ struct msm_rng *rng = to_msm_rng(hwrng);
+ size_t currsize = 0;
+ u32 *retdata = data;
+ size_t maxsize;
+ int ret;
+ u32 val;
+
+ /* calculate max size bytes to transfer back to caller */
+ maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max);
+
+ /* no room for word data */
+ if (maxsize < WORD_SZ)
+ return 0;
+
+ ret = clk_prepare_enable(rng->clk);
+ if (ret)
+ return ret;
+
+ /* read random data from hardware */
+ do {
+ val = readl_relaxed(rng->base + PRNG_STATUS);
+ if (!(val & PRNG_STATUS_DATA_AVAIL))
+ break;
+
+ val = readl_relaxed(rng->base + PRNG_DATA_OUT);
+ if (!val)
+ break;
+
+ *retdata++ = val;
+ currsize += WORD_SZ;
+
+ /* make sure we stay on 32bit boundary */
+ if ((maxsize - currsize) < WORD_SZ)
+ break;
+ } while (currsize < maxsize);
+
+ clk_disable_unprepare(rng->clk);
+
+ return currsize;
+}
+
+static int msm_rng_init(struct hwrng *hwrng)
+{
+ return msm_rng_enable(hwrng, 1);
+}
+
+static void msm_rng_cleanup(struct hwrng *hwrng)
+{
+ msm_rng_enable(hwrng, 0);
+}
+
+static int msm_rng_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct msm_rng *rng;
+ int ret;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rng);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rng->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rng->base))
+ return PTR_ERR(rng->base);
+
+ rng->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(rng->clk))
+ return PTR_ERR(rng->clk);
+
+ rng->hwrng.name = KBUILD_MODNAME;
+ rng->hwrng.init = msm_rng_init;
+ rng->hwrng.cleanup = msm_rng_cleanup;
+ rng->hwrng.read = msm_rng_read;
+
+ ret = hwrng_register(&rng->hwrng);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register hwrng\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int msm_rng_remove(struct platform_device *pdev)
+{
+ struct msm_rng *rng = platform_get_drvdata(pdev);
+
+ hwrng_unregister(&rng->hwrng);
+ return 0;
+}
+
+static const struct of_device_id msm_rng_of_match[] = {
+ { .compatible = "qcom,prng", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, msm_rng_of_match);
+
+static struct platform_driver msm_rng_driver = {
+ .probe = msm_rng_probe,
+ .remove = msm_rng_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(msm_rng_of_match),
+ }
+};
+module_platform_driver(msm_rng_driver);
+
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_AUTHOR("The Linux Foundation");
+MODULE_DESCRIPTION("Qualcomm MSM random number generator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
new file mode 100644
index 000000000000..c853e9e68573
--- /dev/null
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -0,0 +1,141 @@
+/*
+ * omap3-rom-rng.c - RNG driver for TI OMAP3 CPU family
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Juha Yrjola <juha.yrjola@solidboot.com>
+ *
+ * Copyright (C) 2013 Pali Rohár <pali.rohar@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/random.h>
+#include <linux/hw_random.h>
+#include <linux/timer.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+
+#define RNG_RESET 0x01
+#define RNG_GEN_PRNG_HW_INIT 0x02
+#define RNG_GEN_HW 0x08
+
+/* param1: ptr, param2: count, param3: flag */
+static u32 (*omap3_rom_rng_call)(u32, u32, u32);
+
+static struct timer_list idle_timer;
+static int rng_idle;
+static struct clk *rng_clk;
+
+static void omap3_rom_rng_idle(unsigned long data)
+{
+ int r;
+
+ r = omap3_rom_rng_call(0, 0, RNG_RESET);
+ if (r != 0) {
+ pr_err("reset failed: %d\n", r);
+ return;
+ }
+ clk_disable_unprepare(rng_clk);
+ rng_idle = 1;
+}
+
+static int omap3_rom_rng_get_random(void *buf, unsigned int count)
+{
+ u32 r;
+ u32 ptr;
+
+ del_timer_sync(&idle_timer);
+ if (rng_idle) {
+ clk_prepare_enable(rng_clk);
+ r = omap3_rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT);
+ if (r != 0) {
+ clk_disable_unprepare(rng_clk);
+ pr_err("HW init failed: %d\n", r);
+ return -EIO;
+ }
+ rng_idle = 0;
+ }
+
+ ptr = virt_to_phys(buf);
+ r = omap3_rom_rng_call(ptr, count, RNG_GEN_HW);
+ mod_timer(&idle_timer, jiffies + msecs_to_jiffies(500));
+ if (r != 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int omap3_rom_rng_data_present(struct hwrng *rng, int wait)
+{
+ return 1;
+}
+
+static int omap3_rom_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ int r;
+
+ r = omap3_rom_rng_get_random(data, 4);
+ if (r < 0)
+ return r;
+ return 4;
+}
+
+static struct hwrng omap3_rom_rng_ops = {
+ .name = "omap3-rom",
+ .data_present = omap3_rom_rng_data_present,
+ .data_read = omap3_rom_rng_data_read,
+};
+
+static int omap3_rom_rng_probe(struct platform_device *pdev)
+{
+ pr_info("initializing\n");
+
+ omap3_rom_rng_call = pdev->dev.platform_data;
+ if (!omap3_rom_rng_call) {
+ pr_err("omap3_rom_rng_call is NULL\n");
+ return -EINVAL;
+ }
+
+ setup_timer(&idle_timer, omap3_rom_rng_idle, 0);
+ rng_clk = clk_get(&pdev->dev, "ick");
+ if (IS_ERR(rng_clk)) {
+ pr_err("unable to get RNG clock\n");
+ return PTR_ERR(rng_clk);
+ }
+
+ /* Leave the RNG in reset state. */
+ clk_prepare_enable(rng_clk);
+ omap3_rom_rng_idle(0);
+
+ return hwrng_register(&omap3_rom_rng_ops);
+}
+
+static int omap3_rom_rng_remove(struct platform_device *pdev)
+{
+ hwrng_unregister(&omap3_rom_rng_ops);
+ clk_disable_unprepare(rng_clk);
+ clk_put(rng_clk);
+ return 0;
+}
+
+static struct platform_driver omap3_rom_rng_driver = {
+ .driver = {
+ .name = "omap3-rom-rng",
+ .owner = THIS_MODULE,
+ },
+ .probe = omap3_rom_rng_probe,
+ .remove = omap3_rom_rng_remove,
+};
+
+module_platform_driver(omap3_rom_rng_driver);
+
+MODULE_ALIAS("platform:omap3-rom-rng");
+MODULE_AUTHOR("Juha Yrjola");
+MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
index b761459a3436..ab7ffdec0ec3 100644
--- a/drivers/char/hw_random/pseries-rng.c
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -24,7 +24,6 @@
#include <linux/hw_random.h>
#include <asm/vio.h>
-#define MODULE_NAME "pseries-rng"
static int pseries_rng_data_read(struct hwrng *rng, u32 *data)
{
@@ -55,7 +54,7 @@ static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev)
};
static struct hwrng pseries_rng = {
- .name = MODULE_NAME,
+ .name = KBUILD_MODNAME,
.data_read = pseries_rng_data_read,
};
@@ -78,7 +77,7 @@ static struct vio_device_id pseries_rng_driver_ids[] = {
MODULE_DEVICE_TABLE(vio, pseries_rng_driver_ids);
static struct vio_driver pseries_rng_driver = {
- .name = MODULE_NAME,
+ .name = KBUILD_MODNAME,
.probe = pseries_rng_probe,
.remove = pseries_rng_remove,
.get_desired_dma = pseries_rng_get_desired_dma,
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index e737772ad69a..de5a6dcfb3e2 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -221,7 +221,7 @@ static void __exit mod_exit(void)
module_init(mod_init);
module_exit(mod_exit);
-static struct x86_cpu_id via_rng_cpu_id[] = {
+static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = {
X86_FEATURE_MATCH(X86_FEATURE_XSTORE),
{}
};
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index 40cc0cf2ded6..e6939e13e338 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -664,6 +664,13 @@ static struct dmi_system_id __initdata i8k_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro"),
},
},
+ {
+ .ident = "Dell XPS421",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS L421X"),
+ },
+ },
{ }
};
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 94c0c74434ea..1a65838888cd 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -33,6 +33,15 @@ config TCG_TIS
from within Linux. To compile this driver as a module, choose
M here; the module will be called tpm_tis.
+config TCG_TIS_I2C_ATMEL
+ tristate "TPM Interface Specification 1.2 Interface (I2C - Atmel)"
+ depends on I2C
+ ---help---
+ If you have an Atmel I2C TPM security chip say Yes and it will be
+ accessible from within Linux.
+ To compile this driver as a module, choose M here; the module will
+ be called tpm_tis_i2c_atmel.
+
config TCG_TIS_I2C_INFINEON
tristate "TPM Interface Specification 1.2 Interface (I2C - Infineon)"
depends on I2C
@@ -42,7 +51,17 @@ config TCG_TIS_I2C_INFINEON
Specification 0.20 say Yes and it will be accessible from within
Linux.
To compile this driver as a module, choose M here; the module
- will be called tpm_tis_i2c_infineon.
+ will be called tpm_i2c_infineon.
+
+config TCG_TIS_I2C_NUVOTON
+ tristate "TPM Interface Specification 1.2 Interface (I2C - Nuvoton)"
+ depends on I2C
+ ---help---
+ If you have a TPM security chip with an I2C interface from
+ Nuvoton Technology Corp. say Yes and it will be accessible
+ from within Linux.
+ To compile this driver as a module, choose M here; the module
+ will be called tpm_i2c_nuvoton.
config TCG_NSC
tristate "National Semiconductor TPM Interface"
@@ -82,14 +101,14 @@ config TCG_IBMVTPM
as a module, choose M here; the module will be called tpm_ibmvtpm.
config TCG_ST33_I2C
- tristate "STMicroelectronics ST33 I2C TPM"
- depends on I2C
- depends on GPIOLIB
- ---help---
- If you have a TPM security chip from STMicroelectronics working with
- an I2C bus say Yes and it will be accessible from within Linux.
- To compile this driver as a module, choose M here; the module will be
- called tpm_stm_st33_i2c.
+ tristate "STMicroelectronics ST33 I2C TPM"
+ depends on I2C
+ depends on GPIOLIB
+ ---help---
+ If you have a TPM security chip from STMicroelectronics working with
+ an I2C bus say Yes and it will be accessible from within Linux.
+ To compile this driver as a module, choose M here; the module will be
+ called tpm_stm_st33_i2c.
config TCG_XEN
tristate "XEN TPM Interface"
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index eb41ff97d0ad..b80a4000daee 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -2,17 +2,20 @@
# Makefile for the kernel tpm device drivers.
#
obj-$(CONFIG_TCG_TPM) += tpm.o
+tpm-y := tpm-interface.o
+tpm-$(CONFIG_ACPI) += tpm_ppi.o
+
ifdef CONFIG_ACPI
- obj-$(CONFIG_TCG_TPM) += tpm_bios.o
- tpm_bios-objs += tpm_eventlog.o tpm_acpi.o tpm_ppi.o
+ tpm-y += tpm_eventlog.o tpm_acpi.o
else
ifdef CONFIG_TCG_IBMVTPM
- obj-$(CONFIG_TCG_TPM) += tpm_bios.o
- tpm_bios-objs += tpm_eventlog.o tpm_of.o
+ tpm-y += tpm_eventlog.o tpm_of.o
endif
endif
obj-$(CONFIG_TCG_TIS) += tpm_tis.o
+obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o
obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o
+obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o
obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm-interface.c
index e3c974a6c522..6ae41d337630 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -10,13 +10,13 @@
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
* Device driver for TCG/TCPA TPM (trusted platform module).
- * Specifications at www.trustedcomputinggroup.org
+ * Specifications at www.trustedcomputinggroup.org
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
- *
+ *
* Note, the TPM chip is not interrupt driven (only polling)
* and can have very long timeouts (minutes!). Hence the unusual
* calls to msleep.
@@ -371,13 +371,14 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
return -ENODATA;
if (count > bufsiz) {
dev_err(chip->dev,
- "invalid count value %x %zx \n", count, bufsiz);
+ "invalid count value %x %zx\n", count, bufsiz);
return -E2BIG;
}
mutex_lock(&chip->tpm_mutex);
- if ((rc = chip->vendor.send(chip, (u8 *) buf, count)) < 0) {
+ rc = chip->vendor.send(chip, (u8 *) buf, count);
+ if (rc < 0) {
dev_err(chip->dev,
"tpm_transmit: tpm_send: error %zd\n", rc);
goto out;
@@ -444,7 +445,7 @@ static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd,
{
int err;
- len = tpm_transmit(chip,(u8 *) cmd, len);
+ len = tpm_transmit(chip, (u8 *) cmd, len);
if (len < 0)
return len;
else if (len < TPM_HEADER_SIZE)
@@ -658,7 +659,7 @@ static int tpm_continue_selftest(struct tpm_chip *chip)
return rc;
}
-ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr,
+ssize_t tpm_show_enabled(struct device *dev, struct device_attribute *attr,
char *buf)
{
cap_t cap;
@@ -674,7 +675,7 @@ ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr,
}
EXPORT_SYMBOL_GPL(tpm_show_enabled);
-ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr,
+ssize_t tpm_show_active(struct device *dev, struct device_attribute *attr,
char *buf)
{
cap_t cap;
@@ -690,7 +691,7 @@ ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr,
}
EXPORT_SYMBOL_GPL(tpm_show_active);
-ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr,
+ssize_t tpm_show_owned(struct device *dev, struct device_attribute *attr,
char *buf)
{
cap_t cap;
@@ -706,8 +707,8 @@ ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr,
}
EXPORT_SYMBOL_GPL(tpm_show_owned);
-ssize_t tpm_show_temp_deactivated(struct device * dev,
- struct device_attribute * attr, char *buf)
+ssize_t tpm_show_temp_deactivated(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
cap_t cap;
ssize_t rc;
@@ -769,10 +770,10 @@ static int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
/**
* tpm_pcr_read - read a pcr value
- * @chip_num: tpm idx # or ANY
+ * @chip_num: tpm idx # or ANY
* @pcr_idx: pcr idx to retrieve
- * @res_buf: TPM_PCR value
- * size of res_buf is 20 bytes (or NULL if you don't care)
+ * @res_buf: TPM_PCR value
+ * size of res_buf is 20 bytes (or NULL if you don't care)
*
* The TPM driver should be built-in, but for whatever reason it
* isn't, protect against the chip disappearing, by incrementing
@@ -794,9 +795,9 @@ EXPORT_SYMBOL_GPL(tpm_pcr_read);
/**
* tpm_pcr_extend - extend pcr value with hash
- * @chip_num: tpm idx # or ANY
+ * @chip_num: tpm idx # or ANY
* @pcr_idx: pcr idx to extend
- * @hash: hash value used to extend pcr value
+ * @hash: hash value used to extend pcr value
*
* The TPM driver should be built-in, but for whatever reason it
* isn't, protect against the chip disappearing, by incrementing
@@ -847,8 +848,7 @@ int tpm_do_selftest(struct tpm_chip *chip)
unsigned long duration;
struct tpm_cmd_t cmd;
- duration = tpm_calc_ordinal_duration(chip,
- TPM_ORD_CONTINUE_SELFTEST);
+ duration = tpm_calc_ordinal_duration(chip, TPM_ORD_CONTINUE_SELFTEST);
loops = jiffies_to_msecs(duration) / delay_msec;
@@ -965,12 +965,12 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
if (err)
goto out;
- /*
+ /*
ignore header 10 bytes
algorithm 32 bits (1 == RSA )
encscheme 16 bits
sigscheme 16 bits
- parameters (RSA 12->bytes: keybit, #primes, expbit)
+ parameters (RSA 12->bytes: keybit, #primes, expbit)
keylenbytes 32 bits
256 byte modulus
ignore checksum 20 bytes
@@ -1020,43 +1020,33 @@ ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr,
str += sprintf(str, "Manufacturer: 0x%x\n",
be32_to_cpu(cap.manufacturer_id));
- rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap,
- "attempting to determine the 1.1 version");
- if (rc)
- return 0;
- str += sprintf(str,
- "TCG version: %d.%d\nFirmware version: %d.%d\n",
- cap.tpm_version.Major, cap.tpm_version.Minor,
- cap.tpm_version.revMajor, cap.tpm_version.revMinor);
- return str - buf;
-}
-EXPORT_SYMBOL_GPL(tpm_show_caps);
-
-ssize_t tpm_show_caps_1_2(struct device * dev,
- struct device_attribute * attr, char *buf)
-{
- cap_t cap;
- ssize_t rc;
- char *str = buf;
-
- rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap,
- "attempting to determine the manufacturer");
- if (rc)
- return 0;
- str += sprintf(str, "Manufacturer: 0x%x\n",
- be32_to_cpu(cap.manufacturer_id));
+ /* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */
rc = tpm_getcap(dev, CAP_VERSION_1_2, &cap,
"attempting to determine the 1.2 version");
- if (rc)
- return 0;
- str += sprintf(str,
- "TCG version: %d.%d\nFirmware version: %d.%d\n",
- cap.tpm_version_1_2.Major, cap.tpm_version_1_2.Minor,
- cap.tpm_version_1_2.revMajor,
- cap.tpm_version_1_2.revMinor);
+ if (!rc) {
+ str += sprintf(str,
+ "TCG version: %d.%d\nFirmware version: %d.%d\n",
+ cap.tpm_version_1_2.Major,
+ cap.tpm_version_1_2.Minor,
+ cap.tpm_version_1_2.revMajor,
+ cap.tpm_version_1_2.revMinor);
+ } else {
+ /* Otherwise just use TPM_STRUCT_VER */
+ rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap,
+ "attempting to determine the 1.1 version");
+ if (rc)
+ return 0;
+ str += sprintf(str,
+ "TCG version: %d.%d\nFirmware version: %d.%d\n",
+ cap.tpm_version.Major,
+ cap.tpm_version.Minor,
+ cap.tpm_version.revMajor,
+ cap.tpm_version.revMinor);
+ }
+
return str - buf;
}
-EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
+EXPORT_SYMBOL_GPL(tpm_show_caps);
ssize_t tpm_show_durations(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1102,8 +1092,8 @@ ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
}
EXPORT_SYMBOL_GPL(tpm_store_cancel);
-static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask, bool check_cancel,
- bool *canceled)
+static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
+ bool check_cancel, bool *canceled)
{
u8 status = chip->vendor.status(chip);
@@ -1170,38 +1160,25 @@ EXPORT_SYMBOL_GPL(wait_for_tpm_stat);
*/
int tpm_open(struct inode *inode, struct file *file)
{
- int minor = iminor(inode);
- struct tpm_chip *chip = NULL, *pos;
-
- rcu_read_lock();
- list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
- if (pos->vendor.miscdev.minor == minor) {
- chip = pos;
- get_device(chip->dev);
- break;
- }
- }
- rcu_read_unlock();
-
- if (!chip)
- return -ENODEV;
+ struct miscdevice *misc = file->private_data;
+ struct tpm_chip *chip = container_of(misc, struct tpm_chip,
+ vendor.miscdev);
if (test_and_set_bit(0, &chip->is_open)) {
dev_dbg(chip->dev, "Another process owns this TPM\n");
- put_device(chip->dev);
return -EBUSY;
}
chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL);
if (chip->data_buffer == NULL) {
clear_bit(0, &chip->is_open);
- put_device(chip->dev);
return -ENOMEM;
}
atomic_set(&chip->data_pending, 0);
file->private_data = chip;
+ get_device(chip->dev);
return 0;
}
EXPORT_SYMBOL_GPL(tpm_open);
@@ -1463,7 +1440,6 @@ void tpm_dev_vendor_release(struct tpm_chip *chip)
chip->vendor.release(chip->dev);
clear_bit(chip->dev_num, dev_mask);
- kfree(chip->vendor.miscdev.name);
}
EXPORT_SYMBOL_GPL(tpm_dev_vendor_release);
@@ -1487,7 +1463,7 @@ void tpm_dev_release(struct device *dev)
EXPORT_SYMBOL_GPL(tpm_dev_release);
/*
- * Called from tpm_<specific>.c probe function only for devices
+ * Called from tpm_<specific>.c probe function only for devices
* the driver has determined it should claim. Prior to calling
* this function the specific probe function has called pci_enable_device
* upon errant exit from this function specific probe function should call
@@ -1496,17 +1472,13 @@ EXPORT_SYMBOL_GPL(tpm_dev_release);
struct tpm_chip *tpm_register_hardware(struct device *dev,
const struct tpm_vendor_specific *entry)
{
-#define DEVNAME_SIZE 7
-
- char *devname;
struct tpm_chip *chip;
/* Driver specific per-device data */
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
- devname = kmalloc(DEVNAME_SIZE, GFP_KERNEL);
- if (chip == NULL || devname == NULL)
- goto out_free;
+ if (chip == NULL)
+ return NULL;
mutex_init(&chip->buffer_mutex);
mutex_init(&chip->tpm_mutex);
@@ -1531,8 +1503,9 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
set_bit(chip->dev_num, dev_mask);
- scnprintf(devname, DEVNAME_SIZE, "%s%d", "tpm", chip->dev_num);
- chip->vendor.miscdev.name = devname;
+ scnprintf(chip->devname, sizeof(chip->devname), "%s%d", "tpm",
+ chip->dev_num);
+ chip->vendor.miscdev.name = chip->devname;
chip->vendor.miscdev.parent = dev;
chip->dev = get_device(dev);
@@ -1558,7 +1531,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
goto put_device;
}
- chip->bios_dir = tpm_bios_log_setup(devname);
+ chip->bios_dir = tpm_bios_log_setup(chip->devname);
/* Make chip available */
spin_lock(&driver_lock);
@@ -1571,7 +1544,6 @@ put_device:
put_device(chip->dev);
out_free:
kfree(chip);
- kfree(devname);
return NULL;
}
EXPORT_SYMBOL_GPL(tpm_register_hardware);
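The tpm_open() rewrite above leans on a miscdevice convention: the misc core stores the registered struct miscdevice in file->private_data before calling the driver's ->open(), so a driver embedding the miscdevice in its own state can recover that state with container_of() instead of walking a global list under RCU. A minimal sketch of the pattern (struct example_dev and example_open are stand-ins, not code from this patch):

    #include <linux/fs.h>
    #include <linux/miscdevice.h>

    struct example_dev {
            struct miscdevice miscdev;      /* embedded, like vendor.miscdev */
            /* ... driver state ... */
    };

    static int example_open(struct inode *inode, struct file *file)
    {
            struct miscdevice *misc = file->private_data;   /* set by misc core */
            struct example_dev *edev =
                    container_of(misc, struct example_dev, miscdev);

            file->private_data = edev;      /* later fops see the device state */
            return 0;
    }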
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index a7bfc176ed43..f32847872193 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -59,8 +59,6 @@ extern ssize_t tpm_show_pcrs(struct device *, struct device_attribute *attr,
char *);
extern ssize_t tpm_show_caps(struct device *, struct device_attribute *attr,
char *);
-extern ssize_t tpm_show_caps_1_2(struct device *, struct device_attribute *attr,
- char *);
extern ssize_t tpm_store_cancel(struct device *, struct device_attribute *attr,
const char *, size_t);
extern ssize_t tpm_show_enabled(struct device *, struct device_attribute *attr,
@@ -122,6 +120,7 @@ struct tpm_chip {
struct device *dev; /* Device stuff */
int dev_num; /* /dev/tpm# */
+ char devname[7];
unsigned long is_open; /* only one allowed */
int time_expired;
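A sizing note on the new devname field (illustrative, not part of the patch): it replaces the kmalloc'd buffer whose length came from the DEVNAME_SIZE constant removed in tpm-interface.c, and 7 bytes is exactly "tpm", up to three digits of dev_num, and the NUL terminator:

    char devname[sizeof("tpm") + 3];        /* 4 + 3 == 7, enough for "tpm255" */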
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index 99d6820c611d..c9a528d25d22 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -202,7 +202,7 @@ static int __init init_atmel(void)
have_region =
(atmel_request_region
- (tpm_atmel.base, region_size, "tpm_atmel0") == NULL) ? 0 : 1;
+ (base, region_size, "tpm_atmel0") == NULL) ? 0 : 1;
pdev = platform_device_register_simple("tpm_atmel", -1, NULL, 0);
if (IS_ERR(pdev)) {
diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
index 84ddc557b8f8..59f7cb28260b 100644
--- a/drivers/char/tpm/tpm_eventlog.c
+++ b/drivers/char/tpm/tpm_eventlog.c
@@ -406,7 +406,6 @@ out_tpm:
out:
return NULL;
}
-EXPORT_SYMBOL_GPL(tpm_bios_log_setup);
void tpm_bios_log_teardown(struct dentry **lst)
{
@@ -415,5 +414,3 @@ void tpm_bios_log_teardown(struct dentry **lst)
for (i = 0; i < 3; i++)
securityfs_remove(lst[i]);
}
-EXPORT_SYMBOL_GPL(tpm_bios_log_teardown);
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
new file mode 100644
index 000000000000..c3cd7fe481a1
--- /dev/null
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -0,0 +1,284 @@
+/*
+ * ATMEL I2C TPM AT97SC3204T
+ *
+ * Copyright (C) 2012 V Lab Technologies
+ * Teddy Reed <teddy@prosauce.org>
+ * Copyright (C) 2013, Obsidian Research Corp.
+ * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+ * Device driver for ATMEL I2C TPMs.
+ *
+ * Teddy Reed determined the basic I2C command flow: unlike other I2C TPM
+ * devices, the raw TCG formatted TPM command data is written via I2C, and the
+ * raw TCG formatted TPM response data is then returned via I2C.
+ *
+ * TCG status/locality/etc functions seen in the LPC implementation do not
+ * seem to be present.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include "tpm.h"
+
+#define I2C_DRIVER_NAME "tpm_i2c_atmel"
+
+#define TPM_I2C_SHORT_TIMEOUT 750 /* ms */
+#define TPM_I2C_LONG_TIMEOUT 2000 /* 2 sec */
+
+#define ATMEL_STS_OK 1
+
+struct priv_data {
+ size_t len;
+ /* This is the amount we read on the first try. 25 was chosen to fit a
+ * fair number of read responses in the buffer so a 2nd retry can be
+ * avoided in small message cases. */
+ u8 buffer[sizeof(struct tpm_output_header) + 25];
+};
+
+static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct priv_data *priv = chip->vendor.priv;
+ struct i2c_client *client = to_i2c_client(chip->dev);
+ s32 status;
+
+ priv->len = 0;
+
+ if (len <= 2)
+ return -EIO;
+
+ status = i2c_master_send(client, buf, len);
+
+ dev_dbg(chip->dev,
+ "%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__,
+ (int)min_t(size_t, 64, len), buf, len, status);
+ return status;
+}
+
+static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct priv_data *priv = chip->vendor.priv;
+ struct i2c_client *client = to_i2c_client(chip->dev);
+ struct tpm_output_header *hdr =
+ (struct tpm_output_header *)priv->buffer;
+ u32 expected_len;
+ int rc;
+
+ if (priv->len == 0)
+ return -EIO;
+
+ /* Get the message size from the message header, if we didn't get the
+ * whole message in read_status then we need to re-read the
+ * message. */
+ expected_len = be32_to_cpu(hdr->length);
+ if (expected_len > count)
+ return -ENOMEM;
+
+ if (priv->len >= expected_len) {
+ dev_dbg(chip->dev,
+ "%s early(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
+ (int)min_t(size_t, 64, expected_len), buf, count,
+ expected_len);
+ memcpy(buf, priv->buffer, expected_len);
+ return expected_len;
+ }
+
+ rc = i2c_master_recv(client, buf, expected_len);
+ dev_dbg(chip->dev,
+ "%s reread(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
+ (int)min_t(size_t, 64, expected_len), buf, count,
+ expected_len);
+ return rc;
+}
+
+static void i2c_atmel_cancel(struct tpm_chip *chip)
+{
+ dev_err(chip->dev, "TPM operation cancellation was requested, but is not supported\n");
+}
+
+static u8 i2c_atmel_read_status(struct tpm_chip *chip)
+{
+ struct priv_data *priv = chip->vendor.priv;
+ struct i2c_client *client = to_i2c_client(chip->dev);
+ int rc;
+
+ /* The TPM fails the I2C read until it is ready, so we do the entire
+ * transfer here and buffer it locally. This way the common code can
+ * properly handle the timeouts. */
+ priv->len = 0;
+ memset(priv->buffer, 0, sizeof(priv->buffer));
+
+ /* Once the TPM has completed the command the command remains readable
+ * until another command is issued. */
+ rc = i2c_master_recv(client, priv->buffer, sizeof(priv->buffer));
+ dev_dbg(chip->dev,
+ "%s: sts=%d", __func__, rc);
+ if (rc <= 0)
+ return 0;
+
+ priv->len = rc;
+
+ return ATMEL_STS_OK;
+}
+
+static const struct file_operations i2c_atmel_ops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = tpm_open,
+ .read = tpm_read,
+ .write = tpm_write,
+ .release = tpm_release,
+};
+
+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
+static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
+static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
+static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
+static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
+static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
+static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
+static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
+
+static struct attribute *i2c_atmel_attrs[] = {
+ &dev_attr_pubek.attr,
+ &dev_attr_pcrs.attr,
+ &dev_attr_enabled.attr,
+ &dev_attr_active.attr,
+ &dev_attr_owned.attr,
+ &dev_attr_temp_deactivated.attr,
+ &dev_attr_caps.attr,
+ &dev_attr_cancel.attr,
+ &dev_attr_durations.attr,
+ &dev_attr_timeouts.attr,
+ NULL,
+};
+
+static struct attribute_group i2c_atmel_attr_grp = {
+ .attrs = i2c_atmel_attrs
+};
+
+static bool i2c_atmel_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return 0;
+}
+
+static const struct tpm_vendor_specific i2c_atmel = {
+ .status = i2c_atmel_read_status,
+ .recv = i2c_atmel_recv,
+ .send = i2c_atmel_send,
+ .cancel = i2c_atmel_cancel,
+ .req_complete_mask = ATMEL_STS_OK,
+ .req_complete_val = ATMEL_STS_OK,
+ .req_canceled = i2c_atmel_req_canceled,
+ .attr_group = &i2c_atmel_attr_grp,
+ .miscdev.fops = &i2c_atmel_ops,
+};
+
+static int i2c_atmel_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rc;
+ struct tpm_chip *chip;
+ struct device *dev = &client->dev;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ chip = tpm_register_hardware(dev, &i2c_atmel);
+ if (!chip) {
+ dev_err(dev, "%s() error in tpm_register_hardware\n", __func__);
+ return -ENODEV;
+ }
+
+ chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data),
+ GFP_KERNEL);
+
+ /* Default timeouts */
+ chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->vendor.timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT);
+ chip->vendor.timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->vendor.timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->vendor.irq = 0;
+
+ /* There is no known way to probe for this device, and all version
+ * information seems to be read via TPM commands. Thus we rely on the
+ * TPM startup process in the common code to detect the device. */
+ if (tpm_get_timeouts(chip)) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ if (tpm_do_selftest(chip)) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ tpm_dev_vendor_release(chip);
+ tpm_remove_hardware(chip->dev);
+ return rc;
+}
+
+static int i2c_atmel_remove(struct i2c_client *client)
+{
+ struct device *dev = &(client->dev);
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ if (chip)
+ tpm_dev_vendor_release(chip);
+ tpm_remove_hardware(dev);
+ kfree(chip);
+ return 0;
+}
+
+static const struct i2c_device_id i2c_atmel_id[] = {
+ {I2C_DRIVER_NAME, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, i2c_atmel_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id i2c_atmel_of_match[] = {
+ {.compatible = "atmel,at97sc3204t"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, i2c_atmel_of_match);
+#endif
+
+static SIMPLE_DEV_PM_OPS(i2c_atmel_pm_ops, tpm_pm_suspend, tpm_pm_resume);
+
+static struct i2c_driver i2c_atmel_driver = {
+ .id_table = i2c_atmel_id,
+ .probe = i2c_atmel_probe,
+ .remove = i2c_atmel_remove,
+ .driver = {
+ .name = I2C_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &i2c_atmel_pm_ops,
+ .of_match_table = of_match_ptr(i2c_atmel_of_match),
+ },
+};
+
+module_i2c_driver(i2c_atmel_driver);
+
+MODULE_AUTHOR("Jason Gunthorpe <jgunthorpe@obsidianresearch.com>");
+MODULE_DESCRIPTION("Atmel TPM I2C Driver");
+MODULE_LICENSE("GPL");
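As the comment in the probe routine notes, there is no way to detect this chip, so the platform must declare it. A hedged example of static board-code instantiation (the bus number and the 0x29 address are placeholders, not values from this patch); device tree users instead add a node matching "atmel,at97sc3204t":

    #include <linux/i2c.h>

    static struct i2c_board_info atmel_tpm_info __initdata = {
            I2C_BOARD_INFO("tpm_i2c_atmel", 0x29),  /* placeholder I2C address */
    };

    /* from board init code, assuming the TPM sits on I2C adapter 1 */
    i2c_register_board_info(1, &atmel_tpm_info, 1);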
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index b8735de8ce95..fefd2aa5c81e 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -581,7 +581,7 @@ static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
-static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
@@ -685,7 +685,6 @@ out_vendor:
chip->dev->release = NULL;
chip->release = NULL;
tpm_dev.client = NULL;
- dev_set_drvdata(chip->dev, chip);
out_err:
return rc;
}
@@ -766,7 +765,6 @@ static int tpm_tis_i2c_remove(struct i2c_client *client)
chip->dev->release = NULL;
chip->release = NULL;
tpm_dev.client = NULL;
- dev_set_drvdata(chip->dev, chip);
return 0;
}
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
new file mode 100644
index 000000000000..6276fea01ff0
--- /dev/null
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -0,0 +1,710 @@
+/******************************************************************************
+ * Nuvoton TPM I2C Device Driver Interface for WPCT301/NPCT501,
+ * based on the TCG TPM Interface Spec version 1.2.
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * Copyright (C) 2011, Nuvoton Technology Corporation.
+ * Dan Morav <dan.morav@nuvoton.com>
+ * Copyright (C) 2013, Obsidian Research Corp.
+ * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Nuvoton contact information: APC.Support@nuvoton.com
+ *****************************************************************************/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/i2c.h>
+#include "tpm.h"
+
+/* I2C interface offsets */
+#define TPM_STS 0x00
+#define TPM_BURST_COUNT 0x01
+#define TPM_DATA_FIFO_W 0x20
+#define TPM_DATA_FIFO_R 0x40
+#define TPM_VID_DID_RID 0x60
+/* TPM command header size */
+#define TPM_HEADER_SIZE 10
+#define TPM_RETRY 5
+/*
+ * I2C bus device maximum buffer size w/o counting I2C address or command
+ * i.e. max size required for I2C write is 34 = addr, command, 32 bytes data
+ */
+#define TPM_I2C_MAX_BUF_SIZE 32
+#define TPM_I2C_RETRY_COUNT 32
+#define TPM_I2C_BUS_DELAY 1 /* msec */
+#define TPM_I2C_RETRY_DELAY_SHORT 2 /* msec */
+#define TPM_I2C_RETRY_DELAY_LONG 10 /* msec */
+
+#define I2C_DRIVER_NAME "tpm_i2c_nuvoton"
+
+struct priv_data {
+ unsigned int intrs;
+};
+
+static s32 i2c_nuvoton_read_buf(struct i2c_client *client, u8 offset, u8 size,
+ u8 *data)
+{
+ s32 status;
+
+ status = i2c_smbus_read_i2c_block_data(client, offset, size, data);
+ dev_dbg(&client->dev,
+ "%s(offset=%u size=%u data=%*ph) -> sts=%d\n", __func__,
+ offset, size, (int)size, data, status);
+ return status;
+}
+
+static s32 i2c_nuvoton_write_buf(struct i2c_client *client, u8 offset, u8 size,
+ u8 *data)
+{
+ s32 status;
+
+ status = i2c_smbus_write_i2c_block_data(client, offset, size, data);
+ dev_dbg(&client->dev,
+ "%s(offset=%u size=%u data=%*ph) -> sts=%d\n", __func__,
+ offset, size, (int)size, data, status);
+ return status;
+}
+
+#define TPM_STS_VALID 0x80
+#define TPM_STS_COMMAND_READY 0x40
+#define TPM_STS_GO 0x20
+#define TPM_STS_DATA_AVAIL 0x10
+#define TPM_STS_EXPECT 0x08
+#define TPM_STS_RESPONSE_RETRY 0x02
+#define TPM_STS_ERR_VAL 0x07 /* bit2...bit0 reads always 0 */
+
+#define TPM_I2C_SHORT_TIMEOUT 750 /* ms */
+#define TPM_I2C_LONG_TIMEOUT 2000 /* 2 sec */
+
+/* read TPM_STS register */
+static u8 i2c_nuvoton_read_status(struct tpm_chip *chip)
+{
+ struct i2c_client *client = to_i2c_client(chip->dev);
+ s32 status;
+ u8 data;
+
+ status = i2c_nuvoton_read_buf(client, TPM_STS, 1, &data);
+ if (status <= 0) {
+ dev_err(chip->dev, "%s() error return %d\n", __func__,
+ status);
+ data = TPM_STS_ERR_VAL;
+ }
+
+ return data;
+}
+
+/* write byte to TPM_STS register */
+static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data)
+{
+ s32 status;
+ int i;
+
+ /* this causes the current command to be aborted */
+ for (i = 0, status = -1; i < TPM_I2C_RETRY_COUNT && status < 0; i++) {
+ status = i2c_nuvoton_write_buf(client, TPM_STS, 1, &data);
+ msleep(TPM_I2C_BUS_DELAY);
+ }
+ return status;
+}
+
+/* write commandReady to TPM_STS register */
+static void i2c_nuvoton_ready(struct tpm_chip *chip)
+{
+ struct i2c_client *client = to_i2c_client(chip->dev);
+ s32 status;
+
+ /* this causes the current command to be aborted */
+ status = i2c_nuvoton_write_status(client, TPM_STS_COMMAND_READY);
+ if (status < 0)
+ dev_err(chip->dev,
+ "%s() fail to write TPM_STS.commandReady\n", __func__);
+}
+
+/* read burstCount field from TPM_STS register
+ * return -1 on fail to read */
+static int i2c_nuvoton_get_burstcount(struct i2c_client *client,
+ struct tpm_chip *chip)
+{
+ unsigned long stop = jiffies + chip->vendor.timeout_d;
+ s32 status;
+ int burst_count = -1;
+ u8 data;
+
+ /* wait for burstcount to be non-zero */
+ do {
+ /* in I2C burstCount is 1 byte */
+ status = i2c_nuvoton_read_buf(client, TPM_BURST_COUNT, 1,
+ &data);
+ if (status > 0 && data > 0) {
+ burst_count = min_t(u8, TPM_I2C_MAX_BUF_SIZE, data);
+ break;
+ }
+ msleep(TPM_I2C_BUS_DELAY);
+ } while (time_before(jiffies, stop));
+
+ return burst_count;
+}
+
+/*
+ * WPCT301/NPCT501 SINT# supports only dataAvail
+ * any call to this function which is not waiting for dataAvail will
+ * set queue to NULL to avoid waiting for interrupt
+ */
+static bool i2c_nuvoton_check_status(struct tpm_chip *chip, u8 mask, u8 value)
+{
+ u8 status = i2c_nuvoton_read_status(chip);
+ return (status != TPM_STS_ERR_VAL) && ((status & mask) == value);
+}
+
+static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value,
+ u32 timeout, wait_queue_head_t *queue)
+{
+ if (chip->vendor.irq && queue) {
+ s32 rc;
+ DEFINE_WAIT(wait);
+ struct priv_data *priv = chip->vendor.priv;
+ unsigned int cur_intrs = priv->intrs;
+
+ enable_irq(chip->vendor.irq);
+ rc = wait_event_interruptible_timeout(*queue,
+ cur_intrs != priv->intrs,
+ timeout);
+ if (rc > 0)
+ return 0;
+ /* At this point we know that the SINT pin is asserted, so we
+ * do not need to do i2c_nuvoton_check_status */
+ } else {
+ unsigned long ten_msec, stop;
+ bool status_valid;
+
+ /* check current status */
+ status_valid = i2c_nuvoton_check_status(chip, mask, value);
+ if (status_valid)
+ return 0;
+
+ /* use polling to wait for the event */
+ ten_msec = jiffies + msecs_to_jiffies(TPM_I2C_RETRY_DELAY_LONG);
+ stop = jiffies + timeout;
+ do {
+ if (time_before(jiffies, ten_msec))
+ msleep(TPM_I2C_RETRY_DELAY_SHORT);
+ else
+ msleep(TPM_I2C_RETRY_DELAY_LONG);
+ status_valid = i2c_nuvoton_check_status(chip, mask,
+ value);
+ if (status_valid)
+ return 0;
+ } while (time_before(jiffies, stop));
+ }
+ dev_err(chip->dev, "%s(%02x, %02x) -> timeout\n", __func__, mask,
+ value);
+ return -ETIMEDOUT;
+}
+
+/* wait for dataAvail field to be set in the TPM_STS register */
+static int i2c_nuvoton_wait_for_data_avail(struct tpm_chip *chip, u32 timeout,
+ wait_queue_head_t *queue)
+{
+ return i2c_nuvoton_wait_for_stat(chip,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ timeout, queue);
+}
+
+/* Read @count bytes into @buf from TPM_RD_FIFO register */
+static int i2c_nuvoton_recv_data(struct i2c_client *client,
+ struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ s32 rc;
+ int burst_count, bytes2read, size = 0;
+
+ while (size < count &&
+ i2c_nuvoton_wait_for_data_avail(chip,
+ chip->vendor.timeout_c,
+ &chip->vendor.read_queue) == 0) {
+ burst_count = i2c_nuvoton_get_burstcount(client, chip);
+ if (burst_count < 0) {
+ dev_err(chip->dev,
+ "%s() fail to read burstCount=%d\n", __func__,
+ burst_count);
+ return -EIO;
+ }
+ bytes2read = min_t(size_t, burst_count, count - size);
+ rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_R,
+ bytes2read, &buf[size]);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "%s() fail on i2c_nuvoton_read_buf()=%d\n",
+ __func__, rc);
+ return -EIO;
+ }
+ dev_dbg(chip->dev, "%s(%d):", __func__, bytes2read);
+ size += bytes2read;
+ }
+
+ return size;
+}
+
+/* Read TPM command results */
+static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct device *dev = chip->dev;
+ struct i2c_client *client = to_i2c_client(dev);
+ s32 rc;
+ int expected, status, burst_count, retries, size = 0;
+
+ if (count < TPM_HEADER_SIZE) {
+ i2c_nuvoton_ready(chip); /* return to idle */
+ dev_err(dev, "%s() count < header size\n", __func__);
+ return -EIO;
+ }
+ for (retries = 0; retries < TPM_RETRY; retries++) {
+ if (retries > 0) {
+ /* if this is not the first trial, set responseRetry */
+ i2c_nuvoton_write_status(client,
+ TPM_STS_RESPONSE_RETRY);
+ }
+ /*
+ * read first available (> 10 bytes), including:
+ * tag, paramsize, and result
+ */
+ status = i2c_nuvoton_wait_for_data_avail(
+ chip, chip->vendor.timeout_c, &chip->vendor.read_queue);
+ if (status != 0) {
+ dev_err(dev, "%s() timeout on dataAvail\n", __func__);
+ size = -ETIMEDOUT;
+ continue;
+ }
+ burst_count = i2c_nuvoton_get_burstcount(client, chip);
+ if (burst_count < 0) {
+ dev_err(dev, "%s() fail to get burstCount\n", __func__);
+ size = -EIO;
+ continue;
+ }
+ size = i2c_nuvoton_recv_data(client, chip, buf,
+ burst_count);
+ if (size < TPM_HEADER_SIZE) {
+ dev_err(dev, "%s() fail to read header\n", __func__);
+ size = -EIO;
+ continue;
+ }
+ /*
+ * convert number of expected bytes field from big endian 32 bit
+ * to machine native
+ */
+ expected = be32_to_cpu(*(__be32 *) (buf + 2));
+ if (expected > count) {
+ dev_err(dev, "%s() expected > count\n", __func__);
+ size = -EIO;
+ continue;
+ }
+ rc = i2c_nuvoton_recv_data(client, chip, &buf[size],
+ expected - size);
+ size += rc;
+ if (rc < 0 || size < expected) {
+ dev_err(dev, "%s() fail to read remainder of result\n",
+ __func__);
+ size = -EIO;
+ continue;
+ }
+ if (i2c_nuvoton_wait_for_stat(
+ chip, TPM_STS_VALID | TPM_STS_DATA_AVAIL,
+ TPM_STS_VALID, chip->vendor.timeout_c,
+ NULL)) {
+ dev_err(dev, "%s() error left over data\n", __func__);
+ size = -ETIMEDOUT;
+ continue;
+ }
+ break;
+ }
+ i2c_nuvoton_ready(chip);
+ dev_dbg(chip->dev, "%s() -> %d\n", __func__, size);
+ return size;
+}
+
+/*
+ * Send TPM command.
+ *
+ * If interrupts are used (signaled by an irq set in the vendor structure)
+ * tpm.c can skip polling for the data to be available as the interrupt is
+ * waited for here
+ */
+static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct device *dev = chip->dev;
+ struct i2c_client *client = to_i2c_client(dev);
+ u32 ordinal;
+ size_t count = 0;
+ int burst_count, bytes2write, retries, rc = -EIO;
+
+ for (retries = 0; retries < TPM_RETRY; retries++) {
+ i2c_nuvoton_ready(chip);
+ if (i2c_nuvoton_wait_for_stat(chip, TPM_STS_COMMAND_READY,
+ TPM_STS_COMMAND_READY,
+ chip->vendor.timeout_b, NULL)) {
+ dev_err(dev, "%s() timeout on commandReady\n",
+ __func__);
+ rc = -EIO;
+ continue;
+ }
+ rc = 0;
+ while (count < len - 1) {
+ burst_count = i2c_nuvoton_get_burstcount(client,
+ chip);
+ if (burst_count < 0) {
+ dev_err(dev, "%s() fail get burstCount\n",
+ __func__);
+ rc = -EIO;
+ break;
+ }
+ bytes2write = min_t(size_t, burst_count,
+ len - 1 - count);
+ rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W,
+ bytes2write, &buf[count]);
+ if (rc < 0) {
+ dev_err(dev, "%s() fail i2cWriteBuf\n",
+ __func__);
+ break;
+ }
+ dev_dbg(dev, "%s(%d):", __func__, bytes2write);
+ count += bytes2write;
+ rc = i2c_nuvoton_wait_for_stat(chip,
+ TPM_STS_VALID |
+ TPM_STS_EXPECT,
+ TPM_STS_VALID |
+ TPM_STS_EXPECT,
+ chip->vendor.timeout_c,
+ NULL);
+ if (rc < 0) {
+ dev_err(dev, "%s() timeout on Expect\n",
+ __func__);
+ rc = -ETIMEDOUT;
+ break;
+ }
+ }
+ if (rc < 0)
+ continue;
+
+ /* write last byte */
+ rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W, 1,
+ &buf[count]);
+ if (rc < 0) {
+ dev_err(dev, "%s() fail to write last byte\n",
+ __func__);
+ rc = -EIO;
+ continue;
+ }
+ dev_dbg(dev, "%s(last): %02x", __func__, buf[count]);
+ rc = i2c_nuvoton_wait_for_stat(chip,
+ TPM_STS_VALID | TPM_STS_EXPECT,
+ TPM_STS_VALID,
+ chip->vendor.timeout_c, NULL);
+ if (rc) {
+ dev_err(dev, "%s() timeout on Expect to clear\n",
+ __func__);
+ rc = -ETIMEDOUT;
+ continue;
+ }
+ break;
+ }
+ if (rc < 0) {
+ /* retries == TPM_RETRY */
+ i2c_nuvoton_ready(chip);
+ return rc;
+ }
+ /* execute the TPM command */
+ rc = i2c_nuvoton_write_status(client, TPM_STS_GO);
+ if (rc < 0) {
+ dev_err(dev, "%s() fail to write Go\n", __func__);
+ i2c_nuvoton_ready(chip);
+ return rc;
+ }
+ ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
+ rc = i2c_nuvoton_wait_for_data_avail(chip,
+ tpm_calc_ordinal_duration(chip,
+ ordinal),
+ &chip->vendor.read_queue);
+ if (rc) {
+ dev_err(dev, "%s() timeout command duration\n", __func__);
+ i2c_nuvoton_ready(chip);
+ return rc;
+ }
+
+ dev_dbg(dev, "%s() -> %zd\n", __func__, len);
+ return len;
+}
+
+static bool i2c_nuvoton_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == TPM_STS_COMMAND_READY);
+}
+
+static const struct file_operations i2c_nuvoton_ops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = tpm_open,
+ .read = tpm_read,
+ .write = tpm_write,
+ .release = tpm_release,
+};
+
+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
+static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
+static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
+static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
+static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
+static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
+static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
+static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
+
+static struct attribute *i2c_nuvoton_attrs[] = {
+ &dev_attr_pubek.attr,
+ &dev_attr_pcrs.attr,
+ &dev_attr_enabled.attr,
+ &dev_attr_active.attr,
+ &dev_attr_owned.attr,
+ &dev_attr_temp_deactivated.attr,
+ &dev_attr_caps.attr,
+ &dev_attr_cancel.attr,
+ &dev_attr_durations.attr,
+ &dev_attr_timeouts.attr,
+ NULL,
+};
+
+static struct attribute_group i2c_nuvoton_attr_grp = {
+ .attrs = i2c_nuvoton_attrs
+};
+
+static const struct tpm_vendor_specific tpm_i2c = {
+ .status = i2c_nuvoton_read_status,
+ .recv = i2c_nuvoton_recv,
+ .send = i2c_nuvoton_send,
+ .cancel = i2c_nuvoton_ready,
+ .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_canceled = i2c_nuvoton_req_canceled,
+ .attr_group = &i2c_nuvoton_attr_grp,
+ .miscdev.fops = &i2c_nuvoton_ops,
+};
+
+/* The only purpose for the handler is to signal to any waiting threads that
+ * the interrupt is currently being asserted. The driver does not do any
+ * processing triggered by interrupts, and the chip provides no way to mask at
+ * the source (plus that would be slow over I2C). Run the IRQ as a one-shot;
+ * this means it cannot be shared. */
+static irqreturn_t i2c_nuvoton_int_handler(int dummy, void *dev_id)
+{
+ struct tpm_chip *chip = dev_id;
+ struct priv_data *priv = chip->vendor.priv;
+
+ priv->intrs++;
+ wake_up(&chip->vendor.read_queue);
+ disable_irq_nosync(chip->vendor.irq);
+ return IRQ_HANDLED;
+}
+
+static int get_vid(struct i2c_client *client, u32 *res)
+{
+ static const u8 vid_did_rid_value[] = { 0x50, 0x10, 0xfe };
+ u32 temp;
+ s32 rc;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+ rc = i2c_nuvoton_read_buf(client, TPM_VID_DID_RID, 4, (u8 *)&temp);
+ if (rc < 0)
+ return rc;
+
+ /* check WPCT301 values - ignore RID */
+ if (memcmp(&temp, vid_did_rid_value, sizeof(vid_did_rid_value))) {
+ /*
+ * f/w rev 2.81 has an issue where the VID_DID_RID is not
+ * reporting the right value. so give it another chance at
+ * offset 0x20 (FIFO_W).
+ */
+ rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_W, 4,
+ (u8 *) (&temp));
+ if (rc < 0)
+ return rc;
+
+ /* check WPCT301 values - ignore RID */
+ if (memcmp(&temp, vid_did_rid_value,
+ sizeof(vid_did_rid_value)))
+ return -ENODEV;
+ }
+
+ *res = temp;
+ return 0;
+}
+
+static int i2c_nuvoton_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rc;
+ struct tpm_chip *chip;
+ struct device *dev = &client->dev;
+ u32 vid = 0;
+
+ rc = get_vid(client, &vid);
+ if (rc)
+ return rc;
+
+ dev_info(dev, "VID: %04X DID: %02X RID: %02X\n", (u16) vid,
+ (u8) (vid >> 16), (u8) (vid >> 24));
+
+ chip = tpm_register_hardware(dev, &tpm_i2c);
+ if (!chip) {
+ dev_err(dev, "%s() error in tpm_register_hardware\n", __func__);
+ return -ENODEV;
+ }
+
+ chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data),
+ GFP_KERNEL);
+ init_waitqueue_head(&chip->vendor.read_queue);
+ init_waitqueue_head(&chip->vendor.int_queue);
+
+ /* Default timeouts */
+ chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->vendor.timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT);
+ chip->vendor.timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->vendor.timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+
+ /*
+ * I2C intfcaps (interrupt capabilities) in the chip are hard-coded to:
+ * TPM_INTF_INT_LEVEL_LOW | TPM_INTF_DATA_AVAIL_INT
+ * The IRQ should be set in the i2c_board_info (which is done
+ * automatically in of_i2c_register_devices, for device tree users). */
+ chip->vendor.irq = client->irq;
+
+ if (chip->vendor.irq) {
+ dev_dbg(dev, "%s() chip-vendor.irq\n", __func__);
+ rc = devm_request_irq(dev, chip->vendor.irq,
+ i2c_nuvoton_int_handler,
+ IRQF_TRIGGER_LOW,
+ chip->vendor.miscdev.name,
+ chip);
+ if (rc) {
+ dev_err(dev, "%s() Unable to request irq: %d for use\n",
+ __func__, chip->vendor.irq);
+ chip->vendor.irq = 0;
+ } else {
+ /* Clear any pending interrupt */
+ i2c_nuvoton_ready(chip);
+ /* - wait for TPM_STS==0xA0 (stsValid, commandReady) */
+ rc = i2c_nuvoton_wait_for_stat(chip,
+ TPM_STS_COMMAND_READY,
+ TPM_STS_COMMAND_READY,
+ chip->vendor.timeout_b,
+ NULL);
+ if (rc == 0) {
+ /*
+ * TIS is in ready state
+ * write dummy byte to enter reception state
+ * TPM_DATA_FIFO_W <- rc (0)
+ */
+ rc = i2c_nuvoton_write_buf(client,
+ TPM_DATA_FIFO_W,
+ 1, (u8 *) (&rc));
+ if (rc < 0)
+ goto out_err;
+ /* TPM_STS <- 0x40 (commandReady) */
+ i2c_nuvoton_ready(chip);
+ } else {
+ /*
+ * timeout_b reached - command was
+ * aborted. TIS should now be in idle state -
+ * only TPM_STS_VALID should be set
+ */
+ if (i2c_nuvoton_read_status(chip) !=
+ TPM_STS_VALID) {
+ rc = -EIO;
+ goto out_err;
+ }
+ }
+ }
+ }
+
+ if (tpm_get_timeouts(chip)) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ if (tpm_do_selftest(chip)) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ tpm_dev_vendor_release(chip);
+ tpm_remove_hardware(chip->dev);
+ return rc;
+}
+
+static int i2c_nuvoton_remove(struct i2c_client *client)
+{
+ struct device *dev = &(client->dev);
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ if (chip)
+ tpm_dev_vendor_release(chip);
+ tpm_remove_hardware(dev);
+ kfree(chip);
+ return 0;
+}
+
+
+static const struct i2c_device_id i2c_nuvoton_id[] = {
+ {I2C_DRIVER_NAME, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, i2c_nuvoton_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id i2c_nuvoton_of_match[] = {
+ {.compatible = "nuvoton,npct501"},
+ {.compatible = "winbond,wpct301"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, i2c_nuvoton_of_match);
+#endif
+
+static SIMPLE_DEV_PM_OPS(i2c_nuvoton_pm_ops, tpm_pm_suspend, tpm_pm_resume);
+
+static struct i2c_driver i2c_nuvoton_driver = {
+ .id_table = i2c_nuvoton_id,
+ .probe = i2c_nuvoton_probe,
+ .remove = i2c_nuvoton_remove,
+ .driver = {
+ .name = I2C_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &i2c_nuvoton_pm_ops,
+ .of_match_table = of_match_ptr(i2c_nuvoton_of_match),
+ },
+};
+
+module_i2c_driver(i2c_nuvoton_driver);
+
+MODULE_AUTHOR("Dan Morav (dan.morav@nuvoton.com)");
+MODULE_DESCRIPTION("Nuvoton TPM I2C Driver");
+MODULE_LICENSE("GPL");
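Wiring this driver up is similar to the Atmel case, except that the Nuvoton driver also consumes client->irq for the SINT# line and falls back to polling when it is zero. A sketch with placeholder values (the address and IRQ number are assumptions):

    #include <linux/i2c.h>

    static struct i2c_board_info nuvoton_tpm_info __initdata = {
            I2C_BOARD_INFO("tpm_i2c_nuvoton", 0x57),        /* placeholder address */
            .irq = 17,      /* placeholder SINT# interrupt; 0 selects polling */
    };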
diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c
index 5bb8e2ddd3b3..a0d6ceb5d005 100644
--- a/drivers/char/tpm/tpm_i2c_stm_st33.c
+++ b/drivers/char/tpm/tpm_i2c_stm_st33.c
@@ -584,7 +584,7 @@ static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
-static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static struct attribute *stm_tpm_attrs[] = {
@@ -746,8 +746,6 @@ tpm_st33_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
tpm_get_timeouts(chip);
- i2c_set_clientdata(client, chip);
-
dev_info(chip->dev, "TPM I2C Initialized\n");
return 0;
_irq_set:
@@ -807,24 +805,18 @@ static int tpm_st33_i2c_remove(struct i2c_client *client)
#ifdef CONFIG_PM_SLEEP
/*
* tpm_st33_i2c_pm_suspend suspend the TPM device
- * Added: Work around when suspend and no tpm application is running, suspend
- * may fail because chip->data_buffer is not set (only set in tpm_open in Linux
- * TPM core)
* @param: client, the i2c_client description (TPM I2C description).
* @param: mesg, the power management message.
* @return: 0 in case of success.
*/
static int tpm_st33_i2c_pm_suspend(struct device *dev)
{
- struct tpm_chip *chip = dev_get_drvdata(dev);
struct st33zp24_platform_data *pin_infos = dev->platform_data;
int ret = 0;
if (power_mgt) {
gpio_set_value(pin_infos->io_lpcpd, 0);
} else {
- if (chip->data_buffer == NULL)
- chip->data_buffer = pin_infos->tpm_i2c_buffer[0];
ret = tpm_pm_suspend(dev);
}
return ret;
@@ -849,8 +841,6 @@ static int tpm_st33_i2c_pm_resume(struct device *dev)
TPM_STS_VALID) == TPM_STS_VALID,
chip->vendor.timeout_b);
} else {
- if (chip->data_buffer == NULL)
- chip->data_buffer = pin_infos->tpm_i2c_buffer[0];
ret = tpm_pm_resume(dev);
if (!ret)
tpm_do_selftest(chip);
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 56b07c35a13e..2783a42aa732 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -98,7 +98,7 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
if (count < len) {
dev_err(ibmvtpm->dev,
- "Invalid size in recv: count=%ld, crq_size=%d\n",
+ "Invalid size in recv: count=%zd, crq_size=%d\n",
count, len);
return -EIO;
}
@@ -136,7 +136,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
if (count > ibmvtpm->rtce_size) {
dev_err(ibmvtpm->dev,
- "Invalid size in send: count=%ld, rtce_size=%d\n",
+ "Invalid size in send: count=%zd, rtce_size=%d\n",
count, ibmvtpm->rtce_size);
return -EIO;
}
@@ -419,7 +419,7 @@ static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
NULL);
-static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index 2168d15bc728..8e562dc65601 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -452,12 +452,8 @@ int tpm_add_ppi(struct kobject *parent)
{
return sysfs_create_group(parent, &ppi_attr_grp);
}
-EXPORT_SYMBOL_GPL(tpm_add_ppi);
void tpm_remove_ppi(struct kobject *parent)
{
sysfs_remove_group(parent, &ppi_attr_grp);
}
-EXPORT_SYMBOL_GPL(tpm_remove_ppi);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 5796d0157ce0..1b74459c0723 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -448,7 +448,7 @@ static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
NULL);
-static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 94c280d36e8b..c8ff4df81779 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -351,8 +351,6 @@ static int tpmfront_probe(struct xenbus_device *dev,
tpm_get_timeouts(priv->chip);
- dev_set_drvdata(&dev->dev, priv->chip);
-
return rv;
}
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 7be41e676a64..00a3abe103a5 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -60,7 +60,7 @@ static int s2mps11_clk_prepare(struct clk_hw *hw)
struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
int ret;
- ret = regmap_update_bits(s2mps11->iodev->regmap,
+ ret = regmap_update_bits(s2mps11->iodev->regmap_pmic,
S2MPS11_REG_RTC_CTRL,
s2mps11->mask, s2mps11->mask);
if (!ret)
@@ -74,7 +74,7 @@ static void s2mps11_clk_unprepare(struct clk_hw *hw)
struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
int ret;
- ret = regmap_update_bits(s2mps11->iodev->regmap, S2MPS11_REG_RTC_CTRL,
+ ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, S2MPS11_REG_RTC_CTRL,
s2mps11->mask, ~s2mps11->mask);
if (!ret)
@@ -174,7 +174,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
s2mps11_clk->hw.init = &s2mps11_clks_init[i];
s2mps11_clk->mask = 1 << i;
- ret = regmap_read(s2mps11_clk->iodev->regmap,
+ ret = regmap_read(s2mps11_clk->iodev->regmap_pmic,
S2MPS11_REG_RTC_CTRL, &val);
if (ret < 0)
goto err_reg;
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index bdb953e15d2a..634c4d6dd45a 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -75,6 +75,7 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
config CLKSRC_EFM32
bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32
depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
+ select CLKSRC_MMIO
default ARCH_EFM32
help
Support to use the timers of EFM32 SoCs as clock source and clock
@@ -87,6 +88,7 @@ config ARM_ARCH_TIMER
config ARM_ARCH_TIMER_EVTSTREAM
bool "Support for ARM architected timer event stream generation"
default y if ARM_ARCH_TIMER
+ depends on ARM_ARCH_TIMER
help
This option enables support for event stream generation based on
the ARM architected timer. It is used for waking up CPUs executing
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c
index 35639cf4e5a2..b9ddd9e3a2f5 100644
--- a/drivers/clocksource/clksrc-of.c
+++ b/drivers/clocksource/clksrc-of.c
@@ -35,6 +35,5 @@ void __init clocksource_of_init(void)
init_func = match->data;
init_func(np);
- of_node_put(np);
}
}
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 45ba8aecc729..2a2ea2717f3a 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -108,12 +108,11 @@ static void __init add_clocksource(struct device_node *source_timer)
static u64 read_sched_clock(void)
{
- return __raw_readl(sched_io_base);
+ return ~__raw_readl(sched_io_base);
}
static const struct of_device_id sptimer_ids[] __initconst = {
{ .compatible = "picochip,pc3x2-rtc" },
- { .compatible = "snps,dw-apb-timer-sp" },
{ /* Sentinel */ },
};
@@ -151,4 +150,6 @@ static void __init dw_apb_timer_init(struct device_node *timer)
num_called++;
}
CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init);
-CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer-osc", dw_apb_timer_init);
+CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init);
+CLOCKSOURCE_OF_DECLARE(apb_timer_sp, "snps,dw-apb-timer-sp", dw_apb_timer_init);
+CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer", dw_apb_timer_init);
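The read_sched_clock() change above deserves a note: the DW APB timer counts down, while sched_clock() needs a value that increases monotonically, so complementing the raw register converts one into the other without extra arithmetic:

    /* counter:  0xffffffff, 0xfffffffe, 0xfffffffd, ...  (decreasing) */
    /* ~counter: 0x00000000, 0x00000001, 0x00000002, ...  (increasing) */
    u32 ticks = ~__raw_readl(sched_io_base);        /* as in read_sched_clock() */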
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 4aac9ee0d0c0..3cf12834681e 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -313,8 +313,20 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
goto err1;
}
- return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
- cfg->clockevent_rating);
+ ret = clk_prepare(p->clk);
+ if (ret < 0)
+ goto err2;
+
+ ret = sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
+ cfg->clockevent_rating);
+ if (ret < 0)
+ goto err3;
+
+ return 0;
+ err3:
+ clk_unprepare(p->clk);
+ err2:
+ clk_put(p->clk);
err1:
iounmap(p->mapbase);
err0:
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 78b8dae49628..63557cda0a7d 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -472,12 +472,26 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
ret = PTR_ERR(p->clk);
goto err1;
}
+
+ ret = clk_prepare(p->clk);
+ if (ret < 0)
+ goto err2;
+
p->cs_enabled = false;
p->enable_count = 0;
- return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
- cfg->clockevent_rating,
- cfg->clocksource_rating);
+ ret = sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
+ cfg->clockevent_rating,
+ cfg->clocksource_rating);
+ if (ret < 0)
+ goto err3;
+
+ return 0;
+
+ err3:
+ clk_unprepare(p->clk);
+ err2:
+ clk_put(p->clk);
err1:
iounmap(p->mapbase);
err0:
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index 2fb4695a28d8..a4f6119aafd8 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -179,6 +179,9 @@ static void __init sun4i_timer_init(struct device_node *node)
writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
timer_base + TIMER_CTL_REG(0));
+ /* Make sure timer is stopped before playing with interrupts */
+ sun4i_clkevt_time_stop(0);
+
ret = setup_irq(irq, &sun4i_timer_irq);
if (ret)
pr_warn("failed to setup irq %d\n", irq);
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index d8e47e502785..4e7f6802e840 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -256,11 +256,6 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;
/*
- * Set scale and timer for sched_clock.
- */
- sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
-
- /*
* Setup free-running clocksource timer (interrupts
* disabled).
*/
@@ -270,6 +265,11 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN |
TIMER0_DIV(TIMER_DIVIDER_SHIFT));
+ /*
+ * Set scale and timer for sched_clock.
+ */
+ sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
+
clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
"armada_370_xp_clocksource",
timer_clk, 300, 32, clocksource_mmio_readl_down);
diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c
index 856ad80418ae..7c03dd84f66a 100644
--- a/drivers/cpufreq/at32ap-cpufreq.c
+++ b/drivers/cpufreq/at32ap-cpufreq.c
@@ -58,7 +58,7 @@ static int at32_set_target(struct cpufreq_policy *policy, unsigned int index)
return 0;
}
-static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy)
+static int at32_cpufreq_driver_init(struct cpufreq_policy *policy)
{
unsigned int frequency, rate, min_freq;
int retval, steps, i;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 02d534da22dd..16d7b4ac94be 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -828,6 +828,12 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
int ret = 0;
memcpy(&new_policy, policy, sizeof(*policy));
+
+ /* Use the default policy if it's valid. */
+ if (cpufreq_driver->setpolicy)
+ cpufreq_parse_governor(policy->governor->name,
+ &new_policy.policy, NULL);
+
/* assure that the starting sequence is run in cpufreq_set_policy */
policy->governor = NULL;
@@ -845,8 +851,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
- unsigned int cpu, struct device *dev,
- bool frozen)
+ unsigned int cpu, struct device *dev)
{
int ret = 0;
unsigned long flags;
@@ -877,11 +882,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
}
}
- /* Don't touch sysfs links during light-weight init */
- if (!frozen)
- ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
-
- return ret;
+ return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}
#endif
@@ -926,6 +927,27 @@ err_free_policy:
return NULL;
}
+static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
+{
+ struct kobject *kobj;
+ struct completion *cmp;
+
+ down_read(&policy->rwsem);
+ kobj = &policy->kobj;
+ cmp = &policy->kobj_unregister;
+ up_read(&policy->rwsem);
+ kobject_put(kobj);
+
+ /*
+ * We need to make sure that the underlying kobj is
+ * actually not referenced anymore by anybody before we
+ * proceed with unloading.
+ */
+ pr_debug("waiting for dropping of refcount\n");
+ wait_for_completion(cmp);
+ pr_debug("wait complete\n");
+}
+
static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
free_cpumask_var(policy->related_cpus);
@@ -986,7 +1008,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
+ ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
up_read(&cpufreq_rwsem);
return ret;
}
@@ -1096,7 +1118,10 @@ err_get_freq:
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
err_set_policy_cpu:
+ if (frozen)
+ cpufreq_policy_put_kobj(policy);
cpufreq_policy_free(policy);
+
nomem_out:
up_read(&cpufreq_rwsem);
@@ -1118,7 +1143,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
}
static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
- unsigned int old_cpu, bool frozen)
+ unsigned int old_cpu)
{
struct device *cpu_dev;
int ret;
@@ -1126,10 +1151,6 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
/* first sibling now owns the new sysfs dir */
cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
- /* Don't touch sysfs files during light-weight tear-down */
- if (frozen)
- return cpu_dev->id;
-
sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
if (ret) {
@@ -1196,7 +1217,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
if (!frozen)
sysfs_remove_link(&dev->kobj, "cpufreq");
} else if (cpus > 1) {
- new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
+ new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
if (new_cpu >= 0) {
update_policy_cpu(policy, new_cpu);
@@ -1218,8 +1239,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
int ret;
unsigned long flags;
struct cpufreq_policy *policy;
- struct kobject *kobj;
- struct completion *cmp;
read_lock_irqsave(&cpufreq_driver_lock, flags);
policy = per_cpu(cpufreq_cpu_data, cpu);
@@ -1249,22 +1268,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
}
}
- if (!frozen) {
- down_read(&policy->rwsem);
- kobj = &policy->kobj;
- cmp = &policy->kobj_unregister;
- up_read(&policy->rwsem);
- kobject_put(kobj);
-
- /*
- * We need to make sure that the underlying kobj is
- * actually not referenced anymore by anybody before we
- * proceed with unloading.
- */
- pr_debug("waiting for dropping of refcount\n");
- wait_for_completion(cmp);
- pr_debug("wait complete\n");
- }
+ if (!frozen)
+ cpufreq_policy_put_kobj(policy);
/*
* Perform the ->exit() even during light-weight tear-down,
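
The cpufreq hunks above factor the duplicated kobject release sequence into cpufreq_policy_put_kobj(): drop the reference under the policy rwsem, then block on the kobj_unregister completion until the last holder is gone. A minimal userspace sketch of the same put-then-wait pattern, using a pthread condition variable in place of the kernel completion (all names below are hypothetical):

    /* Sketch only: a refcounted object whose teardown waits for the
     * remaining holders, as cpufreq_policy_put_kobj() does with
     * kobject_put() + wait_for_completion(). */
    #include <pthread.h>
    #include <stdio.h>

    struct obj {
            int refcount;                   /* protected by lock */
            pthread_mutex_t lock;
            pthread_cond_t released;        /* signalled at refcount 0 */
    };

    static void obj_put(struct obj *o)
    {
            pthread_mutex_lock(&o->lock);
            if (--o->refcount == 0)
                    pthread_cond_broadcast(&o->released);
            pthread_mutex_unlock(&o->lock);
    }

    /* Analogue of cpufreq_policy_put_kobj(): drop our ref, wait for rest. */
    static void obj_put_and_wait(struct obj *o)
    {
            pthread_mutex_lock(&o->lock);
            --o->refcount;
            while (o->refcount > 0)
                    pthread_cond_wait(&o->released, &o->lock);
            pthread_mutex_unlock(&o->lock);
    }

    int main(void)
    {
            struct obj o = { .refcount = 2,
                             .lock = PTHREAD_MUTEX_INITIALIZER,
                             .released = PTHREAD_COND_INITIALIZER };

            obj_put(&o);            /* another user drops its reference */
            obj_put_and_wait(&o);   /* teardown: returns once refcount is 0 */
            printf("object fully released\n");
            return 0;
    }
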
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 218460fcd2e4..25a70d06c5bf 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -68,6 +68,9 @@ static void cs_check_cpu(int cpu, unsigned int load)
dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
+ if (dbs_info->requested_freq > policy->max)
+ dbs_info->requested_freq = policy->max;
+
__cpufreq_driver_target(policy, dbs_info->requested_freq,
CPUFREQ_RELATION_H);
return;
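
The conservative-governor fix above caps dbs_info->requested_freq at policy->max before the CPUFREQ_RELATION_H request, so back-to-back load spikes can no longer push the requested value past the policy limit. A hedged sketch of that step-and-clamp logic (step_up() and the numbers are illustrative, not driver code):

    #include <assert.h>

    /* Step the requested frequency up, clamped to the policy maximum. */
    static unsigned int step_up(unsigned int requested, unsigned int step,
                                unsigned int max)
    {
            requested += step;
            if (requested > max)    /* the clamp added by the patch */
                    requested = max;
            return requested;
    }

    int main(void)
    {
            unsigned int freq = 1900000;            /* kHz, example only */

            freq = step_up(freq, 200000, 2000000);
            assert(freq == 2000000);                /* capped at max */
            return 0;
    }
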
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 0806c31e5764..e6be63561fa6 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -328,10 +328,6 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
dbs_data->cdata->gov_dbs_timer);
}
- /*
- * conservative does not implement micro like ondemand
- * governor, thus we are bound to jiffes/HZ
- */
if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
cs_dbs_info->down_skip = 0;
cs_dbs_info->enable = 1;
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index f2c75065ce19..dfd1643b0b2f 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -157,4 +157,3 @@ err_moutcore:
pr_debug("%s: failed initialization\n", __func__);
return -EINVAL;
}
-EXPORT_SYMBOL(exynos4210_cpufreq_init);
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
index 8683304ce62c..efad5e657f6f 100644
--- a/drivers/cpufreq/exynos4x12-cpufreq.c
+++ b/drivers/cpufreq/exynos4x12-cpufreq.c
@@ -211,4 +211,3 @@ err_moutcore:
pr_debug("%s: failed initialization\n", __func__);
return -EINVAL;
}
-EXPORT_SYMBOL(exynos4x12_cpufreq_init);
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c
index 9fae466d7746..8feda86fe42c 100644
--- a/drivers/cpufreq/exynos5250-cpufreq.c
+++ b/drivers/cpufreq/exynos5250-cpufreq.c
@@ -236,4 +236,3 @@ err_moutcore:
pr_err("%s: failed initialization\n", __func__);
return -EINVAL;
}
-EXPORT_SYMBOL(exynos5250_cpufreq_init);
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index be6d14307aa8..a0acd0bfba40 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -53,6 +53,7 @@ static unsigned int omap_getspeed(unsigned int cpu)
static int omap_target(struct cpufreq_policy *policy, unsigned int index)
{
+ int r, ret;
struct dev_pm_opp *opp;
unsigned long freq, volt = 0, volt_old = 0, tol = 0;
unsigned int old_freq, new_freq;
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
index f42df7ec03c5..b7309c37033d 100644
--- a/drivers/cpufreq/tegra-cpufreq.c
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -142,10 +142,8 @@ static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
mutex_lock(&tegra_cpu_lock);
- if (is_suspended) {
- ret = -EBUSY;
+ if (is_suspended)
goto out;
- }
freq = freq_table[index].frequency;
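
With the braces dropped in tegra_target() above, a request arriving while is_suspended is set takes the `goto out` with ret left at its earlier value, i.e. the request is quietly ignored rather than failed with -EBUSY. A sketch of that silently-drop-while-suspended shape (names and the lock comments are illustrative):

    #include <stdio.h>

    static int is_suspended;                /* guarded by a lock in the driver */
    static unsigned int cur_freq = 1000;

    static int set_target(unsigned int new_freq)
    {
            int ret = 0;

            /* mutex_lock(&tegra_cpu_lock) would be taken here */
            if (is_suspended)
                    goto out;       /* ret stays 0: report success, change nothing */

            cur_freq = new_freq;
    out:
            /* ...and released here */
            return ret;
    }

    int main(void)
    {
            int ret;

            is_suspended = 1;
            ret = set_target(2000);
            printf("ret=%d freq=%u\n", ret, cur_freq);      /* ret=0 freq=1000 */
            return 0;
    }
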
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 2a991e468f78..a55e68f2cfc8 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -400,7 +400,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device);
*/
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
- if (dev->registered == 0)
+ if (!dev || dev->registered == 0)
return;
cpuidle_pause_and_lock();
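
The cpuidle change above lets callers pass a NULL device to cpuidle_unregister_device() without crashing, which simplifies driver error paths that unregister unconditionally. The same defensive early-return idiom, sketched standalone (the widget type is made up):

    #include <stddef.h>
    #include <stdio.h>

    struct widget {
            int registered;
    };

    /* Safe to call with NULL or an already-unregistered widget. */
    static void widget_unregister(struct widget *w)
    {
            if (!w || w->registered == 0)
                    return;

            /* real teardown would happen here */
            w->registered = 0;
    }

    int main(void)
    {
            widget_unregister(NULL);        /* no crash: guarded above */
            printf("ok\n");
            return 0;
    }
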
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index ca89f6b84b06..e7555ff4cafd 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -4,16 +4,29 @@ config CRYPTO_DEV_FSL_CAAM
help
Enables the driver module for Freescale's Cryptographic Accelerator
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
- This module adds a job ring operation interface, and configures h/w
+ This module creates job ring devices, and configures h/w
to operate as a DPAA component automatically, depending
on h/w feature availability.
To compile this driver as a module, choose M here: the module
will be called caam.
+config CRYPTO_DEV_FSL_CAAM_JR
+ tristate "Freescale CAAM Job Ring driver backend"
+ depends on CRYPTO_DEV_FSL_CAAM
+ default y
+ help
+ Enables the driver module for Job Rings which are part of
+ Freescale's Cryptographic Accelerator
+ and Assurance Module (CAAM). This module adds a job ring operation
+ interface.
+
+ To compile this driver as a module, choose M here: the module
+ will be called caam_jr.
+
config CRYPTO_DEV_FSL_CAAM_RINGSIZE
int "Job Ring size"
- depends on CRYPTO_DEV_FSL_CAAM
+ depends on CRYPTO_DEV_FSL_CAAM_JR
range 2 9
default "9"
help
@@ -31,7 +44,7 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
config CRYPTO_DEV_FSL_CAAM_INTC
bool "Job Ring interrupt coalescing"
- depends on CRYPTO_DEV_FSL_CAAM
+ depends on CRYPTO_DEV_FSL_CAAM_JR
default n
help
Enable the Job Ring's interrupt coalescing feature.
@@ -62,7 +75,7 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
tristate "Register algorithm implementations with the Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM
+ depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
@@ -76,7 +89,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
config CRYPTO_DEV_FSL_CAAM_AHASH_API
tristate "Register hash algorithm implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM
+ depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_HASH
help
@@ -88,7 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
config CRYPTO_DEV_FSL_CAAM_RNG_API
tristate "Register caam device for hwrng API"
- depends on CRYPTO_DEV_FSL_CAAM
+ depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_RNG
select HW_RANDOM
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index d56bd0ec65d8..550758a333e7 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -6,8 +6,10 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
endif
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
-caam-objs := ctrl.o jr.o error.o key_gen.o
+caam-objs := ctrl.o
+caam_jr-objs := jr.o key_gen.o error.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 7c63b72ecd75..4cf5dec826e1 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -86,6 +86,7 @@
#else
#define debug(format, arg...)
#endif
+static struct list_head alg_list;
/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
@@ -817,7 +818,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
ivsize, 1);
print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
- req->cryptlen, 1);
+ req->cryptlen - ctx->authsize, 1);
#endif
if (err) {
@@ -971,12 +972,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
(edesc->src_nents ? : 1);
in_options = LDST_SGF;
}
- if (encrypt)
- append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
- req->cryptlen - authsize, in_options);
- else
- append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
- req->cryptlen, in_options);
+
+ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+ in_options);
if (likely(req->src == req->dst)) {
if (all_contig) {
@@ -997,7 +995,8 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
}
}
if (encrypt)
- append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
+ out_options);
else
append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
out_options);
@@ -1047,8 +1046,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
in_options = LDST_SGF;
}
- append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
- req->cryptlen - authsize, in_options);
+ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+ in_options);
if (contig & GIV_DST_CONTIG) {
dst_dma = edesc->iv_dma;
@@ -1065,7 +1064,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
}
}
- append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
+ append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
+ out_options);
}
/*
@@ -1129,7 +1129,8 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
* allocate and map the aead extended descriptor
*/
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
- int desc_bytes, bool *all_contig_ptr)
+ int desc_bytes, bool *all_contig_ptr,
+ bool encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -1144,12 +1145,22 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
bool assoc_chained = false, src_chained = false, dst_chained = false;
int ivsize = crypto_aead_ivsize(aead);
int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+ unsigned int authsize = ctx->authsize;
assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
- src_nents = sg_count(req->src, req->cryptlen, &src_chained);
- if (unlikely(req->dst != req->src))
- dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+ if (unlikely(req->dst != req->src)) {
+ src_nents = sg_count(req->src, req->cryptlen, &src_chained);
+ dst_nents = sg_count(req->dst,
+ req->cryptlen +
+ (encrypt ? authsize : (-authsize)),
+ &dst_chained);
+ } else {
+ src_nents = sg_count(req->src,
+ req->cryptlen +
+ (encrypt ? authsize : 0),
+ &src_chained);
+ }
sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
DMA_TO_DEVICE, assoc_chained);
@@ -1233,11 +1244,9 @@ static int aead_encrypt(struct aead_request *req)
u32 *desc;
int ret = 0;
- req->cryptlen += ctx->authsize;
-
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &all_contig);
+ CAAM_CMD_SZ, &all_contig, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1274,7 +1283,7 @@ static int aead_decrypt(struct aead_request *req)
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &all_contig);
+ CAAM_CMD_SZ, &all_contig, false);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1331,7 +1340,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
src_nents = sg_count(req->src, req->cryptlen, &src_chained);
if (unlikely(req->dst != req->src))
- dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+ dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
+ &dst_chained);
sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
DMA_TO_DEVICE, assoc_chained);
@@ -1425,8 +1435,6 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
u32 *desc;
int ret = 0;
- req->cryptlen += ctx->authsize;
-
/* allocate extended descriptor */
edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
CAAM_CMD_SZ, &contig);
@@ -2057,7 +2065,6 @@ static struct caam_alg_template driver_algs[] = {
struct caam_crypto_alg {
struct list_head entry;
- struct device *ctrldev;
int class1_alg_type;
int class2_alg_type;
int alg_op;
@@ -2070,14 +2077,12 @@ static int caam_cra_init(struct crypto_tfm *tfm)
struct caam_crypto_alg *caam_alg =
container_of(alg, struct caam_crypto_alg, crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
- struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
- int tgt_jr = atomic_inc_return(&priv->tfm_count);
- /*
- * distribute tfms across job rings to ensure in-order
- * crypto request processing per tfm
- */
- ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs];
+ ctx->jrdev = caam_jr_alloc();
+ if (IS_ERR(ctx->jrdev)) {
+ pr_err("Job Ring Device allocation for transform failed\n");
+ return PTR_ERR(ctx->jrdev);
+ }
/* copy descriptor header template value */
ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
@@ -2104,44 +2109,26 @@ static void caam_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
desc_bytes(ctx->sh_desc_givenc),
DMA_TO_DEVICE);
+
+ caam_jr_free(ctx->jrdev);
}
static void __exit caam_algapi_exit(void)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
struct caam_crypto_alg *t_alg, *n;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev)
- return;
-
- ctrldev = &pdev->dev;
- of_node_put(dev_node);
- priv = dev_get_drvdata(ctrldev);
-
- if (!priv->alg_list.next)
+ if (!alg_list.next)
return;
- list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
+ list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
crypto_unregister_alg(&t_alg->crypto_alg);
list_del(&t_alg->entry);
kfree(t_alg);
}
}
-static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
- struct caam_alg_template
+static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
*template)
{
struct caam_crypto_alg *t_alg;
@@ -2149,7 +2136,7 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
if (!t_alg) {
- dev_err(ctrldev, "failed to allocate t_alg\n");
+ pr_err("failed to allocate t_alg\n");
return ERR_PTR(-ENOMEM);
}
@@ -2181,62 +2168,39 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
t_alg->class1_alg_type = template->class1_alg_type;
t_alg->class2_alg_type = template->class2_alg_type;
t_alg->alg_op = template->alg_op;
- t_alg->ctrldev = ctrldev;
return t_alg;
}
static int __init caam_algapi_init(void)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
int i = 0, err = 0;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev)
- return -ENODEV;
-
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
- of_node_put(dev_node);
-
- INIT_LIST_HEAD(&priv->alg_list);
-
- atomic_set(&priv->tfm_count, -1);
+ INIT_LIST_HEAD(&alg_list);
/* register crypto algorithms the device supports */
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
/* TODO: check if h/w supports alg */
struct caam_crypto_alg *t_alg;
- t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
+ t_alg = caam_alg_alloc(&driver_algs[i]);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
- dev_warn(ctrldev, "%s alg allocation failed\n",
- driver_algs[i].driver_name);
+ pr_warn("%s alg allocation failed\n",
+ driver_algs[i].driver_name);
continue;
}
err = crypto_register_alg(&t_alg->crypto_alg);
if (err) {
- dev_warn(ctrldev, "%s alg registration failed\n",
+ pr_warn("%s alg registration failed\n",
t_alg->crypto_alg.cra_driver_name);
kfree(t_alg);
} else
- list_add_tail(&t_alg->entry, &priv->alg_list);
+ list_add_tail(&t_alg->entry, &alg_list);
}
- if (!list_empty(&priv->alg_list))
- dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
- (char *)of_get_property(dev_node, "compatible", NULL));
+ if (!list_empty(&alg_list))
+ pr_info("caam algorithms registered in /proc/crypto\n");
return err;
}
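
After the conversion above, caamalg no longer reaches into the controller's private data: registered algorithms live on the file-local alg_list, and each transform grabs a ring with caam_jr_alloc(). A userspace sketch of that register-into-a-static-list / unwind-on-exit shape (struct and function names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct alg {
            const char *name;
            struct alg *next;
    };

    static struct alg *alg_list;            /* file-local, like alg_list above */

    static int register_alg(const char *name)
    {
            struct alg *a = calloc(1, sizeof(*a));

            if (!a)
                    return -1;
            a->name = name;
            a->next = alg_list;             /* push onto the module-local list */
            alg_list = a;
            return 0;
    }

    static void unregister_all(void)
    {
            struct alg *a, *n;

            for (a = alg_list; a; a = n) {  /* deletion-safe walk */
                    n = a->next;
                    printf("unregistering %s\n", a->name);
                    free(a);
            }
            alg_list = NULL;
    }

    int main(void)
    {
            register_alg("cbc-aes-caam");
            register_alg("hmac-sha256-caam");
            unregister_all();
            return 0;
    }
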
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index e732bd962e98..0378328f47a7 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -94,6 +94,9 @@
#define debug(format, arg...)
#endif
+
+static struct list_head hash_list;
+
/* ahash per-session context */
struct caam_hash_ctx {
struct device *jrdev;
@@ -1653,7 +1656,6 @@ static struct caam_hash_template driver_hash[] = {
struct caam_hash_alg {
struct list_head entry;
- struct device *ctrldev;
int alg_type;
int alg_op;
struct ahash_alg ahash_alg;
@@ -1670,7 +1672,6 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
struct caam_hash_alg *caam_hash =
container_of(alg, struct caam_hash_alg, ahash_alg);
struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev);
/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
HASH_MSG_LEN + SHA1_DIGEST_SIZE,
@@ -1678,15 +1679,17 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
HASH_MSG_LEN + SHA256_DIGEST_SIZE,
HASH_MSG_LEN + 64,
HASH_MSG_LEN + SHA512_DIGEST_SIZE };
- int tgt_jr = atomic_inc_return(&priv->tfm_count);
int ret = 0;
/*
- * distribute tfms across job rings to ensure in-order
+ * Get a Job ring from Job Ring driver to ensure in-order
* crypto request processing per tfm
*/
- ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs];
-
+ ctx->jrdev = caam_jr_alloc();
+ if (IS_ERR(ctx->jrdev)) {
+ pr_err("Job Ring Device allocation for transform failed\n");
+ return PTR_ERR(ctx->jrdev);
+ }
/* copy descriptor header template value */
ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
@@ -1729,35 +1732,18 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
!dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
+
+ caam_jr_free(ctx->jrdev);
}
static void __exit caam_algapi_hash_exit(void)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
struct caam_hash_alg *t_alg, *n;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev)
+ if (!hash_list.next)
return;
- ctrldev = &pdev->dev;
- of_node_put(dev_node);
- priv = dev_get_drvdata(ctrldev);
-
- if (!priv->hash_list.next)
- return;
-
- list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) {
+ list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
crypto_unregister_ahash(&t_alg->ahash_alg);
list_del(&t_alg->entry);
kfree(t_alg);
@@ -1765,7 +1751,7 @@ static void __exit caam_algapi_hash_exit(void)
}
static struct caam_hash_alg *
-caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
+caam_hash_alloc(struct caam_hash_template *template,
bool keyed)
{
struct caam_hash_alg *t_alg;
@@ -1774,7 +1760,7 @@ caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
if (!t_alg) {
- dev_err(ctrldev, "failed to allocate t_alg\n");
+ pr_err("failed to allocate t_alg\n");
return ERR_PTR(-ENOMEM);
}
@@ -1805,37 +1791,15 @@ caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
t_alg->alg_type = template->alg_type;
t_alg->alg_op = template->alg_op;
- t_alg->ctrldev = ctrldev;
return t_alg;
}
static int __init caam_algapi_hash_init(void)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
int i = 0, err = 0;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev)
- return -ENODEV;
-
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
- of_node_put(dev_node);
-
- INIT_LIST_HEAD(&priv->hash_list);
-
- atomic_set(&priv->tfm_count, -1);
+ INIT_LIST_HEAD(&hash_list);
/* register crypto algorithms the device supports */
for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
@@ -1843,38 +1807,38 @@ static int __init caam_algapi_hash_init(void)
struct caam_hash_alg *t_alg;
/* register hmac version */
- t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true);
+ t_alg = caam_hash_alloc(&driver_hash[i], true);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
- dev_warn(ctrldev, "%s alg allocation failed\n",
- driver_hash[i].driver_name);
+ pr_warn("%s alg allocation failed\n",
+ driver_hash[i].driver_name);
continue;
}
err = crypto_register_ahash(&t_alg->ahash_alg);
if (err) {
- dev_warn(ctrldev, "%s alg registration failed\n",
+ pr_warn("%s alg registration failed\n",
t_alg->ahash_alg.halg.base.cra_driver_name);
kfree(t_alg);
} else
- list_add_tail(&t_alg->entry, &priv->hash_list);
+ list_add_tail(&t_alg->entry, &hash_list);
/* register unkeyed version */
- t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false);
+ t_alg = caam_hash_alloc(&driver_hash[i], false);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
- dev_warn(ctrldev, "%s alg allocation failed\n",
- driver_hash[i].driver_name);
+ pr_warn("%s alg allocation failed\n",
+ driver_hash[i].driver_name);
continue;
}
err = crypto_register_ahash(&t_alg->ahash_alg);
if (err) {
- dev_warn(ctrldev, "%s alg registration failed\n",
+ pr_warn("%s alg registration failed\n",
t_alg->ahash_alg.halg.base.cra_driver_name);
kfree(t_alg);
} else
- list_add_tail(&t_alg->entry, &priv->hash_list);
+ list_add_tail(&t_alg->entry, &hash_list);
}
return err;
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index d1939a9539c0..28486b19fc36 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -273,34 +273,23 @@ static struct hwrng caam_rng = {
static void __exit caam_rng_exit(void)
{
+ caam_jr_free(rng_ctx.jrdev);
hwrng_unregister(&caam_rng);
}
static int __init caam_rng_init(void)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
-
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev)
- return -ENODEV;
+ struct device *dev;
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
- of_node_put(dev_node);
+ dev = caam_jr_alloc();
+ if (IS_ERR(dev)) {
+ pr_err("Job Ring Device allocation for transform failed\n");
+ return PTR_ERR(dev);
+ }
- caam_init_rng(&rng_ctx, priv->jrdev[0]);
+ caam_init_rng(&rng_ctx, dev);
- dev_info(priv->jrdev[0], "registering rng-caam\n");
+ dev_info(dev, "registering rng-caam\n");
return hwrng_register(&caam_rng);
}
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index bc6d820812b6..63fb1af2c431 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -16,82 +16,75 @@
#include "error.h"
#include "ctrl.h"
-static int caam_remove(struct platform_device *pdev)
-{
- struct device *ctrldev;
- struct caam_drv_private *ctrlpriv;
- struct caam_drv_private_jr *jrpriv;
- struct caam_full __iomem *topregs;
- int ring, ret = 0;
-
- ctrldev = &pdev->dev;
- ctrlpriv = dev_get_drvdata(ctrldev);
- topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
-
- /* shut down JobRs */
- for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
- ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]);
- jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
- irq_dispose_mapping(jrpriv->irq);
- }
-
- /* Shut down debug views */
-#ifdef CONFIG_DEBUG_FS
- debugfs_remove_recursive(ctrlpriv->dfs_root);
-#endif
-
- /* Unmap controller region */
- iounmap(&topregs->ctrl);
-
- kfree(ctrlpriv->jrdev);
- kfree(ctrlpriv);
-
- return ret;
-}
-
/*
* Descriptor to instantiate RNG State Handle 0 in normal mode and
* load the JDKEK, TDKEK and TDSK registers
*/
-static void build_instantiation_desc(u32 *desc)
+static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
{
- u32 *jump_cmd;
+ u32 *jump_cmd, op_flags;
init_job_desc(desc, 0);
+ op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+ (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;
+
/* INIT RNG in non-test mode */
- append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
- OP_ALG_AS_INIT);
+ append_operation(desc, op_flags);
+
+ if (!handle && do_sk) {
+ /*
+ * For SH0, Secure Keys must be generated as well
+ */
+
+ /* wait for done */
+ jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
+ set_jump_tgt_here(desc, jump_cmd);
+
+ /*
+ * load 1 to clear written reg:
+		 * resets the done interrupt and returns the RNG to idle.
+ */
+ append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
+
+ /* Initialize State Handle */
+ append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+ OP_ALG_AAI_RNG4_SK);
+ }
- /* wait for done */
- jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
- set_jump_tgt_here(desc, jump_cmd);
+ append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
+}
- /*
- * load 1 to clear written reg:
- * resets the done interrupt and returns the RNG to idle.
- */
- append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
+/* Descriptor for deinstantiation of State Handle 0 of the RNG block. */
+static void build_deinstantiation_desc(u32 *desc, int handle)
+{
+ init_job_desc(desc, 0);
- /* generate secure keys (non-test) */
+ /* Uninstantiate State Handle 0 */
append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
- OP_ALG_RNG4_SK);
+ (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);
+
+ append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}
-static int instantiate_rng(struct device *ctrldev)
+/*
+ * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
+ * the software (no JR/QI used).
+ * @ctrldev - pointer to device
+ * @desc - descriptor to be run, built by the caller
+ * @status - descriptor status, after being run
+ *
+ * Return: - 0 if no error occurred
+ * - -ENODEV if the DECO couldn't be acquired
+ * - -EAGAIN if an error occurred while executing the descriptor
+ */
+static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
+ u32 *status)
{
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
struct caam_full __iomem *topregs;
unsigned int timeout = 100000;
- u32 *desc;
- int i, ret = 0;
-
- desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA);
- if (!desc) {
- dev_err(ctrldev, "can't allocate RNG init descriptor memory\n");
- return -ENOMEM;
- }
- build_instantiation_desc(desc);
+ u32 deco_dbg_reg, flags;
+ int i;
/* Set the bit to request direct access to DECO0 */
topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
@@ -103,36 +96,219 @@ static int instantiate_rng(struct device *ctrldev)
if (!timeout) {
dev_err(ctrldev, "failed to acquire DECO 0\n");
- ret = -EIO;
- goto out;
+ clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
+ return -ENODEV;
}
for (i = 0; i < desc_len(desc); i++)
- topregs->deco.descbuf[i] = *(desc + i);
+ wr_reg32(&topregs->deco.descbuf[i], *(desc + i));
+
+ flags = DECO_JQCR_WHL;
+ /*
+	 * If the descriptor is at least 4 words long, then the
+	 * FOUR bit in the JRCTRL register must be set.
+ */
+ if (desc_len(desc) >= 4)
+ flags |= DECO_JQCR_FOUR;
- wr_reg32(&topregs->deco.jr_ctl_hi, DECO_JQCR_WHL | DECO_JQCR_FOUR);
+ /* Instruct the DECO to execute it */
+ wr_reg32(&topregs->deco.jr_ctl_hi, flags);
timeout = 10000000;
- while ((rd_reg32(&topregs->deco.desc_dbg) & DECO_DBG_VALID) &&
- --timeout)
+ do {
+ deco_dbg_reg = rd_reg32(&topregs->deco.desc_dbg);
+ /*
+			 * If an error occurred in the descriptor, then
+ * the DECO status field will be set to 0x0D
+ */
+ if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
+ DESC_DBG_DECO_STAT_HOST_ERR)
+ break;
cpu_relax();
+ } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
- if (!timeout) {
- dev_err(ctrldev, "failed to instantiate RNG\n");
- ret = -EIO;
- }
+ *status = rd_reg32(&topregs->deco.op_status_hi) &
+ DECO_OP_STATUS_HI_ERR_MASK;
+ /* Mark the DECO as free */
clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
-out:
+
+ if (!timeout)
+ return -EAGAIN;
+
+ return 0;
+}
+
+/*
+ * instantiate_rng - builds and executes a descriptor on DECO0,
+ * which initializes the RNG block.
+ * @ctrldev - pointer to device
+ * @state_handle_mask - bitmask containing the instantiation status
+ * for the RNG4 state handles which exist in
+ * the RNG4 block: 1 if it's been instantiated
+ *			    by an external entity, 0 otherwise.
+ * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK;
+ * Caution: this can be done only once; if the keys need to be
+ * regenerated, a POR is required
+ *
+ * Return: - 0 if no error occurred
+ * - -ENOMEM if there isn't enough memory to allocate the descriptor
+ * - -ENODEV if DECO0 couldn't be acquired
+ * - -EAGAIN if an error occurred when executing the descriptor
+ *	     e.g. there was an RNG hardware error due to not "good enough"
+ *	     entropy being acquired.
+ */
+static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
+ int gen_sk)
+{
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+ struct caam_full __iomem *topregs;
+ struct rng4tst __iomem *r4tst;
+ u32 *desc, status, rdsta_val;
+ int ret = 0, sh_idx;
+
+ topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+ r4tst = &topregs->ctrl.r4tst[0];
+
+ desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+ /*
+ * If the corresponding bit is set, this state handle
+ * was initialized by somebody else, so it's left alone.
+ */
+ if ((1 << sh_idx) & state_handle_mask)
+ continue;
+
+ /* Create the descriptor for instantiating RNG State Handle */
+ build_instantiation_desc(desc, sh_idx, gen_sk);
+
+ /* Try to run it through DECO0 */
+ ret = run_descriptor_deco0(ctrldev, desc, &status);
+
+ /*
+ * If ret is not 0, or descriptor status is not 0, then
+ * something went wrong. No need to try the next state
+		 * handle (if available); bail out here.
+ * Also, if for some reason, the State Handle didn't get
+ * instantiated although the descriptor has finished
+ * without any error (HW optimizations for later
+ * CAAM eras), then try again.
+ */
+ rdsta_val =
+ rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IFMASK;
+ if (status || !(rdsta_val & (1 << sh_idx)))
+ ret = -EAGAIN;
+ if (ret)
+ break;
+
+ dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
+ /* Clear the contents before recreating the descriptor */
+ memset(desc, 0x00, CAAM_CMD_SZ * 7);
+ }
+
kfree(desc);
+
return ret;
}
/*
- * By default, the TRNG runs for 200 clocks per sample;
- * 1600 clocks per sample generates better entropy.
+ * deinstantiate_rng - builds and executes a descriptor on DECO0,
+ * which deinitializes the RNG block.
+ * @ctrldev - pointer to device
+ * @state_handle_mask - bitmask containing the instantiation status
+ * for the RNG4 state handles which exist in
+ * the RNG4 block: 1 if it's been instantiated
+ *
+ * Return: - 0 if no error occurred
+ * - -ENOMEM if there isn't enough memory to allocate the descriptor
+ * - -ENODEV if DECO0 couldn't be acquired
+ * - -EAGAIN if an error occurred when executing the descriptor
*/
-static void kick_trng(struct platform_device *pdev)
+static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
+{
+ u32 *desc, status;
+ int sh_idx, ret = 0;
+
+ desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+ /*
+ * If the corresponding bit is set, then it means the state
+ * handle was initialized by us, and thus it needs to be
+		 * deinitialized as well.
+ */
+ if ((1 << sh_idx) & state_handle_mask) {
+ /*
+			 * Create the descriptor for deinstantiating this state
+ * handle
+ */
+ build_deinstantiation_desc(desc, sh_idx);
+
+ /* Try to run it through DECO0 */
+ ret = run_descriptor_deco0(ctrldev, desc, &status);
+
+ if (ret || status) {
+ dev_err(ctrldev,
+ "Failed to deinstantiate RNG4 SH%d\n",
+ sh_idx);
+ break;
+ }
+ dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
+ }
+ }
+
+ kfree(desc);
+
+ return ret;
+}
+
+static int caam_remove(struct platform_device *pdev)
+{
+ struct device *ctrldev;
+ struct caam_drv_private *ctrlpriv;
+ struct caam_full __iomem *topregs;
+ int ring, ret = 0;
+
+ ctrldev = &pdev->dev;
+ ctrlpriv = dev_get_drvdata(ctrldev);
+ topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+
+ /* Remove platform devices for JobRs */
+ for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
+ if (ctrlpriv->jrpdev[ring])
+ of_device_unregister(ctrlpriv->jrpdev[ring]);
+ }
+
+ /* De-initialize RNG state handles initialized by this driver. */
+ if (ctrlpriv->rng4_sh_init)
+ deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
+
+ /* Shut down debug views */
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(ctrlpriv->dfs_root);
+#endif
+
+ /* Unmap controller region */
+ iounmap(&topregs->ctrl);
+
+ kfree(ctrlpriv->jrpdev);
+ kfree(ctrlpriv);
+
+ return ret;
+}
+
+/*
+ * kick_trng - sets the various parameters for enabling the initialization
+ * of the RNG4 block in CAAM
+ * @pdev - pointer to the platform device
+ * @ent_delay - Defines the length (in system clocks) of each entropy sample.
+ */
+static void kick_trng(struct platform_device *pdev, int ent_delay)
{
struct device *ctrldev = &pdev->dev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
@@ -145,14 +321,31 @@ static void kick_trng(struct platform_device *pdev)
/* put RNG4 into program mode */
setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
- /* 1600 clocks per sample */
+
+ /*
+	 * Performance-wise, it does not make sense to
+	 * set the delay to a value that is lower
+	 * than the last one that worked (i.e. with which the state
+	 * handles were instantiated properly). Thus, instead of
+	 * wasting time trying to set the values controlling the
+	 * sample frequency, the function simply returns.
+ */
+ val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
+ >> RTSDCTL_ENT_DLY_SHIFT;
+ if (ent_delay <= val) {
+ /* put RNG4 into run mode */
+ clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
+ return;
+ }
+
val = rd_reg32(&r4tst->rtsdctl);
- val = (val & ~RTSDCTL_ENT_DLY_MASK) | (1600 << RTSDCTL_ENT_DLY_SHIFT);
+ val = (val & ~RTSDCTL_ENT_DLY_MASK) |
+ (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
wr_reg32(&r4tst->rtsdctl, val);
- /* min. freq. count */
- wr_reg32(&r4tst->rtfrqmin, 400);
- /* max. freq. count */
- wr_reg32(&r4tst->rtfrqmax, 6400);
+ /* min. freq. count, equal to 1/4 of the entropy sample length */
+ wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
+ /* max. freq. count, equal to 8 times the entropy sample length */
+ wr_reg32(&r4tst->rtfrqmax, ent_delay << 3);
/* put RNG4 into run mode */
clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
}
@@ -193,7 +386,7 @@ EXPORT_SYMBOL(caam_get_era);
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
- int ret, ring, rspec;
+ int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
u64 caam_id;
struct device *dev;
struct device_node *nprop, *np;
@@ -258,8 +451,9 @@ static int caam_probe(struct platform_device *pdev)
rspec++;
}
- ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL);
- if (ctrlpriv->jrdev == NULL) {
+ ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec,
+ GFP_KERNEL);
+ if (ctrlpriv->jrpdev == NULL) {
iounmap(&topregs->ctrl);
return -ENOMEM;
}
@@ -267,13 +461,24 @@ static int caam_probe(struct platform_device *pdev)
ring = 0;
ctrlpriv->total_jobrs = 0;
for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
- caam_jr_probe(pdev, np, ring);
+ ctrlpriv->jrpdev[ring] =
+ of_platform_device_create(np, NULL, dev);
+ if (!ctrlpriv->jrpdev[ring]) {
+ pr_warn("JR%d Platform device creation error\n", ring);
+ continue;
+ }
ctrlpriv->total_jobrs++;
ring++;
}
if (!ring) {
for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") {
- caam_jr_probe(pdev, np, ring);
+ ctrlpriv->jrpdev[ring] =
+ of_platform_device_create(np, NULL, dev);
+ if (!ctrlpriv->jrpdev[ring]) {
+ pr_warn("JR%d Platform device creation error\n",
+ ring);
+ continue;
+ }
ctrlpriv->total_jobrs++;
ring++;
}
@@ -299,16 +504,55 @@ static int caam_probe(struct platform_device *pdev)
/*
* If SEC has RNG version >= 4 and RNG state handle has not been
- * already instantiated ,do RNG instantiation
+ * already instantiated, do RNG instantiation
*/
- if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4 &&
- !(rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IF0)) {
- kick_trng(pdev);
- ret = instantiate_rng(dev);
+ if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4) {
+ ctrlpriv->rng4_sh_init =
+ rd_reg32(&topregs->ctrl.r4tst[0].rdsta);
+ /*
+ * If the secure keys (TDKEK, JDKEK, TDSK), were already
+ * generated, signal this to the function that is instantiating
+ * the state handles. An error would occur if RNG4 attempts
+ * to regenerate these keys before the next POR.
+ */
+ gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
+ ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
+ do {
+ int inst_handles =
+ rd_reg32(&topregs->ctrl.r4tst[0].rdsta) &
+ RDSTA_IFMASK;
+ /*
+			 * If either SH was instantiated by somebody else
+ * (e.g. u-boot) then it is assumed that the entropy
+ * parameters are properly set and thus the function
+ * setting these (kick_trng(...)) is skipped.
+ * Also, if a handle was instantiated, do not change
+ * the TRNG parameters.
+ */
+ if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
+ kick_trng(pdev, ent_delay);
+ ent_delay += 400;
+ }
+ /*
+			 * If instantiate_rng(...) fails, the loop will rerun
+			 * and the kick_trng(...) function will modify the
+			 * upper and lower limits of the entropy sampling
+			 * interval, leading to a successful initialization of
+ * the RNG.
+ */
+ ret = instantiate_rng(dev, inst_handles,
+ gen_sk);
+ } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
if (ret) {
+			dev_err(dev, "failed to instantiate RNG\n");
caam_remove(pdev);
return ret;
}
+ /*
+ * Set handles init'ed by this module as the complement of the
+ * already initialized ones
+ */
+ ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
/* Enable RDB bit so that RNG works faster */
setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE);
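
The probe path above retries RNG4 instantiation with an entropy delay that grows by 400 clocks per attempt, and kick_trng() derives the frequency-count window from that delay (minimum = delay/4, maximum = delay*8). A compact userspace sketch of the retry shape, with try_init() standing in for the real descriptor run on DECO0:

    #include <errno.h>
    #include <stdio.h>

    #define ENT_DLY_MIN     1200    /* RTSDCTL_ENT_DLY_MIN in regs.h */
    #define ENT_DLY_MAX     12800   /* RTSDCTL_ENT_DLY_MAX in regs.h */

    /* Stand-in for instantiate_rng(): pretend 2000+ clocks are needed. */
    static int try_init(int ent_delay)
    {
            return ent_delay >= 2000 ? 0 : -EAGAIN;
    }

    int main(void)
    {
            int ent_delay = ENT_DLY_MIN, ret;

            do {
                    /* kick_trng() analogue: window derived from the delay */
                    printf("delay=%d frqmin=%d frqmax=%d\n",
                           ent_delay, ent_delay >> 2, ent_delay << 3);
                    ret = try_init(ent_delay);
                    ent_delay += 400;       /* same stride as the probe loop */
            } while (ret == -EAGAIN && ent_delay < ENT_DLY_MAX);

            printf("ret=%d\n", ret);
            return ret ? 1 : 0;
    }
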
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 53b296f78b0d..7e4500f18df6 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -1155,8 +1155,15 @@ struct sec4_sg_entry {
/* randomizer AAI set */
#define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT)
-#define OP_ALG_AAI_RNG_NOZERO (0x10 << OP_ALG_AAI_SHIFT)
-#define OP_ALG_AAI_RNG_ODD (0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_NZB (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_OBP (0x20 << OP_ALG_AAI_SHIFT)
+
+/* RNG4 AAI set */
+#define OP_ALG_AAI_RNG4_SH_0 (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_SH_1 (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_PS (0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT)
/* hmac/smac AAI set */
#define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
@@ -1178,12 +1185,6 @@ struct sec4_sg_entry {
#define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT)
#define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT)
-/* RNG4 set */
-#define OP_ALG_RNG4_SHIFT 4
-#define OP_ALG_RNG4_MASK (0x1f3 << OP_ALG_RNG4_SHIFT)
-
-#define OP_ALG_RNG4_SK (0x100 << OP_ALG_RNG4_SHIFT)
-
#define OP_ALG_AS_SHIFT 2
#define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT)
#define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT)
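
build_instantiation_desc() above now encodes the RNG4 state handle directly in the AAI field of the OPERATION command, (handle << OP_ALG_AAI_SHIFT), OR-ed with the algorithm selector and the AS state. A sketch of that bit composition; the type/selector values and OP_ALG_AAI_SHIFT = 4 are assumptions taken from desc.h, not visible in this hunk:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed values, per desc.h (only the AAI/AS lines appear above). */
    #define OP_TYPE_CLASS1_ALG      0x02000000
    #define OP_ALG_ALGSEL_RNG       0x00500000
    #define OP_ALG_AAI_SHIFT        4
    #define OP_ALG_AS_SHIFT         2
    #define OP_ALG_AS_INIT          (1 << OP_ALG_AS_SHIFT)

    int main(void)
    {
            uint32_t handle = 1;    /* RNG4 state handle SH1 */
            uint32_t op = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
                          (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;

            printf("OPERATION word: 0x%08x\n", (unsigned)op); /* 0x02500014 */
            return 0;
    }
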
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 34c4b9f7fbfa..6d85fcc5bd0a 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -37,13 +37,16 @@ struct caam_jrentry_info {
/* Private sub-storage for a single JobR */
struct caam_drv_private_jr {
- struct device *parentdev; /* points back to controller dev */
- struct platform_device *jr_pdev;/* points to platform device for JR */
+ struct list_head list_node; /* Job Ring device list */
+ struct device *dev;
int ridx;
struct caam_job_ring __iomem *rregs; /* JobR's register space */
struct tasklet_struct irqtask;
int irq; /* One per queue */
+ /* Number of scatterlist crypt transforms active on the JobR */
+ atomic_t tfm_count ____cacheline_aligned;
+
/* Job ring info */
int ringsize; /* Size of rings (assume input = output) */
struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */
@@ -63,7 +66,7 @@ struct caam_drv_private_jr {
struct caam_drv_private {
struct device *dev;
- struct device **jrdev; /* Alloc'ed array per sub-device */
+ struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
struct platform_device *pdev;
/* Physical-presence section */
@@ -80,12 +83,11 @@ struct caam_drv_private {
u8 qi_present; /* Nonzero if QI present in device */
int secvio_irq; /* Security violation interrupt number */
- /* which jr allocated to scatterlist crypto */
- atomic_t tfm_count ____cacheline_aligned;
- /* list of registered crypto algorithms (mk generic context handle?) */
- struct list_head alg_list;
- /* list of registered hash algorithms (mk generic context handle?) */
- struct list_head hash_list;
+#define RNG4_MAX_HANDLES 2
+ /* RNG4 block */
+ u32 rng4_sh_init; /* This bitmap shows which of the State
+ Handles of the RNG4 block are initialized
+ by this driver */
/*
* debugfs entries for developer view into driver/device
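
rng4_sh_init above records which RNG4 state handles existed before this driver touched the hardware; after instantiation, the probe flips it (~init & RDSTA_IFMASK) so that caam_remove() tears down only driver-owned handles. The bookkeeping in two lines (mask values copied from the regs.h hunk further below):

    #include <assert.h>

    #define RDSTA_IF0       0x00000001
    #define RDSTA_IF1       0x00000002
    #define RDSTA_IFMASK    (RDSTA_IF1 | RDSTA_IF0)

    int main(void)
    {
            unsigned pre_existing = RDSTA_IF0;  /* SH0 set up by u-boot, say */
            unsigned ours = ~pre_existing & RDSTA_IFMASK;

            assert(ours == RDSTA_IF1);  /* we own SH1 only, and free only it */
            return 0;
    }
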
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index bdb786d5a5e5..1d80bd3636c5 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -6,6 +6,7 @@
*/
#include <linux/of_irq.h>
+#include <linux/of_address.h>
#include "compat.h"
#include "regs.h"
@@ -13,6 +14,113 @@
#include "desc.h"
#include "intern.h"
+struct jr_driver_data {
+ /* List of Physical JobR's with the Driver */
+ struct list_head jr_list;
+ spinlock_t jr_alloc_lock; /* jr_list lock */
+} ____cacheline_aligned;
+
+static struct jr_driver_data driver_data;
+
+static int caam_reset_hw_jr(struct device *dev)
+{
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+ unsigned int timeout = 100000;
+
+ /*
+ * mask interrupts since we are going to poll
+ * for reset completion status
+ */
+ setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+
+ /* initiate flush (required prior to reset) */
+ wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
+ while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
+ JRINT_ERR_HALT_INPROGRESS) && --timeout)
+ cpu_relax();
+
+ if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
+ JRINT_ERR_HALT_COMPLETE || timeout == 0) {
+ dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
+ return -EIO;
+ }
+
+ /* initiate reset */
+ timeout = 100000;
+ wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
+ while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
+ cpu_relax();
+
+ if (timeout == 0) {
+ dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
+ return -EIO;
+ }
+
+ /* unmask interrupts */
+ clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+
+ return 0;
+}
+
+/*
+ * Shutdown JobR independent of platform property code
+ */
+int caam_jr_shutdown(struct device *dev)
+{
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+ dma_addr_t inpbusaddr, outbusaddr;
+ int ret;
+
+ ret = caam_reset_hw_jr(dev);
+
+ tasklet_kill(&jrp->irqtask);
+
+ /* Release interrupt */
+ free_irq(jrp->irq, dev);
+
+ /* Free rings */
+ inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
+ outbusaddr = rd_reg64(&jrp->rregs->outring_base);
+ dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
+ jrp->inpring, inpbusaddr);
+ dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
+ jrp->outring, outbusaddr);
+ kfree(jrp->entinfo);
+
+ return ret;
+}
+
+static int caam_jr_remove(struct platform_device *pdev)
+{
+ int ret;
+ struct device *jrdev;
+ struct caam_drv_private_jr *jrpriv;
+
+ jrdev = &pdev->dev;
+ jrpriv = dev_get_drvdata(jrdev);
+
+ /*
+	 * Return -EBUSY if the job ring is still allocated.
+ */
+ if (atomic_read(&jrpriv->tfm_count)) {
+ dev_err(jrdev, "Device is busy\n");
+ return -EBUSY;
+ }
+
+ /* Remove the node from Physical JobR list maintained by driver */
+ spin_lock(&driver_data.jr_alloc_lock);
+ list_del(&jrpriv->list_node);
+ spin_unlock(&driver_data.jr_alloc_lock);
+
+ /* Release ring */
+ ret = caam_jr_shutdown(jrdev);
+ if (ret)
+ dev_err(jrdev, "Failed to shut down job ring\n");
+ irq_dispose_mapping(jrpriv->irq);
+
+ return ret;
+}
+
/* Main per-ring interrupt handler */
static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
{
@@ -128,6 +236,59 @@ static void caam_jr_dequeue(unsigned long devarg)
}
/**
+ * caam_jr_alloc() - Alloc a job ring for someone to use as needed.
+ *
+ * returns: pointer to the newly allocated physical JobR device
+ *	    on success; ERR_PTR(-ENODEV) if no job ring is available.
+ **/
+struct device *caam_jr_alloc(void)
+{
+ struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
+ struct device *dev = NULL;
+ int min_tfm_cnt = INT_MAX;
+ int tfm_cnt;
+
+ spin_lock(&driver_data.jr_alloc_lock);
+
+ if (list_empty(&driver_data.jr_list)) {
+ spin_unlock(&driver_data.jr_alloc_lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
+ tfm_cnt = atomic_read(&jrpriv->tfm_count);
+ if (tfm_cnt < min_tfm_cnt) {
+ min_tfm_cnt = tfm_cnt;
+ min_jrpriv = jrpriv;
+ }
+ if (!min_tfm_cnt)
+ break;
+ }
+
+ if (min_jrpriv) {
+ atomic_inc(&min_jrpriv->tfm_count);
+ dev = min_jrpriv->dev;
+ }
+ spin_unlock(&driver_data.jr_alloc_lock);
+
+ return dev;
+}
+EXPORT_SYMBOL(caam_jr_alloc);
+
+/**
+ * caam_jr_free() - Free the Job Ring
+ * @rdev - points to the dev that identifies the Job ring to
+ * be released.
+ **/
+void caam_jr_free(struct device *rdev)
+{
+ struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
+
+ atomic_dec(&jrpriv->tfm_count);
+}
+EXPORT_SYMBOL(caam_jr_free);
+
+/**
* caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
* -EBUSY if the queue is full, -EIO if it cannot map the caller's
* descriptor.
@@ -207,46 +368,6 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
}
EXPORT_SYMBOL(caam_jr_enqueue);
-static int caam_reset_hw_jr(struct device *dev)
-{
- struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
- unsigned int timeout = 100000;
-
- /*
- * mask interrupts since we are going to poll
- * for reset completion status
- */
- setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
-
- /* initiate flush (required prior to reset) */
- wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
- while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
- JRINT_ERR_HALT_INPROGRESS) && --timeout)
- cpu_relax();
-
- if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
- JRINT_ERR_HALT_COMPLETE || timeout == 0) {
- dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
- return -EIO;
- }
-
- /* initiate reset */
- timeout = 100000;
- wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
- while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
- cpu_relax();
-
- if (timeout == 0) {
- dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
- return -EIO;
- }
-
- /* unmask interrupts */
- clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
-
- return 0;
-}
-
/*
* Init JobR independent of platform property detection
*/
@@ -262,7 +383,7 @@ static int caam_jr_init(struct device *dev)
/* Connect job ring interrupt handler. */
error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
- "caam-jobr", dev);
+ dev_name(dev), dev);
if (error) {
dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
jrp->ridx, jrp->irq);
@@ -318,86 +439,43 @@ static int caam_jr_init(struct device *dev)
return 0;
}
-/*
- * Shutdown JobR independent of platform property code
- */
-int caam_jr_shutdown(struct device *dev)
-{
- struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
- dma_addr_t inpbusaddr, outbusaddr;
- int ret;
-
- ret = caam_reset_hw_jr(dev);
-
- tasklet_kill(&jrp->irqtask);
-
- /* Release interrupt */
- free_irq(jrp->irq, dev);
-
- /* Free rings */
- inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
- outbusaddr = rd_reg64(&jrp->rregs->outring_base);
- dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
- jrp->inpring, inpbusaddr);
- dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
- jrp->outring, outbusaddr);
- kfree(jrp->entinfo);
- of_device_unregister(jrp->jr_pdev);
-
- return ret;
-}
/*
- * Probe routine for each detected JobR subsystem. It assumes that
- * property detection was picked up externally.
+ * Probe routine for each detected JobR subsystem.
*/
-int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
- int ring)
+static int caam_jr_probe(struct platform_device *pdev)
{
- struct device *ctrldev, *jrdev;
- struct platform_device *jr_pdev;
- struct caam_drv_private *ctrlpriv;
+ struct device *jrdev;
+ struct device_node *nprop;
+ struct caam_job_ring __iomem *ctrl;
struct caam_drv_private_jr *jrpriv;
- u32 *jroffset;
+ static int total_jobrs;
int error;
- ctrldev = &pdev->dev;
- ctrlpriv = dev_get_drvdata(ctrldev);
-
+ jrdev = &pdev->dev;
jrpriv = kmalloc(sizeof(struct caam_drv_private_jr),
GFP_KERNEL);
- if (jrpriv == NULL) {
- dev_err(ctrldev, "can't alloc private mem for job ring %d\n",
- ring);
+ if (!jrpriv)
return -ENOMEM;
- }
- jrpriv->parentdev = ctrldev; /* point back to parent */
- jrpriv->ridx = ring; /* save ring identity relative to detection */
- /*
- * Derive a pointer to the detected JobRs regs
- * Driver has already iomapped the entire space, we just
- * need to add in the offset to this JobR. Don't know if I
- * like this long-term, but it'll run
- */
- jroffset = (u32 *)of_get_property(np, "reg", NULL);
- jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl
- + *jroffset);
+ dev_set_drvdata(jrdev, jrpriv);
- /* Build a local dev for each detected queue */
- jr_pdev = of_platform_device_create(np, NULL, ctrldev);
- if (jr_pdev == NULL) {
- kfree(jrpriv);
- return -EINVAL;
+ /* save ring identity relative to detection */
+ jrpriv->ridx = total_jobrs++;
+
+ nprop = pdev->dev.of_node;
+ /* Get configuration properties from device tree */
+ /* First, get register page */
+ ctrl = of_iomap(nprop, 0);
+ if (!ctrl) {
+ dev_err(jrdev, "of_iomap() failed\n");
+ return -ENOMEM;
}
- jrpriv->jr_pdev = jr_pdev;
- jrdev = &jr_pdev->dev;
- dev_set_drvdata(jrdev, jrpriv);
- ctrlpriv->jrdev[ring] = jrdev;
+ jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
if (sizeof(dma_addr_t) == sizeof(u64))
- if (of_device_is_compatible(np, "fsl,sec-v5.0-job-ring"))
+ if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
dma_set_mask(jrdev, DMA_BIT_MASK(40));
else
dma_set_mask(jrdev, DMA_BIT_MASK(36));
@@ -405,15 +483,61 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
dma_set_mask(jrdev, DMA_BIT_MASK(32));
/* Identify the interrupt */
- jrpriv->irq = irq_of_parse_and_map(np, 0);
+ jrpriv->irq = irq_of_parse_and_map(nprop, 0);
/* Now do the platform independent part */
error = caam_jr_init(jrdev); /* now turn on hardware */
if (error) {
- of_device_unregister(jr_pdev);
kfree(jrpriv);
return error;
}
- return error;
+ jrpriv->dev = jrdev;
+ spin_lock(&driver_data.jr_alloc_lock);
+ list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
+ spin_unlock(&driver_data.jr_alloc_lock);
+
+ atomic_set(&jrpriv->tfm_count, 0);
+
+ return 0;
+}
+
+static struct of_device_id caam_jr_match[] = {
+ {
+ .compatible = "fsl,sec-v4.0-job-ring",
+ },
+ {
+ .compatible = "fsl,sec4.0-job-ring",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, caam_jr_match);
+
+static struct platform_driver caam_jr_driver = {
+ .driver = {
+ .name = "caam_jr",
+ .owner = THIS_MODULE,
+ .of_match_table = caam_jr_match,
+ },
+ .probe = caam_jr_probe,
+ .remove = caam_jr_remove,
+};
+
+static int __init jr_driver_init(void)
+{
+ spin_lock_init(&driver_data.jr_alloc_lock);
+ INIT_LIST_HEAD(&driver_data.jr_list);
+ return platform_driver_register(&caam_jr_driver);
+}
+
+static void __exit jr_driver_exit(void)
+{
+ platform_driver_unregister(&caam_jr_driver);
}
+
+module_init(jr_driver_init);
+module_exit(jr_driver_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM JR request backend");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
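
caam_jr_alloc() above balances transforms by walking the ring list for the lowest tfm_count under the allocation lock and bumping the winner's counter; caam_jr_free() simply decrements it. The selection logic, sketched over a plain array with the list and locking elided:

    #include <limits.h>
    #include <stdio.h>

    struct ring { int tfm_count; };

    /* Return the index of the least-loaded ring, or -1 if none exist. */
    static int pick_ring(struct ring *r, int n)
    {
            int i, min = INT_MAX, best = -1;

            for (i = 0; i < n; i++) {
                    if (r[i].tfm_count < min) {
                            min = r[i].tfm_count;
                            best = i;
                    }
                    if (!min)               /* an idle ring cannot be beaten */
                            break;
            }
            if (best >= 0)
                    r[best].tfm_count++;    /* the driver uses atomic_inc() */
            return best;
    }

    int main(void)
    {
            struct ring rings[] = { { 2 }, { 0 }, { 1 } };

            printf("picked ring %d\n", pick_ring(rings, 3));        /* 1 */
            return 0;
    }
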
diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h
index 9d8741a59037..97113a6d6c58 100644
--- a/drivers/crypto/caam/jr.h
+++ b/drivers/crypto/caam/jr.h
@@ -8,12 +8,11 @@
#define JR_H
/* Prototypes for backend-level services exposed to APIs */
+struct device *caam_jr_alloc(void);
+void caam_jr_free(struct device *rdev);
int caam_jr_enqueue(struct device *dev, u32 *desc,
void (*cbk)(struct device *dev, u32 *desc, u32 status,
void *areq),
void *areq);
-extern int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
- int ring);
-extern int caam_jr_shutdown(struct device *dev);
#endif /* JR_H */
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 4455396918de..d50174f45b21 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -245,7 +245,7 @@ struct rngtst {
/* RNG4 TRNG test registers */
struct rng4tst {
-#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */
+#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */
u32 rtmctl; /* misc. control register */
u32 rtscmisc; /* statistical check misc. register */
u32 rtpkrrng; /* poker range register */
@@ -255,6 +255,8 @@ struct rng4tst {
};
#define RTSDCTL_ENT_DLY_SHIFT 16
#define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT)
+#define RTSDCTL_ENT_DLY_MIN 1200
+#define RTSDCTL_ENT_DLY_MAX 12800
u32 rtsdctl; /* seed control register */
union {
u32 rtsblim; /* PRGM=1: sparse bit limit register */
@@ -266,7 +268,11 @@ struct rng4tst {
u32 rtfrqcnt; /* PRGM=0: freq. count register */
};
u32 rsvd1[40];
+#define RDSTA_SKVT 0x80000000
+#define RDSTA_SKVN 0x40000000
#define RDSTA_IF0 0x00000001
+#define RDSTA_IF1 0x00000002
+#define RDSTA_IFMASK (RDSTA_IF1 | RDSTA_IF0)
u32 rdsta;
u32 rsvd2[15];
};
@@ -692,6 +698,7 @@ struct caam_deco {
u32 jr_ctl_hi; /* CxJRR - JobR Control Register @800 */
u32 jr_ctl_lo;
u64 jr_descaddr; /* CxDADR - JobR Descriptor Address */
+#define DECO_OP_STATUS_HI_ERR_MASK 0xF00000FF
u32 op_status_hi; /* DxOPSTA - DECO Operation Status */
u32 op_status_lo;
u32 rsvd24[2];
@@ -706,12 +713,13 @@ struct caam_deco {
u32 rsvd29[48];
u32 descbuf[64]; /* DxDESB - Descriptor buffer */
u32 rscvd30[193];
+#define DESC_DBG_DECO_STAT_HOST_ERR 0x00D00000
+#define DESC_DBG_DECO_STAT_VALID 0x80000000
+#define DESC_DBG_DECO_STAT_MASK 0x00F00000
u32 desc_dbg; /* DxDDR - DECO Debug Register */
u32 rsvd31[126];
};
-/* DECO DBG Register Valid Bit*/
-#define DECO_DBG_VALID 0x80000000
#define DECO_JQCR_WHL 0x20000000
#define DECO_JQCR_FOUR 0x10000000
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index e0037c8ee243..b12ff85f4241 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -117,6 +117,21 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
return nents;
}
+/* Map SG page in kernel virtual address space and copy */
+static inline void sg_map_copy(u8 *dest, struct scatterlist *sg,
+ int len, int offset)
+{
+ u8 *mapped_addr;
+
+ /*
+	 * The page here may be a user-space page pinned with
+	 * get_user_pages(); it must be kmapped before use and
+	 * kunmapped afterwards.
+ */
+ mapped_addr = kmap_atomic(sg_page(sg));
+ memcpy(dest, mapped_addr + offset, len);
+ kunmap_atomic(mapped_addr);
+}
+
/* Copy from len bytes of sg to dest, starting from beginning */
static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
{
@@ -124,15 +139,15 @@ static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
int cpy_index = 0, next_cpy_index = current_sg->length;
while (next_cpy_index < len) {
- memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg),
- current_sg->length);
+ sg_map_copy(dest + cpy_index, current_sg, current_sg->length,
+ current_sg->offset);
current_sg = scatterwalk_sg_next(current_sg);
cpy_index = next_cpy_index;
next_cpy_index += current_sg->length;
}
if (cpy_index < len)
- memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg),
- len - cpy_index);
+		sg_map_copy(dest + cpy_index, current_sg, len - cpy_index,
+ current_sg->offset);
}
/* Copy sg data, from to_skip to end, to dest */
@@ -140,7 +155,7 @@ static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
int to_skip, unsigned int end)
{
struct scatterlist *current_sg = sg;
- int sg_index, cpy_index;
+ int sg_index, cpy_index, offset;
sg_index = current_sg->length;
while (sg_index <= to_skip) {
@@ -148,9 +163,10 @@ static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
sg_index += current_sg->length;
}
cpy_index = sg_index - to_skip;
- memcpy(dest, (u8 *) sg_virt(current_sg) +
- current_sg->length - cpy_index, cpy_index);
- current_sg = scatterwalk_sg_next(current_sg);
- if (end - sg_index)
+ offset = current_sg->offset + current_sg->length - cpy_index;
+ sg_map_copy(dest, current_sg, cpy_index, offset);
+ if (end - sg_index) {
+ current_sg = scatterwalk_sg_next(current_sg);
sg_copy(dest + cpy_index, current_sg, end - sg_index);
+ }
}
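sg_virt() assumes the scatterlist page has a permanent kernel mapping, which does not hold for highmem pages or for user pages pinned with get_user_pages(); the kmap_atomic()/kunmap_atomic() pair above maps each page only for the duration of the memcpy(). A hedged sketch of the same pattern in the opposite direction (writing into such a page), with sg_unmap_copy() as a hypothetical name:

	#include <linux/highmem.h>
	#include <linux/scatterlist.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/* Not part of the patch: write into a scatterlist entry whose
	 * page may live in highmem or pinned user memory. */
	static inline void sg_unmap_copy(struct scatterlist *sg, const u8 *src,
					 int len, int offset)
	{
		u8 *mapped_addr = kmap_atomic(sg_page(sg));

		memcpy(mapped_addr + offset, src, len);
		kunmap_atomic(mapped_addr);
	}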
diff --git a/drivers/crypto/dcp.c b/drivers/crypto/dcp.c
index a8a7dd4b0d25..247ab8048f5b 100644
--- a/drivers/crypto/dcp.c
+++ b/drivers/crypto/dcp.c
@@ -733,12 +733,9 @@ static int dcp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "failed to get IORESOURCE_MEM\n");
- return -ENXIO;
- }
- dev->dcp_regs_base = devm_ioremap(&pdev->dev, r->start,
- resource_size(r));
+ dev->dcp_regs_base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(dev->dcp_regs_base))
+ return PTR_ERR(dev->dcp_regs_base);
dcp_set(dev, DCP_CTRL_SFRST, DCP_REG_CTRL);
udelay(10);
@@ -762,7 +759,8 @@ static int dcp_probe(struct platform_device *pdev)
return -EIO;
}
dev->dcp_vmi_irq = r->start;
- ret = request_irq(dev->dcp_vmi_irq, dcp_vmi_irq, 0, "dcp", dev);
+ ret = devm_request_irq(&pdev->dev, dev->dcp_vmi_irq, dcp_vmi_irq, 0,
+ "dcp", dev);
if (ret != 0) {
dev_err(&pdev->dev, "can't request_irq (0)\n");
return -EIO;
@@ -771,15 +769,14 @@ static int dcp_probe(struct platform_device *pdev)
r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
if (!r) {
dev_err(&pdev->dev, "can't get IRQ resource (1)\n");
- ret = -EIO;
- goto err_free_irq0;
+ return -EIO;
}
dev->dcp_irq = r->start;
- ret = request_irq(dev->dcp_irq, dcp_irq, 0, "dcp", dev);
+ ret = devm_request_irq(&pdev->dev, dev->dcp_irq, dcp_irq, 0, "dcp",
+ dev);
if (ret != 0) {
dev_err(&pdev->dev, "can't request_irq (1)\n");
- ret = -EIO;
- goto err_free_irq0;
+ return -EIO;
}
dev->hw_pkg[0] = dma_alloc_coherent(&pdev->dev,
@@ -788,8 +785,7 @@ static int dcp_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!dev->hw_pkg[0]) {
dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
- ret = -ENOMEM;
- goto err_free_irq1;
+ return -ENOMEM;
}
for (i = 1; i < DCP_MAX_PKG; i++) {
@@ -848,16 +844,14 @@ err_unregister:
for (j = 0; j < i; j++)
crypto_unregister_alg(&algs[j]);
err_free_key_iv:
+ tasklet_kill(&dev->done_task);
+ tasklet_kill(&dev->queue_task);
dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
dev->payload_base_dma);
err_free_hw_packet:
dma_free_coherent(&pdev->dev, DCP_MAX_PKG *
sizeof(struct dcp_hw_packet), dev->hw_pkg[0],
dev->hw_phys_pkg);
-err_free_irq1:
- free_irq(dev->dcp_irq, dev);
-err_free_irq0:
- free_irq(dev->dcp_vmi_irq, dev);
return ret;
}
@@ -868,23 +862,20 @@ static int dcp_remove(struct platform_device *pdev)
int j;
dev = platform_get_drvdata(pdev);
- dma_free_coherent(&pdev->dev,
- DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
- dev->hw_pkg[0], dev->hw_phys_pkg);
-
- dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
- dev->payload_base_dma);
+ misc_deregister(&dev->dcp_bootstream_misc);
- free_irq(dev->dcp_irq, dev);
- free_irq(dev->dcp_vmi_irq, dev);
+ for (j = 0; j < ARRAY_SIZE(algs); j++)
+ crypto_unregister_alg(&algs[j]);
tasklet_kill(&dev->done_task);
tasklet_kill(&dev->queue_task);
- for (j = 0; j < ARRAY_SIZE(algs); j++)
- crypto_unregister_alg(&algs[j]);
+ dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
+ dev->payload_base_dma);
- misc_deregister(&dev->dcp_bootstream_misc);
+ dma_free_coherent(&pdev->dev,
+ DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
+ dev->hw_pkg[0], dev->hw_phys_pkg);
return 0;
}
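The dcp conversion leans on managed (devm_*) resources: devm_ioremap_resource() validates and maps the MEM resource in one step, and devm_request_irq() ties each IRQ's lifetime to the device, which is why the err_free_irq0/err_free_irq1 unwind labels disappear. A minimal sketch of the pattern, assuming hypothetical foo_probe()/foo_irq() names:

	#include <linux/err.h>
	#include <linux/interrupt.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	static irqreturn_t foo_irq(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int foo_probe(struct platform_device *pdev)
	{
		struct resource *r;
		void __iomem *base;
		int irq, ret;

		r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_ioremap_resource(&pdev->dev, r); /* also rejects r == NULL */
		if (IS_ERR(base))
			return PTR_ERR(base);

		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		ret = devm_request_irq(&pdev->dev, irq, foo_irq, 0, "foo", NULL);
		if (ret)
			return ret; /* no manual iounmap()/free_irq() unwinding */

		return 0;
	}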
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 214357e12dc0..9dd6e01eac33 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1149,32 +1149,24 @@ static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
- struct rtattr *rta = (struct rtattr *)key;
- struct crypto_authenc_key_param *param;
+ struct crypto_authenc_keys keys;
- if (!RTA_OK(rta, keylen))
- goto badkey;
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- goto badkey;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
- param = RTA_DATA(rta);
- ctx->enckey_len = be32_to_cpu(param->enckeylen);
-
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
+ if (keys.authkeylen > sizeof(ctx->authkey))
+ goto badkey;
- if (keylen < ctx->enckey_len)
+ if (keys.enckeylen > sizeof(ctx->enckey))
goto badkey;
- ctx->authkey_len = keylen - ctx->enckey_len;
- memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len);
- memcpy(ctx->authkey, key, ctx->authkey_len);
+ memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
+ memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
+ ctx->authkey_len = keys.authkeylen;
+ ctx->enckey_len = keys.enckeylen;
return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
- ctx->enckey_len = 0;
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
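crypto_authenc_extractkeys(), declared in <crypto/authenc.h>, centralizes the rtattr parsing that ixp4xx, picoxcell, and talitos each open-coded: an authenc key blob is an RTA-wrapped crypto_authenc_key_param carrying enckeylen, followed by the authentication key and then the cipher key. A hedged sketch of the resulting driver pattern, with example_setkey() as a hypothetical name:

	#include <crypto/authenc.h>

	static int example_setkey(const u8 *key, unsigned int keylen)
	{
		struct crypto_authenc_keys keys;

		if (crypto_authenc_extractkeys(&keys, key, keylen))
			return -EINVAL;

		/*
		 * keys.authkey/keys.authkeylen and keys.enckey/keys.enckeylen
		 * point into the original blob; bounds-check them against the
		 * context buffers before copying, as each driver above does.
		 */
		return 0;
	}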
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 3374a3ebe4c7..8d1e6f8e9e9c 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -907,7 +907,7 @@ static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
}
-irqreturn_t crypto_int(int irq, void *priv)
+static irqreturn_t crypto_int(int irq, void *priv)
{
u32 val;
@@ -928,7 +928,7 @@ irqreturn_t crypto_int(int irq, void *priv)
return IRQ_HANDLED;
}
-struct crypto_alg mv_aes_alg_ecb = {
+static struct crypto_alg mv_aes_alg_ecb = {
.cra_name = "ecb(aes)",
.cra_driver_name = "mv-ecb-aes",
.cra_priority = 300,
@@ -951,7 +951,7 @@ struct crypto_alg mv_aes_alg_ecb = {
},
};
-struct crypto_alg mv_aes_alg_cbc = {
+static struct crypto_alg mv_aes_alg_cbc = {
.cra_name = "cbc(aes)",
.cra_driver_name = "mv-cbc-aes",
.cra_priority = 300,
@@ -975,7 +975,7 @@ struct crypto_alg mv_aes_alg_cbc = {
},
};
-struct ahash_alg mv_sha1_alg = {
+static struct ahash_alg mv_sha1_alg = {
.init = mv_hash_init,
.update = mv_hash_update,
.final = mv_hash_final,
@@ -999,7 +999,7 @@ struct ahash_alg mv_sha1_alg = {
}
};
-struct ahash_alg mv_hmac_sha1_alg = {
+static struct ahash_alg mv_hmac_sha1_alg = {
.init = mv_hash_init,
.update = mv_hash_update,
.final = mv_hash_final,
@@ -1084,7 +1084,7 @@ static int mv_probe(struct platform_device *pdev)
goto err_unmap_sram;
}
- ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
+ ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev),
cp);
if (ret)
goto err_thread;
@@ -1187,7 +1187,7 @@ static struct platform_driver marvell_crypto = {
.driver = {
.owner = THIS_MODULE,
.name = "mv_crypto",
- .of_match_table = of_match_ptr(mv_cesa_of_match_table),
+ .of_match_table = mv_cesa_of_match_table,
},
};
MODULE_ALIAS("platform:mv_crypto");
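Besides making the algorithm and handler definitions static, this hunk drops IRQF_DISABLED and the of_match_ptr() wrapper: IRQF_DISABLED has long been a no-op because genirq runs every handler with local interrupts disabled, and of_match_ptr() buys nothing when the match table is built unconditionally. The two request_irq() calls are therefore equivalent:

	/* IRQF_DISABLED is a no-op; these behave identically. */
	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev), cp);
	ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev), cp);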
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index ce791c2f81f7..a9ccbf14096e 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -275,7 +275,7 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
if (dd->flags & FLAGS_CBC)
val |= AES_REG_CTRL_CBC;
if (dd->flags & FLAGS_CTR) {
- val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_32;
+ val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;
mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK;
}
if (dd->flags & FLAGS_ENCRYPT)
@@ -554,7 +554,7 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
return err;
}
-int omap_aes_check_aligned(struct scatterlist *sg)
+static int omap_aes_check_aligned(struct scatterlist *sg)
{
while (sg) {
if (!IS_ALIGNED(sg->offset, 4))
@@ -566,7 +566,7 @@ int omap_aes_check_aligned(struct scatterlist *sg)
return 0;
}
-int omap_aes_copy_sgs(struct omap_aes_dev *dd)
+static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
{
void *buf_in, *buf_out;
int pages;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index e28104b4aab0..e45aaaf0db30 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -2033,3 +2033,4 @@ module_platform_driver(omap_sham_driver);
MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");
+MODULE_ALIAS("platform:omap-sham");
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 888f7f4a6d3f..a6175ba6d238 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -495,45 +495,29 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
{
struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
- struct rtattr *rta = (void *)key;
- struct crypto_authenc_key_param *param;
- unsigned int authkeylen, enckeylen;
+ struct crypto_authenc_keys keys;
int err = -EINVAL;
- if (!RTA_OK(rta, keylen))
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ if (keys.enckeylen > AES_MAX_KEY_SIZE)
goto badkey;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
- goto badkey;
-
- param = RTA_DATA(rta);
- enckeylen = be32_to_cpu(param->enckeylen);
-
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
-
- if (keylen < enckeylen)
- goto badkey;
-
- authkeylen = keylen - enckeylen;
-
- if (enckeylen > AES_MAX_KEY_SIZE)
+ if (keys.authkeylen > sizeof(ctx->hash_ctx))
goto badkey;
if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
SPA_CTRL_CIPH_ALG_AES)
- err = spacc_aead_aes_setkey(tfm, key + authkeylen, enckeylen);
+ err = spacc_aead_aes_setkey(tfm, keys.enckey, keys.enckeylen);
else
- err = spacc_aead_des_setkey(tfm, key + authkeylen, enckeylen);
+ err = spacc_aead_des_setkey(tfm, keys.enckey, keys.enckeylen);
if (err)
goto badkey;
- memcpy(ctx->hash_ctx, key, authkeylen);
- ctx->hash_key_len = authkeylen;
+ memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
+ ctx->hash_key_len = keys.authkeylen;
return 0;
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index d7bb8bac36e9..785a9ded7bdf 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1058,7 +1058,7 @@ static struct platform_driver sahara_driver = {
.driver = {
.name = SAHARA_NAME,
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(sahara_dt_ids),
+ .of_match_table = sahara_dt_ids,
},
.id_table = sahara_platform_ids,
};
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 6cd0e6038583..b44f4ddc565c 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -673,39 +673,20 @@ static int aead_setkey(struct crypto_aead *authenc,
const u8 *key, unsigned int keylen)
{
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
- struct rtattr *rta = (void *)key;
- struct crypto_authenc_key_param *param;
- unsigned int authkeylen;
- unsigned int enckeylen;
-
- if (!RTA_OK(rta, keylen))
- goto badkey;
+ struct crypto_authenc_keys keys;
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
+ if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
goto badkey;
- param = RTA_DATA(rta);
- enckeylen = be32_to_cpu(param->enckeylen);
-
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
- if (keylen < enckeylen)
- goto badkey;
-
- authkeylen = keylen - enckeylen;
-
- if (keylen > TALITOS_MAX_KEY_SIZE)
- goto badkey;
-
- memcpy(&ctx->key, key, keylen);
-
- ctx->keylen = keylen;
- ctx->enckeylen = enckeylen;
- ctx->authkeylen = authkeylen;
+ ctx->keylen = keys.authkeylen + keys.enckeylen;
+ ctx->enckeylen = keys.enckeylen;
+ ctx->authkeylen = keys.authkeylen;
return 0;
@@ -809,7 +790,7 @@ static void ipsec_esp_unmap(struct device *dev,
if (edesc->assoc_chained)
talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
- else
+ else if (areq->assoclen)
/* assoc_nents counts also for IV in non-contiguous cases */
dma_unmap_sg(dev, areq->assoc,
edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
@@ -992,7 +973,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
} else {
- to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->assoc));
+ if (areq->assoclen)
+ to_talitos_ptr(&desc->ptr[1],
+ sg_dma_address(areq->assoc));
+ else
+ to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
desc->ptr[1].j_extent = 0;
}
@@ -1127,7 +1112,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
unsigned int authsize,
unsigned int ivsize,
int icv_stashing,
- u32 cryptoflags)
+ u32 cryptoflags,
+ bool encrypt)
{
struct talitos_edesc *edesc;
int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
@@ -1141,10 +1127,10 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
return ERR_PTR(-EINVAL);
}
- if (iv)
+ if (ivsize)
iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
- if (assoc) {
+ if (assoclen) {
/*
* Currently it is assumed that iv is provided whenever assoc
* is.
@@ -1160,19 +1146,17 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
}
- src_nents = sg_count(src, cryptlen + authsize, &src_chained);
- src_nents = (src_nents == 1) ? 0 : src_nents;
-
- if (!dst) {
- dst_nents = 0;
- } else {
- if (dst == src) {
- dst_nents = src_nents;
- } else {
- dst_nents = sg_count(dst, cryptlen + authsize,
- &dst_chained);
- dst_nents = (dst_nents == 1) ? 0 : dst_nents;
- }
+ if (!dst || dst == src) {
+ src_nents = sg_count(src, cryptlen + authsize, &src_chained);
+ src_nents = (src_nents == 1) ? 0 : src_nents;
+ dst_nents = dst ? src_nents : 0;
+	} else { /* dst && dst != src */
+ src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
+ &src_chained);
+ src_nents = (src_nents == 1) ? 0 : src_nents;
+ dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
+ &dst_chained);
+ dst_nents = (dst_nents == 1) ? 0 : dst_nents;
}
/*
@@ -1192,9 +1176,16 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
edesc = kmalloc(alloc_len, GFP_DMA | flags);
if (!edesc) {
- talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
+ if (assoc_chained)
+ talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
+ else if (assoclen)
+ dma_unmap_sg(dev, assoc,
+ assoc_nents ? assoc_nents - 1 : 1,
+ DMA_TO_DEVICE);
+
if (iv_dma)
dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+
dev_err(dev, "could not allocate edescriptor\n");
return ERR_PTR(-ENOMEM);
}
@@ -1216,7 +1207,7 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
}
static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
- int icv_stashing)
+ int icv_stashing, bool encrypt)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
@@ -1225,7 +1216,7 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
iv, areq->assoclen, areq->cryptlen,
ctx->authsize, ivsize, icv_stashing,
- areq->base.flags);
+ areq->base.flags, encrypt);
}
static int aead_encrypt(struct aead_request *req)
@@ -1235,7 +1226,7 @@ static int aead_encrypt(struct aead_request *req)
struct talitos_edesc *edesc;
/* allocate extended descriptor */
- edesc = aead_edesc_alloc(req, req->iv, 0);
+ edesc = aead_edesc_alloc(req, req->iv, 0, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1258,7 +1249,7 @@ static int aead_decrypt(struct aead_request *req)
req->cryptlen -= authsize;
/* allocate extended descriptor */
- edesc = aead_edesc_alloc(req, req->iv, 1);
+ edesc = aead_edesc_alloc(req, req->iv, 1, false);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1304,7 +1295,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *req)
struct talitos_edesc *edesc;
/* allocate extended descriptor */
- edesc = aead_edesc_alloc(areq, req->giv, 0);
+ edesc = aead_edesc_alloc(areq, req->giv, 0, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1460,7 +1451,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
}
static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
- areq)
+ areq, bool encrypt)
{
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
@@ -1468,7 +1459,7 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
areq->info, 0, areq->nbytes, 0, ivsize, 0,
- areq->base.flags);
+ areq->base.flags, encrypt);
}
static int ablkcipher_encrypt(struct ablkcipher_request *areq)
@@ -1478,7 +1469,7 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
struct talitos_edesc *edesc;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(areq);
+ edesc = ablkcipher_edesc_alloc(areq, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1495,7 +1486,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
struct talitos_edesc *edesc;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(areq);
+ edesc = ablkcipher_edesc_alloc(areq, false);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1647,7 +1638,7 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
- nbytes, 0, 0, 0, areq->base.flags);
+ nbytes, 0, 0, 0, areq->base.flags, false);
}
static int ahash_init(struct ahash_request *areq)
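The encrypt flag threads through the talitos edesc allocators because the ICV switches sides between directions: encryption appends authsize bytes of ICV to the destination, while decryption consumes an ICV that is already part of the source. When src and dst differ, the scatterlist entry counts therefore use asymmetric lengths; restated as a sketch:

	/* Hedged restatement of the accounting above. */
	src_len = cryptlen + (encrypt ? 0 : authsize);	/* decrypt input carries the ICV */
	dst_len = cryptlen + (encrypt ? authsize : 0);	/* encrypt output receives the ICV */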
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c
index fa05e3c329bd..060eecc5dbc3 100644
--- a/drivers/crypto/tegra-aes.c
+++ b/drivers/crypto/tegra-aes.c
@@ -27,6 +27,8 @@
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
@@ -199,8 +201,6 @@ static void aes_workqueue_handler(struct work_struct *work);
static DECLARE_WORK(aes_work, aes_workqueue_handler);
static struct workqueue_struct *aes_wq;
-extern unsigned long long tegra_chip_uid(void);
-
static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset)
{
return readl(dd->io_base + offset);
@@ -713,13 +713,12 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
struct tegra_aes_dev *dd = aes_dev;
struct tegra_aes_ctx *ctx = &rng_ctx;
struct tegra_aes_slot *key_slot;
- struct timespec ts;
int ret = 0;
- u64 nsec, tmp[2];
+ u8 tmp[16]; /* 16 bytes = 128 bits of entropy */
u8 *dt;
if (!ctx || !dd) {
- dev_err(dd->dev, "ctx=0x%x, dd=0x%x\n",
+ pr_err("ctx=0x%x, dd=0x%x\n",
(unsigned int)ctx, (unsigned int)dd);
return -EINVAL;
}
@@ -778,14 +777,8 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128;
} else {
- getnstimeofday(&ts);
- nsec = timespec_to_ns(&ts);
- do_div(nsec, 1000);
- nsec ^= dd->ctr << 56;
- dd->ctr++;
- tmp[0] = nsec;
- tmp[1] = tegra_chip_uid();
- dt = (u8 *)tmp;
+ get_random_bytes(tmp, sizeof(tmp));
+ dt = tmp;
}
memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ);
@@ -804,7 +797,7 @@ static int tegra_aes_cra_init(struct crypto_tfm *tfm)
return 0;
}
-void tegra_aes_cra_exit(struct crypto_tfm *tfm)
+static void tegra_aes_cra_exit(struct crypto_tfm *tfm)
{
struct tegra_aes_ctx *ctx =
crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm);
@@ -924,7 +917,7 @@ static int tegra_aes_probe(struct platform_device *pdev)
}
/* Initialize the vde clock */
- dd->aes_clk = clk_get(dev, "vde");
+ dd->aes_clk = devm_clk_get(dev, "vde");
if (IS_ERR(dd->aes_clk)) {
dev_err(dev, "iclock initialization failed.\n");
err = -ENODEV;
@@ -1033,8 +1026,6 @@ out:
if (dd->buf_out)
dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
dd->buf_out, dd->dma_buf_out);
- if (!IS_ERR(dd->aes_clk))
- clk_put(dd->aes_clk);
if (aes_wq)
destroy_workqueue(aes_wq);
spin_lock(&list_lock);
@@ -1068,7 +1059,6 @@ static int tegra_aes_remove(struct platform_device *pdev)
dd->buf_in, dd->dma_buf_in);
dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
dd->buf_out, dd->dma_buf_out);
- clk_put(dd->aes_clk);
aes_dev = NULL;
return 0;
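The reseed fix matters for RNG quality: the old DT input (a microsecond timestamp XORed with a counter, plus the chip UID) is largely predictable to an attacker, whereas get_random_bytes() draws from the kernel entropy pool. A minimal sketch, assuming the driver's 16-byte DEFAULT_RNG_BLK_SZ block:

	u8 dt_seed[16];	/* DEFAULT_RNG_BLK_SZ bytes for the AES-based RNG */

	get_random_bytes(dt_seed, sizeof(dt_seed));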
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index dd2874ec1927..c823daaf9043 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -62,6 +62,7 @@ config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
depends on PCI && X86
select DMA_ENGINE
+ select DMA_ENGINE_RAID
select DCA
help
Enable support for the Intel(R) I/OAT DMA engine present
@@ -89,14 +90,15 @@ config AT_HDMAC
Support the Atmel AHB DMA controller.
config FSL_DMA
- tristate "Freescale Elo and Elo Plus DMA support"
+ tristate "Freescale Elo series DMA support"
depends on FSL_SOC
select DMA_ENGINE
select ASYNC_TX_ENABLE_CHANNEL_SWITCH
---help---
- Enable support for the Freescale Elo and Elo Plus DMA controllers.
- The Elo is the DMA controller on some 82xx and 83xx parts, and the
- Elo Plus is the DMA controller on 85xx and 86xx parts.
+ Enable support for the Freescale Elo series DMA controllers.
+ The Elo is the DMA controller on some mpc82xx and mpc83xx parts, the
+ EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on
+ some Txxx and Bxxx parts.
config MPC512X_DMA
tristate "Freescale MPC512x built-in DMA engine support"
@@ -111,6 +113,7 @@ config MV_XOR
bool "Marvell XOR engine support"
depends on PLAT_ORION
select DMA_ENGINE
+ select DMA_ENGINE_RAID
select ASYNC_TX_ENABLE_CHANNEL_SWITCH
---help---
Enable support for the Marvell XOR engine.
@@ -186,6 +189,7 @@ config AMCC_PPC440SPE_ADMA
tristate "AMCC PPC440SPe ADMA support"
depends on 440SPe || 440SP
select DMA_ENGINE
+ select DMA_ENGINE_RAID
select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
select ASYNC_TX_ENABLE_CHANNEL_SWITCH
help
@@ -351,6 +355,7 @@ config NET_DMA
bool "Network: TCP receive copy offload"
depends on DMA_ENGINE && NET
default (INTEL_IOATDMA || FSL_DMA)
+ depends on BROKEN
help
This enables the use of DMA engines in the network stack to
offload receive copy-to-user operations, freeing CPU cycles.
@@ -376,4 +381,7 @@ config DMATEST
Simple DMA test client. Say N unless you're debugging a
DMA Device driver.
+config DMA_ENGINE_RAID
+ bool
+
endif
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index e51a9832ef0d..ec4ee5c1fe9d 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1164,42 +1164,12 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
kfree(txd);
}
-static void pl08x_unmap_buffers(struct pl08x_txd *txd)
-{
- struct device *dev = txd->vd.tx.chan->device->dev;
- struct pl08x_sg *dsg;
-
- if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
- list_for_each_entry(dsg, &txd->dsg_list, node)
- dma_unmap_single(dev, dsg->src_addr, dsg->len,
- DMA_TO_DEVICE);
- else {
- list_for_each_entry(dsg, &txd->dsg_list, node)
- dma_unmap_page(dev, dsg->src_addr, dsg->len,
- DMA_TO_DEVICE);
- }
- }
- if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
- list_for_each_entry(dsg, &txd->dsg_list, node)
- dma_unmap_single(dev, dsg->dst_addr, dsg->len,
- DMA_FROM_DEVICE);
- else
- list_for_each_entry(dsg, &txd->dsg_list, node)
- dma_unmap_page(dev, dsg->dst_addr, dsg->len,
- DMA_FROM_DEVICE);
- }
-}
-
static void pl08x_desc_free(struct virt_dma_desc *vd)
{
struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
- if (!plchan->slave)
- pl08x_unmap_buffers(txd);
-
+ dma_descriptor_unmap(&vd->tx);
if (!txd->done)
pl08x_release_mux(plchan);
@@ -1252,7 +1222,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
size_t bytes = 0;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
/*
@@ -1267,7 +1237,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
spin_lock_irqsave(&plchan->vc.lock, flags);
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_SUCCESS) {
+ if (ret != DMA_COMPLETE) {
vd = vchan_find_desc(&plchan->vc, cookie);
if (vd) {
/* On the issued list, so hasn't been processed yet */
@@ -2138,8 +2108,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
- ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
- DRIVER_NAME, pl08x);
+ ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x);
if (ret) {
dev_err(&adev->dev, "%s failed to request interrupt %d\n",
__func__, adev->irq[0]);
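This is the first of many DMA_SUCCESS -> DMA_COMPLETE conversions in this series; the value was renamed because it reports that a descriptor completed, not that the operation succeeded. Client code polls the same way under the new name, as in this minimal sketch (chan and cookie assumed from an earlier submission):

	#include <linux/dmaengine.h>

	enum dma_status status;

	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
	if (status == DMA_COMPLETE)
		;	/* descriptor finished */
	else if (status == DMA_ERROR)
		;	/* transfer failed */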
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index c787f38a186a..e2c04dc81e2a 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -344,31 +344,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
/* move myself to free_list */
list_move(&desc->desc_node, &atchan->free_list);
- /* unmap dma addresses (not on slave channels) */
- if (!atchan->chan_common.private) {
- struct device *parent = chan2parent(&atchan->chan_common);
- if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
- dma_unmap_single(parent,
- desc->lli.daddr,
- desc->len, DMA_FROM_DEVICE);
- else
- dma_unmap_page(parent,
- desc->lli.daddr,
- desc->len, DMA_FROM_DEVICE);
- }
- if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
- dma_unmap_single(parent,
- desc->lli.saddr,
- desc->len, DMA_TO_DEVICE);
- else
- dma_unmap_page(parent,
- desc->lli.saddr,
- desc->len, DMA_TO_DEVICE);
- }
- }
-
+ dma_descriptor_unmap(txd);
/* for cyclic transfers,
* no need to replay callback function while stopping */
if (!atc_chan_is_cyclic(atchan)) {
@@ -1102,7 +1078,7 @@ atc_tx_status(struct dma_chan *chan,
int bytes = 0;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
/*
* There's no point calculating the residue if there's
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index f31d647acdfa..2787aba60c6b 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -347,10 +347,6 @@ static struct device *chan2dev(struct dma_chan *chan)
{
return &chan->dev->device;
}
-static struct device *chan2parent(struct dma_chan *chan)
-{
- return chan->dev->device.parent;
-}
#if defined(VERBOSE_DEBUG)
static void vdbg_dump_regs(struct at_dma_chan *atchan)
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 31011d2a26fc..3c6716e0b78e 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -2369,7 +2369,7 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
dma_set_residue(txstate, coh901318_get_bytes_left(chan));
@@ -2694,7 +2694,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, IRQF_DISABLED,
+ err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, 0,
"coh901318", base);
if (err)
return err;
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index 7c82b92f9b16..c29dacff66fa 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -141,6 +141,9 @@ struct cppi41_dd {
const struct chan_queues *queues_rx;
const struct chan_queues *queues_tx;
struct chan_queues td_queue;
+
+ /* context for suspend/resume */
+ unsigned int dma_tdfdq;
};
#define FIST_COMPLETION_QUEUE 93
@@ -263,6 +266,15 @@ static u32 pd_trans_len(u32 val)
return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1);
}
+static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
+{
+ u32 desc;
+
+ desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num));
+ desc &= ~0x1f;
+ return desc;
+}
+
static irqreturn_t cppi41_irq(int irq, void *data)
{
struct cppi41_dd *cdd = data;
@@ -300,8 +312,7 @@ static irqreturn_t cppi41_irq(int irq, void *data)
q_num = __fls(val);
val &= ~(1 << q_num);
q_num += 32 * i;
- desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(q_num));
- desc &= ~0x1f;
+ desc = cppi41_pop_desc(cdd, q_num);
c = desc_to_chan(cdd, desc);
if (WARN_ON(!c)) {
pr_err("%s() q %d desc %08x\n", __func__,
@@ -353,7 +364,7 @@ static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
/* lock */
ret = dma_cookie_status(chan, cookie, txstate);
- if (txstate && ret == DMA_SUCCESS)
+ if (txstate && ret == DMA_COMPLETE)
txstate->residue = c->residue;
/* unlock */
@@ -517,15 +528,6 @@ static void cppi41_compute_td_desc(struct cppi41_desc *d)
d->pd0 = DESC_TYPE_TEARD << DESC_TYPE;
}
-static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
-{
- u32 desc;
-
- desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num));
- desc &= ~0x1f;
- return desc;
-}
-
static int cppi41_tear_down_chan(struct cppi41_channel *c)
{
struct cppi41_dd *cdd = c->cdd;
@@ -561,36 +563,26 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c)
c->td_retry = 100;
}
- if (!c->td_seen) {
- unsigned td_comp_queue;
+ if (!c->td_seen || !c->td_desc_seen) {
- if (c->is_tx)
- td_comp_queue = cdd->td_queue.complete;
- else
- td_comp_queue = c->q_comp_num;
+ desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete);
+ if (!desc_phys)
+ desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
- desc_phys = cppi41_pop_desc(cdd, td_comp_queue);
- if (desc_phys) {
- __iormb();
+ if (desc_phys == c->desc_phys) {
+ c->td_desc_seen = 1;
+
+ } else if (desc_phys == td_desc_phys) {
+ u32 pd0;
- if (desc_phys == td_desc_phys) {
- u32 pd0;
- pd0 = td->pd0;
- WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD);
- WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX));
- WARN_ON((pd0 & 0x1f) != c->port_num);
- } else {
- WARN_ON_ONCE(1);
- }
- c->td_seen = 1;
- }
- }
- if (!c->td_desc_seen) {
- desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
- if (desc_phys) {
__iormb();
- WARN_ON(c->desc_phys != desc_phys);
- c->td_desc_seen = 1;
+ pd0 = td->pd0;
+ WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD);
+ WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX));
+ WARN_ON((pd0 & 0x1f) != c->port_num);
+ c->td_seen = 1;
+ } else if (desc_phys) {
+ WARN_ON_ONCE(1);
}
}
c->td_retry--;
@@ -609,7 +601,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c)
WARN_ON(!c->td_retry);
if (!c->td_desc_seen) {
- desc_phys = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
+ desc_phys = cppi41_pop_desc(cdd, c->q_num);
WARN_ON(!desc_phys);
}
@@ -674,14 +666,14 @@ static void cleanup_chans(struct cppi41_dd *cdd)
}
}
-static int cppi41_add_chans(struct platform_device *pdev, struct cppi41_dd *cdd)
+static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
{
struct cppi41_channel *cchan;
int i;
int ret;
u32 n_chans;
- ret = of_property_read_u32(pdev->dev.of_node, "#dma-channels",
+ ret = of_property_read_u32(dev->of_node, "#dma-channels",
&n_chans);
if (ret)
return ret;
@@ -719,7 +711,7 @@ err:
return -ENOMEM;
}
-static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
+static void purge_descs(struct device *dev, struct cppi41_dd *cdd)
{
unsigned int mem_decs;
int i;
@@ -731,7 +723,7 @@ static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i));
cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i));
- dma_free_coherent(&pdev->dev, mem_decs, cdd->cd,
+ dma_free_coherent(dev, mem_decs, cdd->cd,
cdd->descs_phys);
}
}
@@ -741,19 +733,19 @@ static void disable_sched(struct cppi41_dd *cdd)
cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
}
-static void deinit_cpii41(struct platform_device *pdev, struct cppi41_dd *cdd)
+static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd)
{
disable_sched(cdd);
- purge_descs(pdev, cdd);
+ purge_descs(dev, cdd);
cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
- dma_free_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
+ dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
cdd->scratch_phys);
}
-static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
+static int init_descs(struct device *dev, struct cppi41_dd *cdd)
{
unsigned int desc_size;
unsigned int mem_decs;
@@ -777,7 +769,7 @@ static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
reg |= ilog2(ALLOC_DECS_NUM) - 5;
BUILD_BUG_ON(DESCS_AREAS != 1);
- cdd->cd = dma_alloc_coherent(&pdev->dev, mem_decs,
+ cdd->cd = dma_alloc_coherent(dev, mem_decs,
&cdd->descs_phys, GFP_KERNEL);
if (!cdd->cd)
return -ENOMEM;
@@ -813,12 +805,12 @@ static void init_sched(struct cppi41_dd *cdd)
cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
}
-static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
+static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
{
int ret;
BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1));
- cdd->qmgr_scratch = dma_alloc_coherent(&pdev->dev, QMGR_SCRATCH_SIZE,
+ cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE,
&cdd->scratch_phys, GFP_KERNEL);
if (!cdd->qmgr_scratch)
return -ENOMEM;
@@ -827,7 +819,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
- ret = init_descs(pdev, cdd);
+ ret = init_descs(dev, cdd);
if (ret)
goto err_td;
@@ -835,7 +827,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
init_sched(cdd);
return 0;
err_td:
- deinit_cpii41(pdev, cdd);
+ deinit_cppi41(dev, cdd);
return ret;
}
@@ -914,11 +906,11 @@ static const struct of_device_id cppi41_dma_ids[] = {
};
MODULE_DEVICE_TABLE(of, cppi41_dma_ids);
-static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev)
+static const struct cppi_glue_infos *get_glue_info(struct device *dev)
{
const struct of_device_id *of_id;
- of_id = of_match_node(cppi41_dma_ids, pdev->dev.of_node);
+ of_id = of_match_node(cppi41_dma_ids, dev->of_node);
if (!of_id)
return NULL;
return of_id->data;
@@ -927,11 +919,12 @@ static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev)
static int cppi41_dma_probe(struct platform_device *pdev)
{
struct cppi41_dd *cdd;
+ struct device *dev = &pdev->dev;
const struct cppi_glue_infos *glue_info;
int irq;
int ret;
- glue_info = get_glue_info(pdev);
+ glue_info = get_glue_info(dev);
if (!glue_info)
return -EINVAL;
@@ -946,14 +939,14 @@ static int cppi41_dma_probe(struct platform_device *pdev)
cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
cdd->ddev.device_control = cppi41_dma_control;
- cdd->ddev.dev = &pdev->dev;
+ cdd->ddev.dev = dev;
INIT_LIST_HEAD(&cdd->ddev.channels);
cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
- cdd->usbss_mem = of_iomap(pdev->dev.of_node, 0);
- cdd->ctrl_mem = of_iomap(pdev->dev.of_node, 1);
- cdd->sched_mem = of_iomap(pdev->dev.of_node, 2);
- cdd->qmgr_mem = of_iomap(pdev->dev.of_node, 3);
+ cdd->usbss_mem = of_iomap(dev->of_node, 0);
+ cdd->ctrl_mem = of_iomap(dev->of_node, 1);
+ cdd->sched_mem = of_iomap(dev->of_node, 2);
+ cdd->qmgr_mem = of_iomap(dev->of_node, 3);
if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
!cdd->qmgr_mem) {
@@ -961,31 +954,31 @@ static int cppi41_dma_probe(struct platform_device *pdev)
goto err_remap;
}
- pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret)
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
goto err_get_sync;
cdd->queues_rx = glue_info->queues_rx;
cdd->queues_tx = glue_info->queues_tx;
cdd->td_queue = glue_info->td_queue;
- ret = init_cppi41(pdev, cdd);
+ ret = init_cppi41(dev, cdd);
if (ret)
goto err_init_cppi;
- ret = cppi41_add_chans(pdev, cdd);
+ ret = cppi41_add_chans(dev, cdd);
if (ret)
goto err_chans;
- irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ irq = irq_of_parse_and_map(dev->of_node, 0);
if (!irq)
goto err_irq;
cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
ret = request_irq(irq, glue_info->isr, IRQF_SHARED,
- dev_name(&pdev->dev), cdd);
+ dev_name(dev), cdd);
if (ret)
goto err_irq;
cdd->irq = irq;
@@ -994,7 +987,7 @@ static int cppi41_dma_probe(struct platform_device *pdev)
if (ret)
goto err_dma_reg;
- ret = of_dma_controller_register(pdev->dev.of_node,
+ ret = of_dma_controller_register(dev->of_node,
cppi41_dma_xlate, &cpp41_dma_info);
if (ret)
goto err_of;
@@ -1009,11 +1002,11 @@ err_irq:
cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
cleanup_chans(cdd);
err_chans:
- deinit_cpii41(pdev, cdd);
+ deinit_cppi41(dev, cdd);
err_init_cppi:
- pm_runtime_put(&pdev->dev);
+ pm_runtime_put(dev);
err_get_sync:
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_disable(dev);
iounmap(cdd->usbss_mem);
iounmap(cdd->ctrl_mem);
iounmap(cdd->sched_mem);
@@ -1033,7 +1026,7 @@ static int cppi41_dma_remove(struct platform_device *pdev)
cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
free_irq(cdd->irq, cdd);
cleanup_chans(cdd);
- deinit_cpii41(pdev, cdd);
+ deinit_cppi41(&pdev->dev, cdd);
iounmap(cdd->usbss_mem);
iounmap(cdd->ctrl_mem);
iounmap(cdd->sched_mem);
@@ -1044,12 +1037,53 @@ static int cppi41_dma_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int cppi41_suspend(struct device *dev)
+{
+ struct cppi41_dd *cdd = dev_get_drvdata(dev);
+
+ cdd->dma_tdfdq = cppi_readl(cdd->ctrl_mem + DMA_TDFDQ);
+ cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
+ disable_sched(cdd);
+
+ return 0;
+}
+
+static int cppi41_resume(struct device *dev)
+{
+ struct cppi41_dd *cdd = dev_get_drvdata(dev);
+ struct cppi41_channel *c;
+ int i;
+
+ for (i = 0; i < DESCS_AREAS; i++)
+ cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
+
+ list_for_each_entry(c, &cdd->ddev.channels, chan.device_node)
+ if (!c->is_tx)
+ cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);
+
+ init_sched(cdd);
+
+ cppi_writel(cdd->dma_tdfdq, cdd->ctrl_mem + DMA_TDFDQ);
+ cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
+ cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
+ cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
+
+ cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume);
+
static struct platform_driver cpp41_dma_driver = {
.probe = cppi41_dma_probe,
.remove = cppi41_dma_remove,
.driver = {
.name = "cppi41-dma-engine",
.owner = THIS_MODULE,
+ .pm = &cppi41_pm_ops,
.of_match_table = of_match_ptr(cppi41_dma_ids),
},
};
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index b0c0c8268d42..94c380f07538 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -491,7 +491,7 @@ static enum dma_status jz4740_dma_tx_status(struct dma_chan *c,
unsigned long flags;
status = dma_cookie_status(c, cookie, state);
- if (status == DMA_SUCCESS || !state)
+ if (status == DMA_COMPLETE || !state)
return status;
spin_lock_irqsave(&chan->vchan.lock, flags);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 9162ac80c18f..ef63b9058f3c 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -65,6 +65,7 @@
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
+#include <linux/mempool.h>
static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
@@ -901,98 +902,132 @@ void dma_async_device_unregister(struct dma_device *device)
}
EXPORT_SYMBOL(dma_async_device_unregister);
-/**
- * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
- * @chan: DMA channel to offload copy to
- * @dest: destination address (virtual)
- * @src: source address (virtual)
- * @len: length
- *
- * Both @dest and @src must be mappable to a bus address according to the
- * DMA mapping API rules for streaming mappings.
- * Both @dest and @src must stay memory resident (kernel memory or locked
- * user space pages).
- */
-dma_cookie_t
-dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
- void *src, size_t len)
-{
- struct dma_device *dev = chan->device;
- struct dma_async_tx_descriptor *tx;
- dma_addr_t dma_dest, dma_src;
- dma_cookie_t cookie;
- unsigned long flags;
+struct dmaengine_unmap_pool {
+ struct kmem_cache *cache;
+ const char *name;
+ mempool_t *pool;
+ size_t size;
+};
- dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
- dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
- flags = DMA_CTRL_ACK |
- DMA_COMPL_SRC_UNMAP_SINGLE |
- DMA_COMPL_DEST_UNMAP_SINGLE;
- tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
+#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
+static struct dmaengine_unmap_pool unmap_pool[] = {
+ __UNMAP_POOL(2),
+ #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
+ __UNMAP_POOL(16),
+ __UNMAP_POOL(128),
+ __UNMAP_POOL(256),
+ #endif
+};
- if (!tx) {
- dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
- dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
- return -ENOMEM;
+static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
+{
+ int order = get_count_order(nr);
+
+ switch (order) {
+ case 0 ... 1:
+ return &unmap_pool[0];
+ case 2 ... 4:
+ return &unmap_pool[1];
+ case 5 ... 7:
+ return &unmap_pool[2];
+ case 8:
+ return &unmap_pool[3];
+ default:
+ BUG();
+ return NULL;
}
+}
- tx->callback = NULL;
- cookie = tx->tx_submit(tx);
+static void dmaengine_unmap(struct kref *kref)
+{
+ struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
+ struct device *dev = unmap->dev;
+ int cnt, i;
+
+ cnt = unmap->to_cnt;
+ for (i = 0; i < cnt; i++)
+ dma_unmap_page(dev, unmap->addr[i], unmap->len,
+ DMA_TO_DEVICE);
+ cnt += unmap->from_cnt;
+ for (; i < cnt; i++)
+ dma_unmap_page(dev, unmap->addr[i], unmap->len,
+ DMA_FROM_DEVICE);
+ cnt += unmap->bidi_cnt;
+ for (; i < cnt; i++) {
+ if (unmap->addr[i] == 0)
+ continue;
+ dma_unmap_page(dev, unmap->addr[i], unmap->len,
+ DMA_BIDIRECTIONAL);
+ }
+ mempool_free(unmap, __get_unmap_pool(cnt)->pool);
+}
- preempt_disable();
- __this_cpu_add(chan->local->bytes_transferred, len);
- __this_cpu_inc(chan->local->memcpy_count);
- preempt_enable();
+void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
+{
+ if (unmap)
+ kref_put(&unmap->kref, dmaengine_unmap);
+}
+EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
- return cookie;
+static void dmaengine_destroy_unmap_pool(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
+ struct dmaengine_unmap_pool *p = &unmap_pool[i];
+
+ if (p->pool)
+ mempool_destroy(p->pool);
+ p->pool = NULL;
+ if (p->cache)
+ kmem_cache_destroy(p->cache);
+ p->cache = NULL;
+ }
}
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
-/**
- * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
- * @chan: DMA channel to offload copy to
- * @page: destination page
- * @offset: offset in page to copy to
- * @kdata: source address (virtual)
- * @len: length
- *
- * Both @page/@offset and @kdata must be mappable to a bus address according
- * to the DMA mapping API rules for streaming mappings.
- * Both @page/@offset and @kdata must stay memory resident (kernel memory or
- * locked user space pages)
- */
-dma_cookie_t
-dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
- unsigned int offset, void *kdata, size_t len)
+static int __init dmaengine_init_unmap_pool(void)
{
- struct dma_device *dev = chan->device;
- struct dma_async_tx_descriptor *tx;
- dma_addr_t dma_dest, dma_src;
- dma_cookie_t cookie;
- unsigned long flags;
+ int i;
- dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
- dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
- flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
- tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
+ for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
+ struct dmaengine_unmap_pool *p = &unmap_pool[i];
+ size_t size;
- if (!tx) {
- dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
- dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
- return -ENOMEM;
+ size = sizeof(struct dmaengine_unmap_data) +
+ sizeof(dma_addr_t) * p->size;
+
+ p->cache = kmem_cache_create(p->name, size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!p->cache)
+ break;
+ p->pool = mempool_create_slab_pool(1, p->cache);
+ if (!p->pool)
+ break;
}
- tx->callback = NULL;
- cookie = tx->tx_submit(tx);
+ if (i == ARRAY_SIZE(unmap_pool))
+ return 0;
- preempt_disable();
- __this_cpu_add(chan->local->bytes_transferred, len);
- __this_cpu_inc(chan->local->memcpy_count);
- preempt_enable();
+ dmaengine_destroy_unmap_pool();
+ return -ENOMEM;
+}
- return cookie;
+struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
+{
+ struct dmaengine_unmap_data *unmap;
+
+ unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
+ if (!unmap)
+ return NULL;
+
+ memset(unmap, 0, sizeof(*unmap));
+ kref_init(&unmap->kref);
+ unmap->dev = dev;
+
+ return unmap;
}
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
+EXPORT_SYMBOL(dmaengine_get_unmap_data);
/**
* dma_async_memcpy_pg_to_pg - offloaded copy from page to page
@@ -1015,24 +1050,33 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
{
struct dma_device *dev = chan->device;
struct dma_async_tx_descriptor *tx;
- dma_addr_t dma_dest, dma_src;
+ struct dmaengine_unmap_data *unmap;
dma_cookie_t cookie;
unsigned long flags;
- dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
- dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
- DMA_FROM_DEVICE);
+ unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
+ if (!unmap)
+ return -ENOMEM;
+
+ unmap->to_cnt = 1;
+ unmap->from_cnt = 1;
+ unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
+ DMA_TO_DEVICE);
+ unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
+ DMA_FROM_DEVICE);
+ unmap->len = len;
flags = DMA_CTRL_ACK;
- tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
+ tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
+ len, flags);
if (!tx) {
- dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
- dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
+ dmaengine_unmap_put(unmap);
return -ENOMEM;
}
- tx->callback = NULL;
+ dma_set_unmap(tx, unmap);
cookie = tx->tx_submit(tx);
+ dmaengine_unmap_put(unmap);
preempt_disable();
__this_cpu_add(chan->local->bytes_transferred, len);
@@ -1043,6 +1087,52 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
+/**
+ * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
+ * @chan: DMA channel to offload copy to
+ * @dest: destination address (virtual)
+ * @src: source address (virtual)
+ * @len: length
+ *
+ * Both @dest and @src must be mappable to a bus address according to the
+ * DMA mapping API rules for streaming mappings.
+ * Both @dest and @src must stay memory resident (kernel memory or locked
+ * user space pages).
+ */
+dma_cookie_t
+dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
+ void *src, size_t len)
+{
+ return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest),
+ (unsigned long) dest & ~PAGE_MASK,
+ virt_to_page(src),
+ (unsigned long) src & ~PAGE_MASK, len);
+}
+EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
+
+/**
+ * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
+ * @chan: DMA channel to offload copy to
+ * @page: destination page
+ * @offset: offset in page to copy to
+ * @kdata: source address (virtual)
+ * @len: length
+ *
+ * Both @page/@offset and @kdata must be mappable to a bus address according
+ * to the DMA mapping API rules for streaming mappings.
+ * Both @page/@offset and @kdata must stay memory resident (kernel memory or
+ * locked user space pages)
+ */
+dma_cookie_t
+dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
+ unsigned int offset, void *kdata, size_t len)
+{
+ return dma_async_memcpy_pg_to_pg(chan, page, offset,
+ virt_to_page(kdata),
+ (unsigned long) kdata & ~PAGE_MASK, len);
+}
+EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
+
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
struct dma_chan *chan)
{
@@ -1062,7 +1152,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
if (!tx)
- return DMA_SUCCESS;
+ return DMA_COMPLETE;
while (tx->cookie == -EBUSY) {
if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
@@ -1116,6 +1206,10 @@ EXPORT_SYMBOL_GPL(dma_run_dependencies);
static int __init dma_bus_init(void)
{
+ int err = dmaengine_init_unmap_pool();
+
+ if (err)
+ return err;
return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);
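The rework above retires the DMA_COMPL_*_UNMAP flag machinery in favor of reference-counted struct dmaengine_unmap_data objects drawn from per-size mempools (2 entries always; 16, 128, and 256 only when CONFIG_DMA_ENGINE_RAID is selected, since only RAID offload needs that many addresses). A client fills in the mapped addresses, attaches the object with dma_set_unmap(), and drops its own reference; the core unmaps everything when the last reference is released. A hedged sketch mirroring the dma_async_memcpy_pg_to_pg() conversion:

	struct dmaengine_unmap_data *unmap;

	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
	if (!unmap)
		return -ENOMEM;

	unmap->to_cnt = 1;	/* addr[0] is DMA_TO_DEVICE */
	unmap->from_cnt = 1;	/* addr[1] is DMA_FROM_DEVICE */
	unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	unmap->addr[1] = dma_map_page(dev->dev, dst_pg, dst_off, len, DMA_FROM_DEVICE);
	unmap->len = len;

	tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
					 len, DMA_CTRL_ACK);
	if (!tx) {
		dmaengine_unmap_put(unmap);	/* unmaps both pages now */
		return -ENOMEM;
	}

	dma_set_unmap(tx, unmap);	/* descriptor takes its own reference */
	cookie = tx->tx_submit(tx);
	dmaengine_unmap_put(unmap);	/* drop ours; unmap happens on completion */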
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 92f796cdc6ab..9dfcaf5c1288 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -8,6 +8,8 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
@@ -19,10 +21,6 @@
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
-#include <linux/ctype.h>
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/seq_file.h>
static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
@@ -68,92 +66,13 @@ module_param(timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
"Pass -1 for infinite timeout");
-/* Maximum amount of mismatched bytes in buffer to print */
-#define MAX_ERROR_COUNT 32
-
-/*
- * Initialization patterns. All bytes in the source buffer has bit 7
- * set, all bytes in the destination buffer has bit 7 cleared.
- *
- * Bit 6 is set for all bytes which are to be copied by the DMA
- * engine. Bit 5 is set for all bytes which are to be overwritten by
- * the DMA engine.
- *
- * The remaining bits are the inverse of a counter which increments by
- * one for each byte address.
- */
-#define PATTERN_SRC 0x80
-#define PATTERN_DST 0x00
-#define PATTERN_COPY 0x40
-#define PATTERN_OVERWRITE 0x20
-#define PATTERN_COUNT_MASK 0x1f
-
-enum dmatest_error_type {
- DMATEST_ET_OK,
- DMATEST_ET_MAP_SRC,
- DMATEST_ET_MAP_DST,
- DMATEST_ET_PREP,
- DMATEST_ET_SUBMIT,
- DMATEST_ET_TIMEOUT,
- DMATEST_ET_DMA_ERROR,
- DMATEST_ET_DMA_IN_PROGRESS,
- DMATEST_ET_VERIFY,
- DMATEST_ET_VERIFY_BUF,
-};
-
-struct dmatest_verify_buffer {
- unsigned int index;
- u8 expected;
- u8 actual;
-};
-
-struct dmatest_verify_result {
- unsigned int error_count;
- struct dmatest_verify_buffer data[MAX_ERROR_COUNT];
- u8 pattern;
- bool is_srcbuf;
-};
-
-struct dmatest_thread_result {
- struct list_head node;
- unsigned int n;
- unsigned int src_off;
- unsigned int dst_off;
- unsigned int len;
- enum dmatest_error_type type;
- union {
- unsigned long data;
- dma_cookie_t cookie;
- enum dma_status status;
- int error;
- struct dmatest_verify_result *vr;
- };
-};
-
-struct dmatest_result {
- struct list_head node;
- char *name;
- struct list_head results;
-};
-
-struct dmatest_info;
-
-struct dmatest_thread {
- struct list_head node;
- struct dmatest_info *info;
- struct task_struct *task;
- struct dma_chan *chan;
- u8 **srcs;
- u8 **dsts;
- enum dma_transaction_type type;
- bool done;
-};
+static bool noverify;
+module_param(noverify, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(noverify, "Disable random data setup and verification");
-struct dmatest_chan {
- struct list_head node;
- struct dma_chan *chan;
- struct list_head threads;
-};
+static bool verbose;
+module_param(verbose, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
/**
* struct dmatest_params - test parameters.
@@ -177,6 +96,7 @@ struct dmatest_params {
unsigned int xor_sources;
unsigned int pq_sources;
int timeout;
+ bool noverify;
};
/**
@@ -184,7 +104,7 @@ struct dmatest_params {
* @params: test parameters
* @lock: access protection to the fields of this structure
*/
-struct dmatest_info {
+static struct dmatest_info {
/* Test parameters */
struct dmatest_params params;
@@ -192,16 +112,95 @@ struct dmatest_info {
struct list_head channels;
unsigned int nr_channels;
struct mutex lock;
+ bool did_init;
+} test_info = {
+ .channels = LIST_HEAD_INIT(test_info.channels),
+ .lock = __MUTEX_INITIALIZER(test_info.lock),
+};
+
+static int dmatest_run_set(const char *val, const struct kernel_param *kp);
+static int dmatest_run_get(char *val, const struct kernel_param *kp);
+static struct kernel_param_ops run_ops = {
+ .set = dmatest_run_set,
+ .get = dmatest_run_get,
+};
+static bool dmatest_run;
+module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(run, "Run the test (default: false)");
+
+/* Maximum number of mismatched bytes in buffer to print */
+#define MAX_ERROR_COUNT 32
+
+/*
+ * Initialization patterns. All bytes in the source buffer have bit 7
+ * set, all bytes in the destination buffer have bit 7 cleared.
+ *
+ * Bit 6 is set for all bytes which are to be copied by the DMA
+ * engine. Bit 5 is set for all bytes which are to be overwritten by
+ * the DMA engine.
+ *
+ * The remaining bits are the inverse of a counter which increments by
+ * one for each byte address.
+ */
+#define PATTERN_SRC 0x80
+#define PATTERN_DST 0x00
+#define PATTERN_COPY 0x40
+#define PATTERN_OVERWRITE 0x20
+#define PATTERN_COUNT_MASK 0x1f
- /* debugfs related stuff */
- struct dentry *root;
+struct dmatest_thread {
+ struct list_head node;
+ struct dmatest_info *info;
+ struct task_struct *task;
+ struct dma_chan *chan;
+ u8 **srcs;
+ u8 **dsts;
+ enum dma_transaction_type type;
+ bool done;
+};
- /* Test results */
- struct list_head results;
- struct mutex results_lock;
+struct dmatest_chan {
+ struct list_head node;
+ struct dma_chan *chan;
+ struct list_head threads;
};
-static struct dmatest_info test_info;
+static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
+static bool wait;
+
+static bool is_threaded_test_run(struct dmatest_info *info)
+{
+ struct dmatest_chan *dtc;
+
+ list_for_each_entry(dtc, &info->channels, node) {
+ struct dmatest_thread *thread;
+
+ list_for_each_entry(thread, &dtc->threads, node) {
+ if (!thread->done)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int dmatest_wait_get(char *val, const struct kernel_param *kp)
+{
+ struct dmatest_info *info = &test_info;
+ struct dmatest_params *params = &info->params;
+
+ if (params->iterations)
+ wait_event(thread_wait, !is_threaded_test_run(info));
+ wait = true;
+ return param_get_bool(val, kp);
+}
+
+static struct kernel_param_ops wait_ops = {
+ .get = dmatest_wait_get,
+ .set = param_set_bool,
+};
+module_param_cb(wait, &wait_ops, &wait, S_IRUGO);
+MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)");
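With the debugfs interface gone, the rework leaves module parameters as the sole control surface: for example, loading with "modprobe dmatest iterations=5 timeout=2000 run=1" starts the test immediately, writing 1 to /sys/module/dmatest/parameters/run restarts it at runtime, and reading /sys/module/dmatest/parameters/wait blocks until every test thread has finished, which is what the dmatest_wait_get() handler above implements.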
static bool dmatest_match_channel(struct dmatest_params *params,
struct dma_chan *chan)
@@ -223,7 +222,7 @@ static unsigned long dmatest_random(void)
{
unsigned long buf;
- get_random_bytes(&buf, sizeof(buf));
+ prandom_bytes(&buf, sizeof(buf));
return buf;
}
@@ -262,9 +261,31 @@ static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
}
}
-static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs,
- unsigned int start, unsigned int end, unsigned int counter,
- u8 pattern, bool is_srcbuf)
+static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
+ unsigned int counter, bool is_srcbuf)
+{
+ u8 diff = actual ^ pattern;
+ u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
+ const char *thread_name = current->comm;
+
+ if (is_srcbuf)
+ pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+ else if ((pattern & PATTERN_COPY)
+ && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
+ pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+ else if (diff & PATTERN_SRC)
+ pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+ else
+ pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+}
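/*
 * Worked example, illustration only: a destination byte inside the copy
 * window that the engine never wrote still reads back as
 * PATTERN_DST | (~i & PATTERN_COUNT_MASK), while the expected pattern
 * is PATTERN_SRC | PATTERN_COPY, so the XOR has PATTERN_COPY set and
 * the "not copied!" branch above fires.
 */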
+
+static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
+ unsigned int end, unsigned int counter, u8 pattern,
+ bool is_srcbuf)
{
unsigned int i;
unsigned int error_count = 0;
@@ -272,7 +293,6 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs,
u8 expected;
u8 *buf;
unsigned int counter_orig = counter;
- struct dmatest_verify_buffer *vb;
for (; (buf = *bufs); bufs++) {
counter = counter_orig;
@@ -280,12 +300,9 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs,
actual = buf[i];
expected = pattern | (~counter & PATTERN_COUNT_MASK);
if (actual != expected) {
- if (error_count < MAX_ERROR_COUNT && vr) {
- vb = &vr->data[error_count];
- vb->index = i;
- vb->expected = expected;
- vb->actual = actual;
- }
+ if (error_count < MAX_ERROR_COUNT)
+ dmatest_mismatch(actual, pattern, i,
+ counter, is_srcbuf);
error_count++;
}
counter++;
@@ -293,7 +310,7 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs,
}
if (error_count > MAX_ERROR_COUNT)
- pr_warning("%s: %u errors suppressed\n",
+ pr_warn("%s: %u errors suppressed\n",
current->comm, error_count - MAX_ERROR_COUNT);
return error_count;
@@ -313,20 +330,6 @@ static void dmatest_callback(void *arg)
wake_up_all(done->wait);
}
-static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
- unsigned int count)
-{
- while (count--)
- dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
-}
-
-static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
- unsigned int count)
-{
- while (count--)
- dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
-}
-
static unsigned int min_odd(unsigned int x, unsigned int y)
{
unsigned int val = min(x, y);
@@ -334,172 +337,49 @@ static unsigned int min_odd(unsigned int x, unsigned int y)
return val % 2 ? val : val - 1;
}
-static char *verify_result_get_one(struct dmatest_verify_result *vr,
- unsigned int i)
+static void result(const char *err, unsigned int n, unsigned int src_off,
+ unsigned int dst_off, unsigned int len, unsigned long data)
{
- struct dmatest_verify_buffer *vb = &vr->data[i];
- u8 diff = vb->actual ^ vr->pattern;
- static char buf[512];
- char *msg;
-
- if (vr->is_srcbuf)
- msg = "srcbuf overwritten!";
- else if ((vr->pattern & PATTERN_COPY)
- && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
- msg = "dstbuf not copied!";
- else if (diff & PATTERN_SRC)
- msg = "dstbuf was copied!";
- else
- msg = "dstbuf mismatch!";
-
- snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg,
- vb->index, vb->expected, vb->actual);
-
- return buf;
+	pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
+ current->comm, n, err, src_off, dst_off, len, data);
}
-static char *thread_result_get(const char *name,
- struct dmatest_thread_result *tr)
+static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
+ unsigned int dst_off, unsigned int len,
+ unsigned long data)
{
- static const char * const messages[] = {
- [DMATEST_ET_OK] = "No errors",
- [DMATEST_ET_MAP_SRC] = "src mapping error",
- [DMATEST_ET_MAP_DST] = "dst mapping error",
- [DMATEST_ET_PREP] = "prep error",
- [DMATEST_ET_SUBMIT] = "submit error",
- [DMATEST_ET_TIMEOUT] = "test timed out",
- [DMATEST_ET_DMA_ERROR] =
- "got completion callback (DMA_ERROR)",
- [DMATEST_ET_DMA_IN_PROGRESS] =
- "got completion callback (DMA_IN_PROGRESS)",
- [DMATEST_ET_VERIFY] = "errors",
- [DMATEST_ET_VERIFY_BUF] = "verify errors",
- };
- static char buf[512];
-
- snprintf(buf, sizeof(buf) - 1,
- "%s: #%u: %s with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)",
- name, tr->n, messages[tr->type], tr->src_off, tr->dst_off,
- tr->len, tr->data);
-
- return buf;
+	pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
+ current->comm, n, err, src_off, dst_off, len, data);
}
-static int thread_result_add(struct dmatest_info *info,
- struct dmatest_result *r, enum dmatest_error_type type,
- unsigned int n, unsigned int src_off, unsigned int dst_off,
- unsigned int len, unsigned long data)
-{
- struct dmatest_thread_result *tr;
-
- tr = kzalloc(sizeof(*tr), GFP_KERNEL);
- if (!tr)
- return -ENOMEM;
-
- tr->type = type;
- tr->n = n;
- tr->src_off = src_off;
- tr->dst_off = dst_off;
- tr->len = len;
- tr->data = data;
+#define verbose_result(err, n, src_off, dst_off, len, data) ({ \
+ if (verbose) \
+ result(err, n, src_off, dst_off, len, data); \
+ else \
+ dbg_result(err, n, src_off, dst_off, len, data); \
+})
- mutex_lock(&info->results_lock);
- list_add_tail(&tr->node, &r->results);
- mutex_unlock(&info->results_lock);
-
- if (tr->type == DMATEST_ET_OK)
- pr_debug("%s\n", thread_result_get(r->name, tr));
- else
- pr_warn("%s\n", thread_result_get(r->name, tr));
-
- return 0;
-}
-
-static unsigned int verify_result_add(struct dmatest_info *info,
- struct dmatest_result *r, unsigned int n,
- unsigned int src_off, unsigned int dst_off, unsigned int len,
- u8 **bufs, int whence, unsigned int counter, u8 pattern,
- bool is_srcbuf)
+static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
{
- struct dmatest_verify_result *vr;
- unsigned int error_count;
- unsigned int buf_off = is_srcbuf ? src_off : dst_off;
- unsigned int start, end;
-
- if (whence < 0) {
- start = 0;
- end = buf_off;
- } else if (whence > 0) {
- start = buf_off + len;
- end = info->params.buf_size;
- } else {
- start = buf_off;
- end = buf_off + len;
- }
+ unsigned long long per_sec = 1000000;
- vr = kmalloc(sizeof(*vr), GFP_KERNEL);
- if (!vr) {
- pr_warn("dmatest: No memory to store verify result\n");
- return dmatest_verify(NULL, bufs, start, end, counter, pattern,
- is_srcbuf);
- }
-
- vr->pattern = pattern;
- vr->is_srcbuf = is_srcbuf;
-
- error_count = dmatest_verify(vr, bufs, start, end, counter, pattern,
- is_srcbuf);
- if (error_count) {
- vr->error_count = error_count;
- thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off,
- dst_off, len, (unsigned long)vr);
- return error_count;
- }
-
- kfree(vr);
- return 0;
-}
-
-static void result_free(struct dmatest_info *info, const char *name)
-{
- struct dmatest_result *r, *_r;
-
- mutex_lock(&info->results_lock);
- list_for_each_entry_safe(r, _r, &info->results, node) {
- struct dmatest_thread_result *tr, *_tr;
-
- if (name && strcmp(r->name, name))
- continue;
-
- list_for_each_entry_safe(tr, _tr, &r->results, node) {
- if (tr->type == DMATEST_ET_VERIFY_BUF)
- kfree(tr->vr);
- list_del(&tr->node);
- kfree(tr);
- }
+ if (runtime <= 0)
+ return 0;
- kfree(r->name);
- list_del(&r->node);
- kfree(r);
+	/* drop precision until runtime fits in 32 bits */
+ while (runtime > UINT_MAX) {
+ runtime >>= 1;
+ per_sec <<= 1;
}
- mutex_unlock(&info->results_lock);
+ per_sec *= val;
+ do_div(per_sec, runtime);
+ return per_sec;
}
-static struct dmatest_result *result_init(struct dmatest_info *info,
- const char *name)
+static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
{
- struct dmatest_result *r;
-
- r = kzalloc(sizeof(*r), GFP_KERNEL);
- if (r) {
- r->name = kstrdup(name, GFP_KERNEL);
- INIT_LIST_HEAD(&r->results);
- mutex_lock(&info->results_lock);
- list_add_tail(&r->node, &info->results);
- mutex_unlock(&info->results_lock);
- }
- return r;
+ return dmatest_persec(runtime, len >> 10);
}
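/*
 * Worked example, illustration only: a thread that moved 1 MiB of data
 * in one second reports dmatest_KBs(1000000, 1 << 20)
 * = dmatest_persec(1000000, 1024) = 1024 * 1000000 / 1000000
 * = 1024 KB/s; the shift loop above only kicks in once the runtime in
 * microseconds no longer fits in 32 bits (roughly 71 minutes).
 */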
/*
@@ -525,7 +405,6 @@ static int dmatest_func(void *data)
struct dmatest_params *params;
struct dma_chan *chan;
struct dma_device *dev;
- const char *thread_name;
unsigned int src_off, dst_off, len;
unsigned int error_count;
unsigned int failed_tests = 0;
@@ -538,9 +417,10 @@ static int dmatest_func(void *data)
int src_cnt;
int dst_cnt;
int i;
- struct dmatest_result *result;
+ ktime_t ktime;
+ s64 runtime = 0;
+ unsigned long long total_len = 0;
- thread_name = current->comm;
set_freezable();
ret = -ENOMEM;
@@ -570,10 +450,6 @@ static int dmatest_func(void *data)
} else
goto err_thread_type;
- result = result_init(info, thread_name);
- if (!result)
- goto err_srcs;
-
thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
if (!thread->srcs)
goto err_srcs;
@@ -597,17 +473,17 @@ static int dmatest_func(void *data)
set_user_nice(current, 10);
/*
- * src buffers are freed by the DMAEngine code with dma_unmap_single()
- * dst buffers are freed by ourselves below
+ * src and dst buffers are freed by ourselves below
*/
- flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
- | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ ktime = ktime_get();
while (!kthread_should_stop()
&& !(params->iterations && total_tests >= params->iterations)) {
struct dma_async_tx_descriptor *tx = NULL;
- dma_addr_t dma_srcs[src_cnt];
- dma_addr_t dma_dsts[dst_cnt];
+ struct dmaengine_unmap_data *um;
+ dma_addr_t srcs[src_cnt];
+ dma_addr_t *dsts;
u8 align = 0;
total_tests++;
@@ -626,81 +502,103 @@ static int dmatest_func(void *data)
break;
}
- len = dmatest_random() % params->buf_size + 1;
+ if (params->noverify) {
+ len = params->buf_size;
+ src_off = 0;
+ dst_off = 0;
+ } else {
+ len = dmatest_random() % params->buf_size + 1;
+ len = (len >> align) << align;
+ if (!len)
+ len = 1 << align;
+ src_off = dmatest_random() % (params->buf_size - len + 1);
+ dst_off = dmatest_random() % (params->buf_size - len + 1);
+
+ src_off = (src_off >> align) << align;
+ dst_off = (dst_off >> align) << align;
+
+ dmatest_init_srcs(thread->srcs, src_off, len,
+ params->buf_size);
+ dmatest_init_dsts(thread->dsts, dst_off, len,
+ params->buf_size);
+ }
+
len = (len >> align) << align;
if (!len)
len = 1 << align;
- src_off = dmatest_random() % (params->buf_size - len + 1);
- dst_off = dmatest_random() % (params->buf_size - len + 1);
+ total_len += len;
- src_off = (src_off >> align) << align;
- dst_off = (dst_off >> align) << align;
-
- dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size);
- dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size);
+ um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt,
+ GFP_KERNEL);
+ if (!um) {
+ failed_tests++;
+ result("unmap data NULL", total_tests,
+ src_off, dst_off, len, ret);
+ continue;
+ }
+ um->len = params->buf_size;
for (i = 0; i < src_cnt; i++) {
- u8 *buf = thread->srcs[i] + src_off;
-
- dma_srcs[i] = dma_map_single(dev->dev, buf, len,
- DMA_TO_DEVICE);
- ret = dma_mapping_error(dev->dev, dma_srcs[i]);
+ void *buf = thread->srcs[i];
+ struct page *pg = virt_to_page(buf);
+ unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
+
+ um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
+ um->len, DMA_TO_DEVICE);
+ srcs[i] = um->addr[i] + src_off;
+ ret = dma_mapping_error(dev->dev, um->addr[i]);
if (ret) {
- unmap_src(dev->dev, dma_srcs, len, i);
- thread_result_add(info, result,
- DMATEST_ET_MAP_SRC,
- total_tests, src_off, dst_off,
- len, ret);
+ dmaengine_unmap_put(um);
+ result("src mapping error", total_tests,
+ src_off, dst_off, len, ret);
failed_tests++;
continue;
}
+ um->to_cnt++;
}
/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
+ dsts = &um->addr[src_cnt];
for (i = 0; i < dst_cnt; i++) {
- dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
- params->buf_size,
- DMA_BIDIRECTIONAL);
- ret = dma_mapping_error(dev->dev, dma_dsts[i]);
+ void *buf = thread->dsts[i];
+ struct page *pg = virt_to_page(buf);
+ unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
+
+ dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
+ DMA_BIDIRECTIONAL);
+ ret = dma_mapping_error(dev->dev, dsts[i]);
if (ret) {
- unmap_src(dev->dev, dma_srcs, len, src_cnt);
- unmap_dst(dev->dev, dma_dsts, params->buf_size,
- i);
- thread_result_add(info, result,
- DMATEST_ET_MAP_DST,
- total_tests, src_off, dst_off,
- len, ret);
+ dmaengine_unmap_put(um);
+ result("dst mapping error", total_tests,
+ src_off, dst_off, len, ret);
failed_tests++;
continue;
}
+ um->bidi_cnt++;
}
if (thread->type == DMA_MEMCPY)
tx = dev->device_prep_dma_memcpy(chan,
- dma_dsts[0] + dst_off,
- dma_srcs[0], len,
- flags);
+ dsts[0] + dst_off,
+ srcs[0], len, flags);
else if (thread->type == DMA_XOR)
tx = dev->device_prep_dma_xor(chan,
- dma_dsts[0] + dst_off,
- dma_srcs, src_cnt,
+ dsts[0] + dst_off,
+ srcs, src_cnt,
len, flags);
else if (thread->type == DMA_PQ) {
dma_addr_t dma_pq[dst_cnt];
for (i = 0; i < dst_cnt; i++)
- dma_pq[i] = dma_dsts[i] + dst_off;
- tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
+ dma_pq[i] = dsts[i] + dst_off;
+ tx = dev->device_prep_dma_pq(chan, dma_pq, srcs,
src_cnt, pq_coefs,
len, flags);
}
if (!tx) {
- unmap_src(dev->dev, dma_srcs, len, src_cnt);
- unmap_dst(dev->dev, dma_dsts, params->buf_size,
- dst_cnt);
- thread_result_add(info, result, DMATEST_ET_PREP,
- total_tests, src_off, dst_off,
- len, 0);
+ dmaengine_unmap_put(um);
+ result("prep error", total_tests, src_off,
+ dst_off, len, ret);
msleep(100);
failed_tests++;
continue;
@@ -712,9 +610,9 @@ static int dmatest_func(void *data)
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
- thread_result_add(info, result, DMATEST_ET_SUBMIT,
- total_tests, src_off, dst_off,
- len, cookie);
+ dmaengine_unmap_put(um);
+ result("submit error", total_tests, src_off,
+ dst_off, len, ret);
msleep(100);
failed_tests++;
continue;
@@ -735,59 +633,59 @@ static int dmatest_func(void *data)
* free it this time?" dancing. For now, just
* leave it dangling.
*/
- thread_result_add(info, result, DMATEST_ET_TIMEOUT,
- total_tests, src_off, dst_off,
- len, 0);
+ dmaengine_unmap_put(um);
+ result("test timed out", total_tests, src_off, dst_off,
+ len, 0);
failed_tests++;
continue;
- } else if (status != DMA_SUCCESS) {
- enum dmatest_error_type type = (status == DMA_ERROR) ?
- DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS;
- thread_result_add(info, result, type,
- total_tests, src_off, dst_off,
- len, status);
+ } else if (status != DMA_COMPLETE) {
+ dmaengine_unmap_put(um);
+ result(status == DMA_ERROR ?
+ "completion error status" :
+ "completion busy status", total_tests, src_off,
+ dst_off, len, ret);
failed_tests++;
continue;
}
- /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
- unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);
+ dmaengine_unmap_put(um);
- error_count = 0;
+ if (params->noverify) {
+ verbose_result("test passed", total_tests, src_off,
+ dst_off, len, 0);
+ continue;
+ }
- pr_debug("%s: verifying source buffer...\n", thread_name);
- error_count += verify_result_add(info, result, total_tests,
- src_off, dst_off, len, thread->srcs, -1,
+ pr_debug("%s: verifying source buffer...\n", current->comm);
+ error_count = dmatest_verify(thread->srcs, 0, src_off,
0, PATTERN_SRC, true);
- error_count += verify_result_add(info, result, total_tests,
- src_off, dst_off, len, thread->srcs, 0,
- src_off, PATTERN_SRC | PATTERN_COPY, true);
- error_count += verify_result_add(info, result, total_tests,
- src_off, dst_off, len, thread->srcs, 1,
- src_off + len, PATTERN_SRC, true);
-
- pr_debug("%s: verifying dest buffer...\n", thread_name);
- error_count += verify_result_add(info, result, total_tests,
- src_off, dst_off, len, thread->dsts, -1,
+ error_count += dmatest_verify(thread->srcs, src_off,
+ src_off + len, src_off,
+ PATTERN_SRC | PATTERN_COPY, true);
+ error_count += dmatest_verify(thread->srcs, src_off + len,
+ params->buf_size, src_off + len,
+ PATTERN_SRC, true);
+
+ pr_debug("%s: verifying dest buffer...\n", current->comm);
+ error_count += dmatest_verify(thread->dsts, 0, dst_off,
0, PATTERN_DST, false);
- error_count += verify_result_add(info, result, total_tests,
- src_off, dst_off, len, thread->dsts, 0,
- src_off, PATTERN_SRC | PATTERN_COPY, false);
- error_count += verify_result_add(info, result, total_tests,
- src_off, dst_off, len, thread->dsts, 1,
- dst_off + len, PATTERN_DST, false);
+ error_count += dmatest_verify(thread->dsts, dst_off,
+ dst_off + len, src_off,
+ PATTERN_SRC | PATTERN_COPY, false);
+ error_count += dmatest_verify(thread->dsts, dst_off + len,
+ params->buf_size, dst_off + len,
+ PATTERN_DST, false);
if (error_count) {
- thread_result_add(info, result, DMATEST_ET_VERIFY,
- total_tests, src_off, dst_off,
- len, error_count);
+ result("data error", total_tests, src_off, dst_off,
+ len, error_count);
failed_tests++;
} else {
- thread_result_add(info, result, DMATEST_ET_OK,
- total_tests, src_off, dst_off,
- len, 0);
+ verbose_result("test passed", total_tests, src_off,
+ dst_off, len, 0);
}
}
+ runtime = ktime_us_delta(ktime_get(), ktime);
ret = 0;
for (i = 0; thread->dsts[i]; i++)
@@ -802,20 +700,17 @@ err_srcbuf:
err_srcs:
kfree(pq_coefs);
err_thread_type:
- pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
- thread_name, total_tests, failed_tests, ret);
+ pr_info("%s: summary %u tests, %u failures %llu iops %llu KB/s (%d)\n",
+ current->comm, total_tests, failed_tests,
+ dmatest_persec(runtime, total_tests),
+ dmatest_KBs(runtime, total_len), ret);
/* terminate all transfers on specified channels */
if (ret)
dmaengine_terminate_all(chan);
thread->done = true;
-
- if (params->iterations > 0)
- while (!kthread_should_stop()) {
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
- interruptible_sleep_on(&wait_dmatest_exit);
- }
+ wake_up(&thread_wait);
return ret;
}
@@ -828,9 +723,10 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
ret = kthread_stop(thread->task);
- pr_debug("dmatest: thread %s exited with status %d\n",
- thread->task->comm, ret);
+ pr_debug("thread %s exited with status %d\n",
+ thread->task->comm, ret);
list_del(&thread->node);
+ put_task_struct(thread->task);
kfree(thread);
}
@@ -861,27 +757,27 @@ static int dmatest_add_threads(struct dmatest_info *info,
for (i = 0; i < params->threads_per_chan; i++) {
thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
if (!thread) {
- pr_warning("dmatest: No memory for %s-%s%u\n",
- dma_chan_name(chan), op, i);
-
+ pr_warn("No memory for %s-%s%u\n",
+ dma_chan_name(chan), op, i);
break;
}
thread->info = info;
thread->chan = dtc->chan;
thread->type = type;
smp_wmb();
- thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
+ thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
dma_chan_name(chan), op, i);
if (IS_ERR(thread->task)) {
- pr_warning("dmatest: Failed to run thread %s-%s%u\n",
- dma_chan_name(chan), op, i);
+ pr_warn("Failed to create thread %s-%s%u\n",
+ dma_chan_name(chan), op, i);
kfree(thread);
break;
}
/* srcbuf and dstbuf are allocated by the thread itself */
-
+ get_task_struct(thread->task);
list_add_tail(&thread->node, &dtc->threads);
+ wake_up_process(thread->task);
}
return i;
@@ -897,7 +793,7 @@ static int dmatest_add_channel(struct dmatest_info *info,
dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
if (!dtc) {
- pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
+ pr_warn("No memory for %s\n", dma_chan_name(chan));
return -ENOMEM;
}
@@ -917,7 +813,7 @@ static int dmatest_add_channel(struct dmatest_info *info,
thread_count += cnt > 0 ? cnt : 0;
}
- pr_info("dmatest: Started %u threads using %s\n",
+ pr_info("Started %u threads using %s\n",
thread_count, dma_chan_name(chan));
list_add_tail(&dtc->node, &info->channels);
@@ -937,20 +833,20 @@ static bool filter(struct dma_chan *chan, void *param)
return true;
}
-static int __run_threaded_test(struct dmatest_info *info)
+static void request_channels(struct dmatest_info *info,
+ enum dma_transaction_type type)
{
dma_cap_mask_t mask;
- struct dma_chan *chan;
- struct dmatest_params *params = &info->params;
- int err = 0;
dma_cap_zero(mask);
- dma_cap_set(DMA_MEMCPY, mask);
+ dma_cap_set(type, mask);
for (;;) {
+ struct dmatest_params *params = &info->params;
+ struct dma_chan *chan;
+
chan = dma_request_channel(mask, filter, params);
if (chan) {
- err = dmatest_add_channel(info, chan);
- if (err) {
+ if (dmatest_add_channel(info, chan)) {
dma_release_channel(chan);
break; /* add_channel failed, punt */
}
@@ -960,22 +856,30 @@ static int __run_threaded_test(struct dmatest_info *info)
info->nr_channels >= params->max_channels)
break; /* we have all we need */
}
- return err;
}
-#ifndef MODULE
-static int run_threaded_test(struct dmatest_info *info)
+static void run_threaded_test(struct dmatest_info *info)
{
- int ret;
+ struct dmatest_params *params = &info->params;
- mutex_lock(&info->lock);
- ret = __run_threaded_test(info);
- mutex_unlock(&info->lock);
- return ret;
+ /* Copy test parameters */
+ params->buf_size = test_buf_size;
+ strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
+ strlcpy(params->device, strim(test_device), sizeof(params->device));
+ params->threads_per_chan = threads_per_chan;
+ params->max_channels = max_channels;
+ params->iterations = iterations;
+ params->xor_sources = xor_sources;
+ params->pq_sources = pq_sources;
+ params->timeout = timeout;
+ params->noverify = noverify;
+
+ request_channels(info, DMA_MEMCPY);
+ request_channels(info, DMA_XOR);
+ request_channels(info, DMA_PQ);
}
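/*
 * Note that the parameter snapshot above is taken each time a run
 * starts, so module parameters written while a test is in flight only
 * take effect on the next run.
 */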
-#endif
-static void __stop_threaded_test(struct dmatest_info *info)
+static void stop_threaded_test(struct dmatest_info *info)
{
struct dmatest_chan *dtc, *_dtc;
struct dma_chan *chan;
@@ -984,203 +888,86 @@ static void __stop_threaded_test(struct dmatest_info *info)
list_del(&dtc->node);
chan = dtc->chan;
dmatest_cleanup_channel(dtc);
- pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan));
+ pr_debug("dropped channel %s\n", dma_chan_name(chan));
dma_release_channel(chan);
}
info->nr_channels = 0;
}
-static void stop_threaded_test(struct dmatest_info *info)
+static void restart_threaded_test(struct dmatest_info *info, bool run)
{
- mutex_lock(&info->lock);
- __stop_threaded_test(info);
- mutex_unlock(&info->lock);
-}
-
-static int __restart_threaded_test(struct dmatest_info *info, bool run)
-{
- struct dmatest_params *params = &info->params;
+	/* We might be called early to set run=; defer running until all
+	 * module parameters have been evaluated.
+ */
+ if (!info->did_init)
+ return;
/* Stop any running test first */
- __stop_threaded_test(info);
-
- if (run == false)
- return 0;
-
- /* Clear results from previous run */
- result_free(info, NULL);
-
- /* Copy test parameters */
- params->buf_size = test_buf_size;
- strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
- strlcpy(params->device, strim(test_device), sizeof(params->device));
- params->threads_per_chan = threads_per_chan;
- params->max_channels = max_channels;
- params->iterations = iterations;
- params->xor_sources = xor_sources;
- params->pq_sources = pq_sources;
- params->timeout = timeout;
+ stop_threaded_test(info);
/* Run test with new parameters */
- return __run_threaded_test(info);
-}
-
-static bool __is_threaded_test_run(struct dmatest_info *info)
-{
- struct dmatest_chan *dtc;
-
- list_for_each_entry(dtc, &info->channels, node) {
- struct dmatest_thread *thread;
-
- list_for_each_entry(thread, &dtc->threads, node) {
- if (!thread->done)
- return true;
- }
- }
-
- return false;
+ run_threaded_test(info);
}
-static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+static int dmatest_run_get(char *val, const struct kernel_param *kp)
{
- struct dmatest_info *info = file->private_data;
- char buf[3];
+ struct dmatest_info *info = &test_info;
mutex_lock(&info->lock);
-
- if (__is_threaded_test_run(info)) {
- buf[0] = 'Y';
+ if (is_threaded_test_run(info)) {
+ dmatest_run = true;
} else {
- __stop_threaded_test(info);
- buf[0] = 'N';
+ stop_threaded_test(info);
+ dmatest_run = false;
}
-
mutex_unlock(&info->lock);
- buf[1] = '\n';
- buf[2] = 0x00;
- return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
-}
-
-static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct dmatest_info *info = file->private_data;
- char buf[16];
- bool bv;
- int ret = 0;
- if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1))))
- return -EFAULT;
-
- if (strtobool(buf, &bv) == 0) {
- mutex_lock(&info->lock);
-
- if (__is_threaded_test_run(info))
- ret = -EBUSY;
- else
- ret = __restart_threaded_test(info, bv);
-
- mutex_unlock(&info->lock);
- }
-
- return ret ? ret : count;
+ return param_get_bool(val, kp);
}
-static const struct file_operations dtf_run_fops = {
- .read = dtf_read_run,
- .write = dtf_write_run,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
-static int dtf_results_show(struct seq_file *sf, void *data)
+static int dmatest_run_set(const char *val, const struct kernel_param *kp)
{
- struct dmatest_info *info = sf->private;
- struct dmatest_result *result;
- struct dmatest_thread_result *tr;
- unsigned int i;
+ struct dmatest_info *info = &test_info;
+ int ret;
- mutex_lock(&info->results_lock);
- list_for_each_entry(result, &info->results, node) {
- list_for_each_entry(tr, &result->results, node) {
- seq_printf(sf, "%s\n",
- thread_result_get(result->name, tr));
- if (tr->type == DMATEST_ET_VERIFY_BUF) {
- for (i = 0; i < tr->vr->error_count; i++) {
- seq_printf(sf, "\t%s\n",
- verify_result_get_one(tr->vr, i));
- }
- }
- }
+ mutex_lock(&info->lock);
+ ret = param_set_bool(val, kp);
+ if (ret) {
+ mutex_unlock(&info->lock);
+ return ret;
}
- mutex_unlock(&info->results_lock);
- return 0;
-}
-
-static int dtf_results_open(struct inode *inode, struct file *file)
-{
- return single_open(file, dtf_results_show, inode->i_private);
-}
-
-static const struct file_operations dtf_results_fops = {
- .open = dtf_results_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int dmatest_register_dbgfs(struct dmatest_info *info)
-{
- struct dentry *d;
-
- d = debugfs_create_dir("dmatest", NULL);
- if (IS_ERR(d))
- return PTR_ERR(d);
- if (!d)
- goto err_root;
+ if (is_threaded_test_run(info))
+ ret = -EBUSY;
+ else if (dmatest_run)
+ restart_threaded_test(info, dmatest_run);
- info->root = d;
-
- /* Run or stop threaded test */
- debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info,
- &dtf_run_fops);
-
- /* Results of test in progress */
- debugfs_create_file("results", S_IRUGO, info->root, info,
- &dtf_results_fops);
-
- return 0;
+ mutex_unlock(&info->lock);
-err_root:
- pr_err("dmatest: Failed to initialize debugfs\n");
- return -ENOMEM;
+ return ret;
}
static int __init dmatest_init(void)
{
struct dmatest_info *info = &test_info;
- int ret;
-
- memset(info, 0, sizeof(*info));
+ struct dmatest_params *params = &info->params;
- mutex_init(&info->lock);
- INIT_LIST_HEAD(&info->channels);
+ if (dmatest_run) {
+ mutex_lock(&info->lock);
+ run_threaded_test(info);
+ mutex_unlock(&info->lock);
+ }
- mutex_init(&info->results_lock);
- INIT_LIST_HEAD(&info->results);
+ if (params->iterations && wait)
+ wait_event(thread_wait, !is_threaded_test_run(info));
- ret = dmatest_register_dbgfs(info);
- if (ret)
- return ret;
+	/* Module parameters are stable and init-time tests have been
+	 * started; let userspace take over 'run' control.
+ */
+ info->did_init = true;
-#ifdef MODULE
return 0;
-#else
- return run_threaded_test(info);
-#endif
}
/* when compiled-in wait for drivers to load first */
late_initcall(dmatest_init);
@@ -1189,9 +976,9 @@ static void __exit dmatest_exit(void)
{
struct dmatest_info *info = &test_info;
- debugfs_remove_recursive(info->root);
+ mutex_lock(&info->lock);
stop_threaded_test(info);
- result_free(info, NULL);
+ mutex_unlock(&info->lock);
}
module_exit(dmatest_exit);
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 89eb89f22284..7516be4677cf 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -85,10 +85,6 @@ static struct device *chan2dev(struct dma_chan *chan)
{
return &chan->dev->device;
}
-static struct device *chan2parent(struct dma_chan *chan)
-{
- return chan->dev->device.parent;
-}
static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
@@ -311,26 +307,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
list_splice_init(&desc->tx_list, &dwc->free_list);
list_move(&desc->desc_node, &dwc->free_list);
- if (!is_slave_direction(dwc->direction)) {
- struct device *parent = chan2parent(&dwc->chan);
- if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
- dma_unmap_single(parent, desc->lli.dar,
- desc->total_len, DMA_FROM_DEVICE);
- else
- dma_unmap_page(parent, desc->lli.dar,
- desc->total_len, DMA_FROM_DEVICE);
- }
- if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
- dma_unmap_single(parent, desc->lli.sar,
- desc->total_len, DMA_TO_DEVICE);
- else
- dma_unmap_page(parent, desc->lli.sar,
- desc->total_len, DMA_TO_DEVICE);
- }
- }
-
+ dma_descriptor_unmap(txd);
spin_unlock_irqrestore(&dwc->lock, flags);
if (callback)
@@ -1098,13 +1075,13 @@ dwc_tx_status(struct dma_chan *chan,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_SUCCESS)
+ if (ret != DMA_COMPLETE)
dma_set_residue(txstate, dwc_get_residue(dwc));
if (dwc->paused && ret == DMA_IN_PROGRESS)
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index bef8a368c8dd..2539ea0cbc63 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -46,14 +46,21 @@
#define EDMA_CHANS 64
#endif /* CONFIG_ARCH_DAVINCI_DA8XX */
-/* Max of 16 segments per channel to conserve PaRAM slots */
-#define MAX_NR_SG 16
+/*
+ * Max of 20 segments per channel to conserve PaRAM slots.
+ * Also note that MAX_NR_SG should be at least the number of
+ * periods required for ASoC, otherwise DMA prep calls will
+ * fail. Today davinci-pcm is the only user of this driver and
+ * requires at least 17 slots, so we set the default to 20.
+ */
+#define MAX_NR_SG 20
#define EDMA_MAX_SLOTS MAX_NR_SG
#define EDMA_DESCRIPTORS 16
struct edma_desc {
struct virt_dma_desc vdesc;
struct list_head node;
+ int cyclic;
int absync;
int pset_nr;
int processed;
@@ -167,8 +174,13 @@ static void edma_execute(struct edma_chan *echan)
* then setup a link to the dummy slot, this results in all future
* events being absorbed and that's OK because we're done
*/
- if (edesc->processed == edesc->pset_nr)
- edma_link(echan->slot[nslots-1], echan->ecc->dummy_slot);
+ if (edesc->processed == edesc->pset_nr) {
+ if (edesc->cyclic)
+ edma_link(echan->slot[nslots-1], echan->slot[1]);
+ else
+ edma_link(echan->slot[nslots-1],
+ echan->ecc->dummy_slot);
+ }
edma_resume(echan->ch_num);
@@ -250,6 +262,117 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
return ret;
}
+/*
+ * A PaRAM set configuration abstraction used by other modes
+ * @chan: Channel whose PaRAM set we're configuring
+ * @pset: PaRAM set to initialize and setup.
+ * @src_addr: Source address of the DMA
+ * @dst_addr: Destination address of the DMA
+ * @burst: In units of dev_width, how much to send
+ * @dev_width: Device bus width, in bytes
+ * @dma_length: Total length of the DMA transfer
+ * @direction: Direction of the transfer
+ */
+static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
+ dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
+ enum dma_slave_buswidth dev_width, unsigned int dma_length,
+ enum dma_transfer_direction direction)
+{
+ struct edma_chan *echan = to_edma_chan(chan);
+ struct device *dev = chan->device->dev;
+ int acnt, bcnt, ccnt, cidx;
+ int src_bidx, dst_bidx, src_cidx, dst_cidx;
+ int absync;
+
+ acnt = dev_width;
+ /*
+ * If the maxburst is equal to the fifo width, use
+ * A-synced transfers. This allows for large contiguous
+ * buffer transfers using only one PaRAM set.
+ */
+ if (burst == 1) {
+ /*
+		 * For the A-sync case, bcnt and ccnt are the remainder
+		 * and quotient respectively of dividing
+		 * (dma_length / acnt) by (SZ_64K - 1), so that if bcnt
+		 * overflows, we have ccnt to fall back on.
+		 * Note: in A-sync transfers only, bcntrld is used, and it
+		 * only applies for sg_dma_len(sg) >= SZ_64K.
+		 * In that case the approach adopted is: bcnt for the
+		 * first frame will be the remainder below, and for
+		 * every successive frame bcnt will be SZ_64K - 1. This
+		 * is assured by setting bcntrld = 0xffff at the end of
+		 * this function.
+		 */
+ absync = false;
+ ccnt = dma_length / acnt / (SZ_64K - 1);
+ bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
+ /*
+ * If bcnt is non-zero, we have a remainder and hence an
+ * extra frame to transfer, so increment ccnt.
+ */
+ if (bcnt)
+ ccnt++;
+ else
+ bcnt = SZ_64K - 1;
+ cidx = acnt;
+ } else {
+ /*
+ * If maxburst is greater than the fifo address_width,
+ * use AB-synced transfers where A count is the fifo
+ * address_width and B count is the maxburst. In this
+ * case, we are limited to transfers of C count frames
+ * of (address_width * maxburst) where C count is limited
+ * to SZ_64K-1. This places an upper bound on the length
+ * of an SG segment that can be handled.
+ */
+ absync = true;
+ bcnt = burst;
+ ccnt = dma_length / (acnt * bcnt);
+ if (ccnt > (SZ_64K - 1)) {
+ dev_err(dev, "Exceeded max SG segment size\n");
+ return -EINVAL;
+ }
+ cidx = acnt * bcnt;
+ }
+
+ if (direction == DMA_MEM_TO_DEV) {
+ src_bidx = acnt;
+ src_cidx = cidx;
+ dst_bidx = 0;
+ dst_cidx = 0;
+ } else if (direction == DMA_DEV_TO_MEM) {
+ src_bidx = 0;
+ src_cidx = 0;
+ dst_bidx = acnt;
+ dst_cidx = cidx;
+ } else {
+ dev_err(dev, "%s: direction not implemented yet\n", __func__);
+ return -EINVAL;
+ }
+
+ pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+ /* Configure A or AB synchronized transfers */
+ if (absync)
+ pset->opt |= SYNCDIM;
+
+ pset->src = src_addr;
+ pset->dst = dst_addr;
+
+ pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+ pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;
+
+ pset->a_b_cnt = bcnt << 16 | acnt;
+ pset->ccnt = ccnt;
+ /*
+ * Only time when (bcntrld) auto reload is required is for
+ * A-sync case, and in this case, a requirement of reload value
+ * of SZ_64K-1 only is assured. 'link' is initially set to NULL
+ * and then later will be populated by edma_execute.
+ */
+ pset->link_bcntrld = 0xffffffff;
+ return absync;
+}
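/*
 * Worked example for the A-sync arithmetic above, illustration only:
 * a 32-bit bus (acnt = 4) moving dma_length = 1 MiB gives
 * dma_length / acnt = 262144 elements, so ccnt = 262144 / 65535 = 4
 * with a remainder of 4; bcnt becomes that remainder and ccnt is
 * bumped to 5. The first frame then moves 4 elements, and
 * bcntrld = 0xffff reloads bcnt to SZ_64K - 1 for the remaining four
 * frames.
 */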
+
static struct dma_async_tx_descriptor *edma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
@@ -258,23 +381,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
struct edma_chan *echan = to_edma_chan(chan);
struct device *dev = chan->device->dev;
struct edma_desc *edesc;
- dma_addr_t dev_addr;
+ dma_addr_t src_addr = 0, dst_addr = 0;
enum dma_slave_buswidth dev_width;
u32 burst;
struct scatterlist *sg;
- int acnt, bcnt, ccnt, src, dst, cidx;
- int src_bidx, dst_bidx, src_cidx, dst_cidx;
- int i, nslots;
+ int i, nslots, ret;
if (unlikely(!echan || !sgl || !sg_len))
return NULL;
if (direction == DMA_DEV_TO_MEM) {
- dev_addr = echan->cfg.src_addr;
+ src_addr = echan->cfg.src_addr;
dev_width = echan->cfg.src_addr_width;
burst = echan->cfg.src_maxburst;
} else if (direction == DMA_MEM_TO_DEV) {
- dev_addr = echan->cfg.dst_addr;
+ dst_addr = echan->cfg.dst_addr;
dev_width = echan->cfg.dst_addr_width;
burst = echan->cfg.dst_maxburst;
} else {
@@ -307,7 +428,6 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
if (echan->slot[i] < 0) {
kfree(edesc);
dev_err(dev, "Failed to allocate slot\n");
- kfree(edesc);
return NULL;
}
}
@@ -315,64 +435,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
/* Configure PaRAM sets for each SG */
for_each_sg(sgl, sg, sg_len, i) {
-
- acnt = dev_width;
-
- /*
- * If the maxburst is equal to the fifo width, use
- * A-synced transfers. This allows for large contiguous
- * buffer transfers using only one PaRAM set.
- */
- if (burst == 1) {
- edesc->absync = false;
- ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
- bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
- if (bcnt)
- ccnt++;
- else
- bcnt = SZ_64K - 1;
- cidx = acnt;
- /*
- * If maxburst is greater than the fifo address_width,
- * use AB-synced transfers where A count is the fifo
- * address_width and B count is the maxburst. In this
- * case, we are limited to transfers of C count frames
- * of (address_width * maxburst) where C count is limited
- * to SZ_64K-1. This places an upper bound on the length
- * of an SG segment that can be handled.
- */
- } else {
- edesc->absync = true;
- bcnt = burst;
- ccnt = sg_dma_len(sg) / (acnt * bcnt);
- if (ccnt > (SZ_64K - 1)) {
- dev_err(dev, "Exceeded max SG segment size\n");
- kfree(edesc);
- return NULL;
- }
- cidx = acnt * bcnt;
+ /* Get address for each SG */
+ if (direction == DMA_DEV_TO_MEM)
+ dst_addr = sg_dma_address(sg);
+ else
+ src_addr = sg_dma_address(sg);
+
+ ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
+ dst_addr, burst, dev_width,
+ sg_dma_len(sg), direction);
+ if (ret < 0) {
+ kfree(edesc);
+ return NULL;
}
- if (direction == DMA_MEM_TO_DEV) {
- src = sg_dma_address(sg);
- dst = dev_addr;
- src_bidx = acnt;
- src_cidx = cidx;
- dst_bidx = 0;
- dst_cidx = 0;
- } else {
- src = dev_addr;
- dst = sg_dma_address(sg);
- src_bidx = 0;
- src_cidx = 0;
- dst_bidx = acnt;
- dst_cidx = cidx;
- }
-
- edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
- /* Configure A or AB synchronized transfers */
- if (edesc->absync)
- edesc->pset[i].opt |= SYNCDIM;
+ edesc->absync = ret;
/* If this is the last in a current SG set of transactions,
enable interrupts so that next set is processed */
@@ -382,17 +459,138 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
/* If this is the last set, enable completion interrupt flag */
if (i == sg_len - 1)
edesc->pset[i].opt |= TCINTEN;
+ }
- edesc->pset[i].src = src;
- edesc->pset[i].dst = dst;
+ return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
+}
- edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx;
- edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx;
+static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long tx_flags, void *context)
+{
+ struct edma_chan *echan = to_edma_chan(chan);
+ struct device *dev = chan->device->dev;
+ struct edma_desc *edesc;
+ dma_addr_t src_addr, dst_addr;
+ enum dma_slave_buswidth dev_width;
+ u32 burst;
+ int i, ret, nslots;
+
+ if (unlikely(!echan || !buf_len || !period_len))
+ return NULL;
+
+ if (direction == DMA_DEV_TO_MEM) {
+ src_addr = echan->cfg.src_addr;
+ dst_addr = buf_addr;
+ dev_width = echan->cfg.src_addr_width;
+ burst = echan->cfg.src_maxburst;
+ } else if (direction == DMA_MEM_TO_DEV) {
+ src_addr = buf_addr;
+ dst_addr = echan->cfg.dst_addr;
+ dev_width = echan->cfg.dst_addr_width;
+ burst = echan->cfg.dst_maxburst;
+ } else {
+ dev_err(dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
+ dev_err(dev, "Undefined slave buswidth\n");
+ return NULL;
+ }
+
+ if (unlikely(buf_len % period_len)) {
+		dev_err(dev, "Buffer length should be a multiple of period length\n");
+ return NULL;
+ }
+
+ nslots = (buf_len / period_len) + 1;
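	/*
	 * Illustration only: buf_len = 8192 with period_len = 2048 gives
	 * nslots = 5 -- one PaRAM slot per period plus one spare used to
	 * make the transfer wrap around.
	 */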
+
+ /*
+ * Cyclic DMA users such as audio cannot tolerate delays introduced
+ * by cases where the number of periods is more than the maximum
+ * number of SGs the EDMA driver can handle at a time. For DMA types
+ * such as Slave SGs, such delays are tolerable and synchronized,
+ * but the synchronization is difficult to achieve with Cyclic and
+ * cannot be guaranteed, so we error out early.
+ */
+ if (nslots > MAX_NR_SG)
+ return NULL;
+
+ edesc = kzalloc(sizeof(*edesc) + nslots *
+ sizeof(edesc->pset[0]), GFP_ATOMIC);
+ if (!edesc) {
+ dev_dbg(dev, "Failed to allocate a descriptor\n");
+ return NULL;
+ }
+
+ edesc->cyclic = 1;
+ edesc->pset_nr = nslots;
+
+ dev_dbg(dev, "%s: nslots=%d\n", __func__, nslots);
+	dev_dbg(dev, "%s: period_len=%zu\n", __func__, period_len);
+	dev_dbg(dev, "%s: buf_len=%zu\n", __func__, buf_len);
+
+ for (i = 0; i < nslots; i++) {
+ /* Allocate a PaRAM slot, if needed */
+ if (echan->slot[i] < 0) {
+ echan->slot[i] =
+ edma_alloc_slot(EDMA_CTLR(echan->ch_num),
+ EDMA_SLOT_ANY);
+ if (echan->slot[i] < 0) {
+ dev_err(dev, "Failed to allocate slot\n");
+ return NULL;
+ }
+ }
+
+ if (i == nslots - 1) {
+ memcpy(&edesc->pset[i], &edesc->pset[0],
+ sizeof(edesc->pset[0]));
+ break;
+ }
+
+ ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
+ dst_addr, burst, dev_width, period_len,
+ direction);
+ if (ret < 0)
+ return NULL;
- edesc->pset[i].a_b_cnt = bcnt << 16 | acnt;
- edesc->pset[i].ccnt = ccnt;
- edesc->pset[i].link_bcntrld = 0xffffffff;
+ if (direction == DMA_DEV_TO_MEM)
+ dst_addr += period_len;
+ else
+ src_addr += period_len;
+ dev_dbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
+ dev_dbg(dev,
+ "\n pset[%d]:\n"
+ " chnum\t%d\n"
+ " slot\t%d\n"
+ " opt\t%08x\n"
+ " src\t%08x\n"
+ " dst\t%08x\n"
+ " abcnt\t%08x\n"
+ " ccnt\t%08x\n"
+ " bidx\t%08x\n"
+ " cidx\t%08x\n"
+ " lkrld\t%08x\n",
+ i, echan->ch_num, echan->slot[i],
+ edesc->pset[i].opt,
+ edesc->pset[i].src,
+ edesc->pset[i].dst,
+ edesc->pset[i].a_b_cnt,
+ edesc->pset[i].ccnt,
+ edesc->pset[i].src_dst_bidx,
+ edesc->pset[i].src_dst_cidx,
+ edesc->pset[i].link_bcntrld);
+
+ edesc->absync = ret;
+
+ /*
+ * Enable interrupts for every period because callback
+ * has to be called for every period.
+ */
+ edesc->pset[i].opt |= TCINTEN;
}
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
@@ -406,30 +604,34 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
unsigned long flags;
struct edmacc_param p;
- /* Pause the channel */
- edma_pause(echan->ch_num);
+ edesc = echan->edesc;
+
+ /* Pause the channel for non-cyclic */
+	if (!edesc || !edesc->cyclic)
+ edma_pause(echan->ch_num);
switch (ch_status) {
- case DMA_COMPLETE:
+ case EDMA_DMA_COMPLETE:
spin_lock_irqsave(&echan->vchan.lock, flags);
- edesc = echan->edesc;
if (edesc) {
- if (edesc->processed == edesc->pset_nr) {
+ if (edesc->cyclic) {
+ vchan_cyclic_callback(&edesc->vdesc);
+ } else if (edesc->processed == edesc->pset_nr) {
dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
edma_stop(echan->ch_num);
vchan_cookie_complete(&edesc->vdesc);
+ edma_execute(echan);
} else {
dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
+ edma_execute(echan);
}
-
- edma_execute(echan);
}
spin_unlock_irqrestore(&echan->vchan.lock, flags);
break;
- case DMA_CC_ERROR:
+ case EDMA_DMA_CC_ERROR:
spin_lock_irqsave(&echan->vchan.lock, flags);
edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);
@@ -579,7 +781,7 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
unsigned long flags;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS || !txstate)
+ if (ret == DMA_COMPLETE || !txstate)
return ret;
spin_lock_irqsave(&echan->vchan.lock, flags);
@@ -619,6 +821,7 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
struct device *dev)
{
dma->device_prep_slave_sg = edma_prep_slave_sg;
+ dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
dma->device_alloc_chan_resources = edma_alloc_chan_resources;
dma->device_free_chan_resources = edma_free_chan_resources;
dma->device_issue_pending = edma_issue_pending;
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 591cd8c63abb..cb4bf682a708 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -733,28 +733,6 @@ static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
spin_unlock_irqrestore(&edmac->lock, flags);
}
-static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
-{
- struct device *dev = desc->txd.chan->device->dev;
-
- if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
- dma_unmap_single(dev, desc->src_addr, desc->size,
- DMA_TO_DEVICE);
- else
- dma_unmap_page(dev, desc->src_addr, desc->size,
- DMA_TO_DEVICE);
- }
- if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
- dma_unmap_single(dev, desc->dst_addr, desc->size,
- DMA_FROM_DEVICE);
- else
- dma_unmap_page(dev, desc->dst_addr, desc->size,
- DMA_FROM_DEVICE);
- }
-}
-
static void ep93xx_dma_tasklet(unsigned long data)
{
struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
@@ -787,13 +765,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
/* Now we can release all the chained descriptors */
list_for_each_entry_safe(desc, d, &list, node) {
- /*
- * For the memcpy channels the API requires us to unmap the
- * buffers unless requested otherwise.
- */
- if (!edmac->chan.private)
- ep93xx_dma_unmap_buffers(desc);
-
+ dma_descriptor_unmap(&desc->txd);
ep93xx_dma_desc_put(edmac, desc);
}
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 61517dd0d0b7..f157c6f76b32 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -86,11 +86,6 @@ static void set_desc_cnt(struct fsldma_chan *chan,
hw->count = CPU_TO_DMA(chan, count, 32);
}
-static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
-{
- return DMA_TO_CPU(chan, desc->hw.count, 32);
-}
-
static void set_desc_src(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
@@ -101,16 +96,6 @@ static void set_desc_src(struct fsldma_chan *chan,
hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}
-static dma_addr_t get_desc_src(struct fsldma_chan *chan,
- struct fsl_desc_sw *desc)
-{
- u64 snoop_bits;
-
- snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
- ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
- return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
-}
-
static void set_desc_dst(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
@@ -121,16 +106,6 @@ static void set_desc_dst(struct fsldma_chan *chan,
hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}
-static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
- struct fsl_desc_sw *desc)
-{
- u64 snoop_bits;
-
- snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
- ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
- return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
-}
-
static void set_desc_next(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
@@ -408,7 +383,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
struct fsl_desc_sw *child;
unsigned long flags;
- dma_cookie_t cookie;
+ dma_cookie_t cookie = -EINVAL;
spin_lock_irqsave(&chan->desc_lock, flags);
@@ -854,10 +829,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
struct fsl_desc_sw *desc)
{
struct dma_async_tx_descriptor *txd = &desc->async_tx;
- struct device *dev = chan->common.device->dev;
- dma_addr_t src = get_desc_src(chan, desc);
- dma_addr_t dst = get_desc_dst(chan, desc);
- u32 len = get_desc_cnt(chan, desc);
/* Run the link descriptor callback function */
if (txd->callback) {
@@ -870,22 +841,7 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
/* Run any dependencies */
dma_run_dependencies(txd);
- /* Unmap the dst buffer, if requested */
- if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
- dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
- else
- dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE);
- }
-
- /* Unmap the src buffer, if requested */
- if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
- dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
- else
- dma_unmap_page(dev, src, len, DMA_TO_DEVICE);
- }
-
+ dma_descriptor_unmap(txd);
#ifdef FSL_DMA_LD_DEBUG
chan_dbg(chan, "LD %p free\n", desc);
#endif
@@ -1255,7 +1211,9 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
WARN_ON(fdev->feature != chan->feature);
chan->dev = fdev->dev;
- chan->id = ((res.start - 0x100) & 0xfff) >> 7;
+ chan->id = (res.start & 0xfff) < 0x300 ?
+ ((res.start - 0x100) & 0xfff) >> 7 :
+ ((res.start - 0x200) & 0xfff) >> 7;
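	/*
	 * Illustration, assuming the elo3 register layout: the low four
	 * channels at offsets 0x100-0x280 map to ids 0-3, and the high
	 * four at 0x400-0x580 map to ids 4-7, e.g.
	 * (0x480 - 0x200) >> 7 = 5.
	 */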
if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
dev_err(fdev->dev, "too many channels for device\n");
err = -EINVAL;
@@ -1428,6 +1386,7 @@ static int fsldma_of_remove(struct platform_device *op)
}
static const struct of_device_id fsldma_of_ids[] = {
+ { .compatible = "fsl,elo3-dma", },
{ .compatible = "fsl,eloplus-dma", },
{ .compatible = "fsl,elo-dma", },
{}
@@ -1449,7 +1408,7 @@ static struct platform_driver fsldma_of_driver = {
static __init int fsldma_init(void)
{
- pr_info("Freescale Elo / Elo Plus DMA driver\n");
+ pr_info("Freescale Elo series DMA driver\n");
return platform_driver_register(&fsldma_of_driver);
}
@@ -1461,5 +1420,5 @@ static void __exit fsldma_exit(void)
subsys_initcall(fsldma_init);
module_exit(fsldma_exit);
-MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
+MODULE_DESCRIPTION("Freescale Elo series DMA driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index f5c38791fc74..1ffc24484d23 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -112,7 +112,7 @@ struct fsldma_chan_regs {
};
struct fsldma_chan;
-#define FSL_DMA_MAX_CHANS_PER_DEVICE 4
+#define FSL_DMA_MAX_CHANS_PER_DEVICE 8
struct fsldma_device {
void __iomem *regs; /* DGSR register base */
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 55852c026791..6f9ac2022abd 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -572,9 +572,11 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));
- dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x "
- "dma_length=%d\n", __func__, imxdmac->channel,
- d->dest, d->src, d->len);
+ dev_dbg(imxdma->dev,
+ "%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n",
+ __func__, imxdmac->channel,
+ (unsigned long long)d->dest,
+ (unsigned long long)d->src, d->len);
break;
/* Cyclic transfer is the same as slave_sg with special sg configuration. */
@@ -586,20 +588,22 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
DMA_CCR(imxdmac->channel));
- dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
- "total length=%d dev_addr=0x%08x (dev2mem)\n",
- __func__, imxdmac->channel, d->sg, d->sgcount,
- d->len, imxdmac->per_address);
+ dev_dbg(imxdma->dev,
+ "%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n",
+ __func__, imxdmac->channel,
+ d->sg, d->sgcount, d->len,
+ (unsigned long long)imxdmac->per_address);
} else if (d->direction == DMA_MEM_TO_DEV) {
imx_dmav1_writel(imxdma, imxdmac->per_address,
DMA_DAR(imxdmac->channel));
imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
DMA_CCR(imxdmac->channel));
- dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
- "total length=%d dev_addr=0x%08x (mem2dev)\n",
- __func__, imxdmac->channel, d->sg, d->sgcount,
- d->len, imxdmac->per_address);
+ dev_dbg(imxdma->dev,
+ "%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n",
+ __func__, imxdmac->channel,
+ d->sg, d->sgcount, d->len,
+ (unsigned long long)imxdmac->per_address);
} else {
dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
__func__, imxdmac->channel);
@@ -771,7 +775,7 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan)
desc->desc.tx_submit = imxdma_tx_submit;
/* txd.flags will be overwritten in prep funcs */
desc->desc.flags = DMA_CTRL_ACK;
- desc->status = DMA_SUCCESS;
+ desc->status = DMA_COMPLETE;
list_add_tail(&desc->node, &imxdmac->ld_free);
imxdmac->descs_allocated++;
@@ -870,7 +874,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
int i;
unsigned int periods = buf_len / period_len;
- dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
+ dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
__func__, imxdmac->channel, buf_len, period_len);
if (list_empty(&imxdmac->ld_free) ||
@@ -926,8 +930,9 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
struct imxdma_engine *imxdma = imxdmac->imxdma;
struct imxdma_desc *desc;
- dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n",
- __func__, imxdmac->channel, src, dest, len);
+ dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
+ __func__, imxdmac->channel, (unsigned long long)src,
+ (unsigned long long)dest, len);
if (list_empty(&imxdmac->ld_free) ||
imxdma_chan_is_doing_cyclic(imxdmac))
@@ -956,9 +961,10 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
struct imxdma_engine *imxdma = imxdmac->imxdma;
struct imxdma_desc *desc;
- dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n"
- " src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__,
- imxdmac->channel, xt->src_start, xt->dst_start,
+ dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n"
+ " src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__,
+ imxdmac->channel, (unsigned long long)xt->src_start,
+ (unsigned long long) xt->dst_start,
xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
xt->numf, xt->frame_size);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index c1fd504cae28..c75679d42028 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -638,7 +638,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
if (error)
sdmac->status = DMA_ERROR;
else
- sdmac->status = DMA_SUCCESS;
+ sdmac->status = DMA_COMPLETE;
dma_cookie_complete(&sdmac->desc);
if (sdmac->desc.callback)
@@ -1089,8 +1089,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
param &= ~BD_CONT;
}
- dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
- i, count, sg->dma_address,
+ dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
+ i, count, (u64)sg->dma_address,
param & BD_WRAP ? "wrap" : "",
param & BD_INTR ? " intr" : "");
@@ -1163,8 +1163,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
if (i + 1 == num_periods)
param |= BD_WRAP;
- dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
- i, period_len, dma_addr,
+ dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
+ i, period_len, (u64)dma_addr,
param & BD_WRAP ? "wrap" : "",
param & BD_INTR ? " intr" : "");
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index a975ebebea8a..1aab8130efa1 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -309,7 +309,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
callback_txd(param_txd);
}
if (midc->raw_tfr) {
- desc->status = DMA_SUCCESS;
+ desc->status = DMA_COMPLETE;
if (desc->lli != NULL) {
pci_pool_free(desc->lli_pool, desc->lli,
desc->lli_phys);
@@ -481,7 +481,7 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_SUCCESS) {
+ if (ret != DMA_COMPLETE) {
spin_lock_bh(&midc->lock);
midc_scan_descriptors(to_middma_device(chan->device), midc);
spin_unlock_bh(&midc->lock);
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 5ff6fc1819dc..1a49c777607c 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -531,21 +531,6 @@ static void ioat1_cleanup_event(unsigned long data)
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
-void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
- size_t len, struct ioat_dma_descriptor *hw)
-{
- struct pci_dev *pdev = chan->device->pdev;
- size_t offset = len - hw->size;
-
- if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
- ioat_unmap(pdev, hw->dst_addr - offset, len,
- PCI_DMA_FROMDEVICE, flags, 1);
-
- if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
- ioat_unmap(pdev, hw->src_addr - offset, len,
- PCI_DMA_TODEVICE, flags, 0);
-}
-
dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
{
dma_addr_t phys_complete;
@@ -602,7 +587,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
dump_desc_dbg(ioat, desc);
if (tx->cookie) {
dma_cookie_complete(tx);
- ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
+ dma_descriptor_unmap(tx);
ioat->active -= desc->hw->tx_cnt;
if (tx->callback) {
tx->callback(tx->callback_param);
@@ -733,7 +718,7 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(c, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
device->cleanup_fn((unsigned long) c);
@@ -833,8 +818,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
- flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
- DMA_PREP_INTERRUPT;
+ flags = DMA_PREP_INTERRUPT;
tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
IOAT_TEST_SIZE, flags);
if (!tx) {
@@ -859,7 +843,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
if (tmo == 0 ||
dma->device_tx_status(dma_chan, cookie, NULL)
- != DMA_SUCCESS) {
+ != DMA_COMPLETE) {
dev_err(dev, "Self-test copy timed out, disabling\n");
err = -ENODEV;
goto unmap_dma;
@@ -885,8 +869,7 @@ static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
- "set ioat interrupt style: msix (default), "
- "msix-single-vector, msi, intx)");
+ "set ioat interrupt style: msix (default), msi, intx");
/**
* ioat_dma_setup_interrupts - setup interrupt handler
@@ -904,8 +887,6 @@ int ioat_dma_setup_interrupts(struct ioatdma_device *device)
if (!strcmp(ioat_interrupt_style, "msix"))
goto msix;
- if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
- goto msix_single_vector;
if (!strcmp(ioat_interrupt_style, "msi"))
goto msi;
if (!strcmp(ioat_interrupt_style, "intx"))
@@ -920,10 +901,8 @@ msix:
device->msix_entries[i].entry = i;
err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
- if (err < 0)
+ if (err)
goto msi;
- if (err > 0)
- goto msix_single_vector;
for (i = 0; i < msixcnt; i++) {
msix = &device->msix_entries[i];
@@ -937,29 +916,13 @@ msix:
chan = ioat_chan_by_index(device, j);
devm_free_irq(dev, msix->vector, chan);
}
- goto msix_single_vector;
+ goto msi;
}
}
intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
device->irq_mode = IOAT_MSIX;
goto done;
-msix_single_vector:
- msix = &device->msix_entries[0];
- msix->entry = 0;
- err = pci_enable_msix(pdev, device->msix_entries, 1);
- if (err)
- goto msi;
-
- err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
- "ioat-msix", device);
- if (err) {
- pci_disable_msix(pdev);
- goto msi;
- }
- device->irq_mode = IOAT_MSIX_SINGLE;
- goto done;
-
msi:
err = pci_enable_msi(pdev);
if (err)
@@ -971,7 +934,7 @@ msi:
pci_disable_msi(pdev);
goto intx;
}
- device->irq_mode = IOAT_MSIX;
+ device->irq_mode = IOAT_MSI;
goto done;
intx:
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 54fb7b9ff9aa..11fb877ddca9 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -52,7 +52,6 @@
enum ioat_irq_mode {
IOAT_NOIRQ = 0,
IOAT_MSIX,
- IOAT_MSIX_SINGLE,
IOAT_MSI,
IOAT_INTX
};
@@ -83,7 +82,6 @@ struct ioatdma_device {
struct pci_pool *completion_pool;
#define MAX_SED_POOLS 5
struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
- struct kmem_cache *sed_pool;
struct dma_device common;
u8 version;
struct msix_entry msix_entries[4];
@@ -342,16 +340,6 @@ static inline bool is_ioat_bug(unsigned long err)
return !!err;
}
-static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
- int direction, enum dma_ctrl_flags flags, bool dst)
-{
- if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) ||
- (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE)))
- pci_unmap_single(pdev, addr, len, direction);
- else
- pci_unmap_page(pdev, addr, len, direction);
-}
-
int ioat_probe(struct ioatdma_device *device);
int ioat_register(struct ioatdma_device *device);
int ioat1_dma_probe(struct ioatdma_device *dev, int dca);
@@ -363,8 +351,6 @@ void ioat_init_channel(struct ioatdma_device *device,
struct ioat_chan_common *chan, int idx);
enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
struct dma_tx_state *txstate);
-void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
- size_t len, struct ioat_dma_descriptor *hw);
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
dma_addr_t *phys_complete);
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index b925e1b1d139..5d3affe7e976 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -148,7 +148,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
tx = &desc->txd;
dump_desc_dbg(ioat, desc);
if (tx->cookie) {
- ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
+ dma_descriptor_unmap(tx);
dma_cookie_complete(tx);
if (tx->callback) {
tx->callback(tx->callback_param);
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index 212d584fe427..470292767e68 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -157,7 +157,6 @@ static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
int ioat2_dma_probe(struct ioatdma_device *dev, int dca);
int ioat3_dma_probe(struct ioatdma_device *dev, int dca);
-void ioat3_dma_remove(struct ioatdma_device *dev);
struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index d8ececaf1b57..820817e97e62 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -67,6 +67,8 @@
#include "dma.h"
#include "dma_v2.h"
+extern struct kmem_cache *ioat3_sed_cache;
+
/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
@@ -87,22 +89,8 @@ static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
0, 1, 2, 3, 4, 5, 6 };
-/*
- * technically sources 1 and 2 do not require SED, but the op will have
- * at least 9 descriptors so that's irrelevant.
- */
-static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 1 };
-
static void ioat3_eh(struct ioat2_dma_chan *ioat);
-static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
-{
- struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
-
- return raw->field[xor_idx_to_field[idx]];
-}
-
static void xor_set_src(struct ioat_raw_descriptor *descs[2],
dma_addr_t addr, u32 offset, int idx)
{
@@ -135,12 +123,6 @@ static void pq_set_src(struct ioat_raw_descriptor *descs[2],
pq->coef[idx] = coef;
}
-static int sed_get_pq16_pool_idx(int src_cnt)
-{
-
- return pq16_idx_to_sed[src_cnt];
-}
-
static bool is_jf_ioat(struct pci_dev *pdev)
{
switch (pdev->device) {
@@ -272,7 +254,7 @@ ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
struct ioat_sed_ent *sed;
gfp_t flags = __GFP_ZERO | GFP_ATOMIC;
- sed = kmem_cache_alloc(device->sed_pool, flags);
+ sed = kmem_cache_alloc(ioat3_sed_cache, flags);
if (!sed)
return NULL;
@@ -280,7 +262,7 @@ ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
flags, &sed->dma);
if (!sed->hw) {
- kmem_cache_free(device->sed_pool, sed);
+ kmem_cache_free(ioat3_sed_cache, sed);
return NULL;
}
@@ -293,165 +275,7 @@ static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *s
return;
dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
- kmem_cache_free(device->sed_pool, sed);
-}
-
-static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
- struct ioat_ring_ent *desc, int idx)
-{
- struct ioat_chan_common *chan = &ioat->base;
- struct pci_dev *pdev = chan->device->pdev;
- size_t len = desc->len;
- size_t offset = len - desc->hw->size;
- struct dma_async_tx_descriptor *tx = &desc->txd;
- enum dma_ctrl_flags flags = tx->flags;
-
- switch (desc->hw->ctl_f.op) {
- case IOAT_OP_COPY:
- if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */
- ioat_dma_unmap(chan, flags, len, desc->hw);
- break;
- case IOAT_OP_XOR_VAL:
- case IOAT_OP_XOR: {
- struct ioat_xor_descriptor *xor = desc->xor;
- struct ioat_ring_ent *ext;
- struct ioat_xor_ext_descriptor *xor_ex = NULL;
- int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt);
- struct ioat_raw_descriptor *descs[2];
- int i;
-
- if (src_cnt > 5) {
- ext = ioat2_get_ring_ent(ioat, idx + 1);
- xor_ex = ext->xor_ex;
- }
-
- if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- descs[0] = (struct ioat_raw_descriptor *) xor;
- descs[1] = (struct ioat_raw_descriptor *) xor_ex;
- for (i = 0; i < src_cnt; i++) {
- dma_addr_t src = xor_get_src(descs, i);
-
- ioat_unmap(pdev, src - offset, len,
- PCI_DMA_TODEVICE, flags, 0);
- }
-
- /* dest is a source in xor validate operations */
- if (xor->ctl_f.op == IOAT_OP_XOR_VAL) {
- ioat_unmap(pdev, xor->dst_addr - offset, len,
- PCI_DMA_TODEVICE, flags, 1);
- break;
- }
- }
-
- if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
- ioat_unmap(pdev, xor->dst_addr - offset, len,
- PCI_DMA_FROMDEVICE, flags, 1);
- break;
- }
- case IOAT_OP_PQ_VAL:
- case IOAT_OP_PQ: {
- struct ioat_pq_descriptor *pq = desc->pq;
- struct ioat_ring_ent *ext;
- struct ioat_pq_ext_descriptor *pq_ex = NULL;
- int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
- struct ioat_raw_descriptor *descs[2];
- int i;
-
- if (src_cnt > 3) {
- ext = ioat2_get_ring_ent(ioat, idx + 1);
- pq_ex = ext->pq_ex;
- }
-
- /* in the 'continue' case don't unmap the dests as sources */
- if (dmaf_p_disabled_continue(flags))
- src_cnt--;
- else if (dmaf_continue(flags))
- src_cnt -= 3;
-
- if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- descs[0] = (struct ioat_raw_descriptor *) pq;
- descs[1] = (struct ioat_raw_descriptor *) pq_ex;
- for (i = 0; i < src_cnt; i++) {
- dma_addr_t src = pq_get_src(descs, i);
-
- ioat_unmap(pdev, src - offset, len,
- PCI_DMA_TODEVICE, flags, 0);
- }
-
- /* the dests are sources in pq validate operations */
- if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
- if (!(flags & DMA_PREP_PQ_DISABLE_P))
- ioat_unmap(pdev, pq->p_addr - offset,
- len, PCI_DMA_TODEVICE, flags, 0);
- if (!(flags & DMA_PREP_PQ_DISABLE_Q))
- ioat_unmap(pdev, pq->q_addr - offset,
- len, PCI_DMA_TODEVICE, flags, 0);
- break;
- }
- }
-
- if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- if (!(flags & DMA_PREP_PQ_DISABLE_P))
- ioat_unmap(pdev, pq->p_addr - offset, len,
- PCI_DMA_BIDIRECTIONAL, flags, 1);
- if (!(flags & DMA_PREP_PQ_DISABLE_Q))
- ioat_unmap(pdev, pq->q_addr - offset, len,
- PCI_DMA_BIDIRECTIONAL, flags, 1);
- }
- break;
- }
- case IOAT_OP_PQ_16S:
- case IOAT_OP_PQ_VAL_16S: {
- struct ioat_pq_descriptor *pq = desc->pq;
- int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
- struct ioat_raw_descriptor *descs[4];
- int i;
-
- /* in the 'continue' case don't unmap the dests as sources */
- if (dmaf_p_disabled_continue(flags))
- src_cnt--;
- else if (dmaf_continue(flags))
- src_cnt -= 3;
-
- if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- descs[0] = (struct ioat_raw_descriptor *)pq;
- descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw);
- descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]);
- for (i = 0; i < src_cnt; i++) {
- dma_addr_t src = pq16_get_src(descs, i);
-
- ioat_unmap(pdev, src - offset, len,
- PCI_DMA_TODEVICE, flags, 0);
- }
-
- /* the dests are sources in pq validate operations */
- if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
- if (!(flags & DMA_PREP_PQ_DISABLE_P))
- ioat_unmap(pdev, pq->p_addr - offset,
- len, PCI_DMA_TODEVICE,
- flags, 0);
- if (!(flags & DMA_PREP_PQ_DISABLE_Q))
- ioat_unmap(pdev, pq->q_addr - offset,
- len, PCI_DMA_TODEVICE,
- flags, 0);
- break;
- }
- }
-
- if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- if (!(flags & DMA_PREP_PQ_DISABLE_P))
- ioat_unmap(pdev, pq->p_addr - offset, len,
- PCI_DMA_BIDIRECTIONAL, flags, 1);
- if (!(flags & DMA_PREP_PQ_DISABLE_Q))
- ioat_unmap(pdev, pq->q_addr - offset, len,
- PCI_DMA_BIDIRECTIONAL, flags, 1);
- }
- break;
- }
- default:
- dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
- __func__, desc->hw->ctl_f.op);
- }
+ kmem_cache_free(ioat3_sed_cache, sed);
}
static bool desc_has_ext(struct ioat_ring_ent *desc)
@@ -577,7 +401,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
tx = &desc->txd;
if (tx->cookie) {
dma_cookie_complete(tx);
- ioat3_dma_unmap(ioat, desc, idx + i);
+ dma_descriptor_unmap(tx);
if (tx->callback) {
tx->callback(tx->callback_param);
tx->callback = NULL;
@@ -807,7 +631,7 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(c, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
ioat3_cleanup(ioat);
@@ -1129,9 +953,6 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
u8 op;
int i, s, idx, num_descs;
- /* this function only handles src_cnt 9 - 16 */
- BUG_ON(src_cnt < 9);
-
/* this function is only called with 9-16 sources */
op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;
@@ -1159,8 +980,7 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
descs[0] = (struct ioat_raw_descriptor *) pq;
- desc->sed = ioat3_alloc_sed(device,
- sed_get_pq16_pool_idx(src_cnt));
+ desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3);
if (!desc->sed) {
dev_err(to_dev(chan),
"%s: no free sed entries\n", __func__);
@@ -1218,13 +1038,21 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
return &desc->txd;
}
+static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
+{
+ if (dmaf_p_disabled_continue(flags))
+ return src_cnt + 1;
+ else if (dmaf_continue(flags))
+ return src_cnt + 3;
+ else
+ return src_cnt;
+}
+
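A worked example of what the new src_cnt_flags() helper accounts for in the 8-source cutoff used below: a continuation operation (dmaf_continue()) re-reads the previous P/Q results as three extra implicit sources, so an 8-source DMA_PREP_CONTINUE request effectively carries 8 + 3 = 11 sources and must take the 16-source descriptor path even though dma->max_pq alone would not force it; when P is disabled, only one extra source is re-read, hence src_cnt + 1.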
static struct dma_async_tx_descriptor *
ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
unsigned long flags)
{
- struct dma_device *dma = chan->device;
-
/* specify valid address for disabled result */
if (flags & DMA_PREP_PQ_DISABLE_P)
dst[0] = dst[1];
@@ -1244,7 +1072,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
single_source_coef[0] = scf[0];
single_source_coef[1] = 0;
- return (src_cnt > 8) && (dma->max_pq > 8) ?
+ return src_cnt_flags(src_cnt, flags) > 8 ?
__ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
2, single_source_coef, len,
flags) :
@@ -1252,7 +1080,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
single_source_coef, len, flags);
} else {
- return (src_cnt > 8) && (dma->max_pq > 8) ?
+ return src_cnt_flags(src_cnt, flags) > 8 ?
__ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
scf, len, flags) :
__ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
@@ -1265,8 +1093,6 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
enum sum_check_flags *pqres, unsigned long flags)
{
- struct dma_device *dma = chan->device;
-
/* specify valid address for disabled result */
if (flags & DMA_PREP_PQ_DISABLE_P)
pq[0] = pq[1];
@@ -1278,7 +1104,7 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
*/
*pqres = 0;
- return (src_cnt > 8) && (dma->max_pq > 8) ?
+ return src_cnt_flags(src_cnt, flags) > 8 ?
__ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
flags) :
__ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
@@ -1289,7 +1115,6 @@ static struct dma_async_tx_descriptor *
ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
unsigned int src_cnt, size_t len, unsigned long flags)
{
- struct dma_device *dma = chan->device;
unsigned char scf[src_cnt];
dma_addr_t pq[2];
@@ -1298,7 +1123,7 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
flags |= DMA_PREP_PQ_DISABLE_Q;
pq[1] = dst; /* specify valid address for disabled result */
- return (src_cnt > 8) && (dma->max_pq > 8) ?
+ return src_cnt_flags(src_cnt, flags) > 8 ?
__ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
flags) :
__ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
@@ -1310,7 +1135,6 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
unsigned int src_cnt, size_t len,
enum sum_check_flags *result, unsigned long flags)
{
- struct dma_device *dma = chan->device;
unsigned char scf[src_cnt];
dma_addr_t pq[2];
@@ -1324,8 +1148,7 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
flags |= DMA_PREP_PQ_DISABLE_Q;
pq[1] = pq[0]; /* specify valid address for disabled result */
-
- return (src_cnt > 8) && (dma->max_pq > 8) ?
+ return src_cnt_flags(src_cnt, flags) > 8 ?
__ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
scf, len, flags) :
__ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
@@ -1444,9 +1267,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
DMA_TO_DEVICE);
tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
IOAT_NUM_SRC_TEST, PAGE_SIZE,
- DMA_PREP_INTERRUPT |
- DMA_COMPL_SKIP_SRC_UNMAP |
- DMA_COMPL_SKIP_DEST_UNMAP);
+ DMA_PREP_INTERRUPT);
if (!tx) {
dev_err(dev, "Self-test xor prep failed\n");
@@ -1468,7 +1289,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
dev_err(dev, "Self-test xor timed out\n");
err = -ENODEV;
goto dma_unmap;
@@ -1507,9 +1328,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
DMA_TO_DEVICE);
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
- &xor_val_result, DMA_PREP_INTERRUPT |
- DMA_COMPL_SKIP_SRC_UNMAP |
- DMA_COMPL_SKIP_DEST_UNMAP);
+ &xor_val_result, DMA_PREP_INTERRUPT);
if (!tx) {
dev_err(dev, "Self-test zero prep failed\n");
err = -ENODEV;
@@ -1530,7 +1349,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
dev_err(dev, "Self-test validate timed out\n");
err = -ENODEV;
goto dma_unmap;
@@ -1545,6 +1364,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
goto free_resources;
}
+ memset(page_address(dest), 0, PAGE_SIZE);
+
/* test for non-zero parity sum */
op = IOAT_OP_XOR_VAL;
@@ -1554,9 +1375,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
DMA_TO_DEVICE);
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
- &xor_val_result, DMA_PREP_INTERRUPT |
- DMA_COMPL_SKIP_SRC_UNMAP |
- DMA_COMPL_SKIP_DEST_UNMAP);
+ &xor_val_result, DMA_PREP_INTERRUPT);
if (!tx) {
dev_err(dev, "Self-test 2nd zero prep failed\n");
err = -ENODEV;
@@ -1577,7 +1396,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
dev_err(dev, "Self-test 2nd validate timed out\n");
err = -ENODEV;
goto dma_unmap;
@@ -1630,52 +1449,36 @@ static int ioat3_dma_self_test(struct ioatdma_device *device)
static int ioat3_irq_reinit(struct ioatdma_device *device)
{
- int msixcnt = device->common.chancnt;
struct pci_dev *pdev = device->pdev;
- int i;
- struct msix_entry *msix;
- struct ioat_chan_common *chan;
- int err = 0;
+ int irq = pdev->irq, i;
+
+ if (!is_bwd_ioat(pdev))
+ return 0;
switch (device->irq_mode) {
case IOAT_MSIX:
-		for (i = 0; i < msixcnt; i++) {
-			msix = &device->msix_entries[i];
+		for (i = 0; i < device->common.chancnt; i++) {
+			struct msix_entry *msix = &device->msix_entries[i];
+			struct ioat_chan_common *chan;
chan = ioat_chan_by_index(device, i);
devm_free_irq(&pdev->dev, msix->vector, chan);
}
pci_disable_msix(pdev);
break;
-
- case IOAT_MSIX_SINGLE:
- msix = &device->msix_entries[0];
- chan = ioat_chan_by_index(device, 0);
- devm_free_irq(&pdev->dev, msix->vector, chan);
- pci_disable_msix(pdev);
- break;
-
case IOAT_MSI:
- chan = ioat_chan_by_index(device, 0);
- devm_free_irq(&pdev->dev, pdev->irq, chan);
pci_disable_msi(pdev);
- break;
-
+ /* fall through */
case IOAT_INTX:
- chan = ioat_chan_by_index(device, 0);
- devm_free_irq(&pdev->dev, pdev->irq, chan);
+ devm_free_irq(&pdev->dev, irq, device);
break;
-
default:
return 0;
}
-
device->irq_mode = IOAT_NOIRQ;
- err = ioat_dma_setup_interrupts(device);
-
- return err;
+ return ioat_dma_setup_interrupts(device);
}
static int ioat3_reset_hw(struct ioat_chan_common *chan)
@@ -1718,14 +1521,12 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan)
}
err = ioat2_reset_sync(chan, msecs_to_jiffies(200));
- if (err) {
- dev_err(&pdev->dev, "Failed to reset!\n");
- return err;
- }
-
- if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev))
+ if (!err)
err = ioat3_irq_reinit(device);
+ if (err)
+ dev_err(&pdev->dev, "Failed to reset: %d\n", err);
+
return err;
}
@@ -1835,21 +1636,15 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
char pool_name[14];
int i;
- /* allocate sw descriptor pool for SED */
- device->sed_pool = kmem_cache_create("ioat_sed",
- sizeof(struct ioat_sed_ent), 0, 0, NULL);
- if (!device->sed_pool)
- return -ENOMEM;
-
for (i = 0; i < MAX_SED_POOLS; i++) {
snprintf(pool_name, 14, "ioat_hw%d_sed", i);
/* allocate SED DMA pool */
- device->sed_hw_pool[i] = dma_pool_create(pool_name,
+ device->sed_hw_pool[i] = dmam_pool_create(pool_name,
&pdev->dev,
SED_SIZE * (i + 1), 64, 0);
if (!device->sed_hw_pool[i])
- goto sed_pool_cleanup;
+ return -ENOMEM;
}
}
@@ -1875,28 +1670,4 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
device->dca = ioat3_dca_init(pdev, device->reg_base);
return 0;
-
-sed_pool_cleanup:
- if (device->sed_pool) {
- int i;
- kmem_cache_destroy(device->sed_pool);
-
- for (i = 0; i < MAX_SED_POOLS; i++)
- if (device->sed_hw_pool[i])
- dma_pool_destroy(device->sed_hw_pool[i]);
- }
-
- return -ENOMEM;
-}
-
-void ioat3_dma_remove(struct ioatdma_device *device)
-{
- if (device->sed_pool) {
- int i;
- kmem_cache_destroy(device->sed_pool);
-
- for (i = 0; i < MAX_SED_POOLS; i++)
- if (device->sed_hw_pool[i])
- dma_pool_destroy(device->sed_hw_pool[i]);
- }
}
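The dma_pool_create() -> dmam_pool_create() switch above is what allows the sed_pool_cleanup unwind path and ioat3_dma_remove() to be deleted: the managed (devres) variant ties each pool's lifetime to the PCI device, so the pools are destroyed automatically on driver detach. A minimal sketch, with the pool name and sizes made up for illustration:

	struct dma_pool *pool;

	pool = dmam_pool_create("example_sed", &pdev->dev, 64, 64, 0);
	if (!pool)
		return -ENOMEM;
	/* no dma_pool_destroy() on any exit path: devres releases the
	 * pool when the driver unbinds */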
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 2c8d560e6334..1d051cd045db 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -123,6 +123,7 @@ module_param(ioat_dca_enabled, int, 0644);
MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
struct kmem_cache *ioat2_cache;
+struct kmem_cache *ioat3_sed_cache;
#define DRV_NAME "ioatdma"
@@ -207,9 +208,6 @@ static void ioat_remove(struct pci_dev *pdev)
if (!device)
return;
- if (device->version >= IOAT_VER_3_0)
- ioat3_dma_remove(device);
-
dev_err(&pdev->dev, "Removing dma and dca services\n");
if (device->dca) {
unregister_dca_provider(device->dca, &pdev->dev);
@@ -221,7 +219,7 @@ static void ioat_remove(struct pci_dev *pdev)
static int __init ioat_init_module(void)
{
- int err;
+ int err = -ENOMEM;
pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
DRV_NAME, IOAT_DMA_VERSION);
@@ -231,9 +229,21 @@ static int __init ioat_init_module(void)
if (!ioat2_cache)
return -ENOMEM;
+ ioat3_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
+ if (!ioat3_sed_cache)
+ goto err_ioat2_cache;
+
err = pci_register_driver(&ioat_pci_driver);
if (err)
- kmem_cache_destroy(ioat2_cache);
+ goto err_ioat3_cache;
+
+ return 0;
+
+ err_ioat3_cache:
+ kmem_cache_destroy(ioat3_sed_cache);
+
+ err_ioat2_cache:
+ kmem_cache_destroy(ioat2_cache);
return err;
}
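The reworked ioat_init_module() above follows the standard kernel unwind-ladder shape: err starts at -ENOMEM, and each allocation that succeeds gains a cleanup label in reverse order, so a failure at any step frees exactly what was already set up. KMEM_CACHE(ioat_sed_ent, 0) is the &lt;linux/slab.h&gt; shorthand that expands to kmem_cache_create() with the struct's name, size and alignment filled in.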
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index dd8b44a56e5d..c56137bc3868 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -61,80 +61,6 @@ static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
}
}
-static void
-iop_desc_unmap(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
-{
- struct dma_async_tx_descriptor *tx = &desc->async_tx;
- struct iop_adma_desc_slot *unmap = desc->group_head;
- struct device *dev = &iop_chan->device->pdev->dev;
- u32 len = unmap->unmap_len;
- enum dma_ctrl_flags flags = tx->flags;
- u32 src_cnt;
- dma_addr_t addr;
- dma_addr_t dest;
-
- src_cnt = unmap->unmap_src_cnt;
- dest = iop_desc_get_dest_addr(unmap, iop_chan);
- if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- enum dma_data_direction dir;
-
- if (src_cnt > 1) /* is xor? */
- dir = DMA_BIDIRECTIONAL;
- else
- dir = DMA_FROM_DEVICE;
-
- dma_unmap_page(dev, dest, len, dir);
- }
-
- if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- while (src_cnt--) {
- addr = iop_desc_get_src_addr(unmap, iop_chan, src_cnt);
- if (addr == dest)
- continue;
- dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
- }
- }
- desc->group_head = NULL;
-}
-
-static void
-iop_desc_unmap_pq(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
-{
- struct dma_async_tx_descriptor *tx = &desc->async_tx;
- struct iop_adma_desc_slot *unmap = desc->group_head;
- struct device *dev = &iop_chan->device->pdev->dev;
- u32 len = unmap->unmap_len;
- enum dma_ctrl_flags flags = tx->flags;
- u32 src_cnt = unmap->unmap_src_cnt;
- dma_addr_t pdest = iop_desc_get_dest_addr(unmap, iop_chan);
- dma_addr_t qdest = iop_desc_get_qdest_addr(unmap, iop_chan);
- int i;
-
- if (tx->flags & DMA_PREP_CONTINUE)
- src_cnt -= 3;
-
- if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP) && !desc->pq_check_result) {
- dma_unmap_page(dev, pdest, len, DMA_BIDIRECTIONAL);
- dma_unmap_page(dev, qdest, len, DMA_BIDIRECTIONAL);
- }
-
- if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- dma_addr_t addr;
-
- for (i = 0; i < src_cnt; i++) {
- addr = iop_desc_get_src_addr(unmap, iop_chan, i);
- dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
- }
- if (desc->pq_check_result) {
- dma_unmap_page(dev, pdest, len, DMA_TO_DEVICE);
- dma_unmap_page(dev, qdest, len, DMA_TO_DEVICE);
- }
- }
-
- desc->group_head = NULL;
-}
-
-
static dma_cookie_t
iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
@@ -152,15 +78,9 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
if (tx->callback)
tx->callback(tx->callback_param);
- /* unmap dma addresses
- * (unmap_single vs unmap_page?)
- */
- if (desc->group_head && desc->unmap_len) {
- if (iop_desc_is_pq(desc))
- iop_desc_unmap_pq(iop_chan, desc);
- else
- iop_desc_unmap(iop_chan, desc);
- }
+ dma_descriptor_unmap(tx);
+ if (desc->group_head)
+ desc->group_head = NULL;
}
/* run dependent operations */
@@ -591,7 +511,6 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
if (sw_desc) {
grp_start = sw_desc->group_head;
iop_desc_init_interrupt(grp_start, iop_chan);
- grp_start->unmap_len = 0;
sw_desc->async_tx.flags = flags;
}
spin_unlock_bh(&iop_chan->lock);
@@ -623,8 +542,6 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
iop_desc_set_byte_count(grp_start, iop_chan, len);
iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
iop_desc_set_memcpy_src_addr(grp_start, dma_src);
- sw_desc->unmap_src_cnt = 1;
- sw_desc->unmap_len = len;
sw_desc->async_tx.flags = flags;
}
spin_unlock_bh(&iop_chan->lock);
@@ -657,8 +574,6 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
iop_desc_init_xor(grp_start, src_cnt, flags);
iop_desc_set_byte_count(grp_start, iop_chan, len);
iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
- sw_desc->unmap_src_cnt = src_cnt;
- sw_desc->unmap_len = len;
sw_desc->async_tx.flags = flags;
while (src_cnt--)
iop_desc_set_xor_src_addr(grp_start, src_cnt,
@@ -694,8 +609,6 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
grp_start->xor_check_result = result;
pr_debug("\t%s: grp_start->xor_check_result: %p\n",
__func__, grp_start->xor_check_result);
- sw_desc->unmap_src_cnt = src_cnt;
- sw_desc->unmap_len = len;
sw_desc->async_tx.flags = flags;
while (src_cnt--)
iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
@@ -748,8 +661,6 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
dst[0] = dst[1] & 0x7;
iop_desc_set_pq_addr(g, dst);
- sw_desc->unmap_src_cnt = src_cnt;
- sw_desc->unmap_len = len;
sw_desc->async_tx.flags = flags;
for (i = 0; i < src_cnt; i++)
iop_desc_set_pq_src_addr(g, i, src[i], scf[i]);
@@ -804,8 +715,6 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
g->pq_check_result = pqres;
pr_debug("\t%s: g->pq_check_result: %p\n",
__func__, g->pq_check_result);
- sw_desc->unmap_src_cnt = src_cnt+2;
- sw_desc->unmap_len = len;
sw_desc->async_tx.flags = flags;
while (src_cnt--)
iop_desc_set_pq_zero_sum_src_addr(g, src_cnt,
@@ -864,7 +773,7 @@ static enum dma_status iop_adma_status(struct dma_chan *chan,
int ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
iop_adma_slot_cleanup(iop_chan);
@@ -983,7 +892,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
msleep(1);
if (iop_adma_status(dma_chan, cookie, NULL) !=
- DMA_SUCCESS) {
+ DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test copy timed out, disabling\n");
err = -ENODEV;
@@ -1083,7 +992,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) !=
- DMA_SUCCESS) {
+ DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test xor timed out, disabling\n");
err = -ENODEV;
@@ -1129,7 +1038,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+ if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test zero sum timed out, disabling\n");
err = -ENODEV;
@@ -1158,7 +1067,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+ if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test non-zero sum timed out, disabling\n");
err = -ENODEV;
@@ -1254,7 +1163,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) !=
- DMA_SUCCESS) {
+ DMA_COMPLETE) {
dev_err(dev, "Self-test pq timed out, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1291,7 +1200,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) !=
- DMA_SUCCESS) {
+ DMA_COMPLETE) {
dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1323,7 +1232,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) !=
- DMA_SUCCESS) {
+ DMA_COMPLETE) {
dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
err = -ENODEV;
goto free_resources;
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index cb9c0bc317e8..128ca143486d 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1232,8 +1232,10 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
descnew = desc;
- dev_dbg(dev, "IDMAC irq %d, dma 0x%08x, next dma 0x%08x, current %d, curbuf 0x%08x\n",
- irq, sg_dma_address(*sg), sgnext ? sg_dma_address(sgnext) : 0, ichan->active_buffer, curbuf);
+ dev_dbg(dev, "IDMAC irq %d, dma %#llx, next dma %#llx, current %d, curbuf %#x\n",
+ irq, (u64)sg_dma_address(*sg),
+ sgnext ? (u64)sg_dma_address(sgnext) : 0,
+ ichan->active_buffer, curbuf);
/* Find the descriptor of sgnext */
sgnew = idmac_sg_next(ichan, &descnew, *sg);
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index a2c330f5f952..e26075408e9b 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -344,7 +344,7 @@ static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
size_t bytes = 0;
ret = dma_cookie_status(&c->vc.chan, cookie, state);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
spin_lock_irqsave(&c->vc.lock, flags);
@@ -693,7 +693,7 @@ static int k3_dma_probe(struct platform_device *op)
irq = platform_get_irq(op, 0);
ret = devm_request_irq(&op->dev, irq,
- k3_dma_int_handler, IRQF_DISABLED, DRIVER_NAME, d);
+ k3_dma_int_handler, 0, DRIVER_NAME, d);
if (ret)
return ret;
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index ff8d7827f8cb..8869500ab92b 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -798,8 +798,7 @@ static void dma_do_tasklet(unsigned long data)
* move the descriptors to a temporary list so we can drop
* the lock during the entire cleanup operation
*/
- list_del(&desc->node);
- list_add(&desc->node, &chain_cleanup);
+ list_move(&desc->node, &chain_cleanup);
/*
* Look for the first list entry which has the ENDIRQEN flag
@@ -863,7 +862,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
if (irq) {
ret = devm_request_irq(pdev->dev, irq,
- mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy);
+ mmp_pdma_chan_handler, 0, "pdma", phy);
if (ret) {
dev_err(pdev->dev, "channel request irq fail!\n");
return ret;
@@ -970,7 +969,7 @@ static int mmp_pdma_probe(struct platform_device *op)
/* all chan share one irq, demux inside */
irq = platform_get_irq(op, 0);
ret = devm_request_irq(pdev->dev, irq,
- mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev);
+ mmp_pdma_int_handler, 0, "pdma", pdev);
if (ret)
return ret;
}
@@ -1018,6 +1017,7 @@ static int mmp_pdma_probe(struct platform_device *op)
}
}
+ platform_set_drvdata(op, pdev);
dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
return 0;
}
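list_move(), used above, is the &lt;linux/list.h&gt; helper equivalent to list_del() followed by list_add(): it unlinks the entry and re-links it at the head of the destination list in one call, exactly what the open-coded pair it replaces did.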
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index d3b6358e5a27..3ddacc14a736 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -62,6 +62,11 @@
#define TDCR_BURSTSZ_16B (0x3 << 6)
#define TDCR_BURSTSZ_32B (0x6 << 6)
#define TDCR_BURSTSZ_64B (0x7 << 6)
+#define TDCR_BURSTSZ_SQU_1B (0x5 << 6)
+#define TDCR_BURSTSZ_SQU_2B (0x6 << 6)
+#define TDCR_BURSTSZ_SQU_4B (0x0 << 6)
+#define TDCR_BURSTSZ_SQU_8B (0x1 << 6)
+#define TDCR_BURSTSZ_SQU_16B (0x3 << 6)
#define TDCR_BURSTSZ_SQU_32B (0x7 << 6)
#define TDCR_BURSTSZ_128B (0x5 << 6)
#define TDCR_DSTDIR_MSK (0x3 << 4) /* Dst Direction */
@@ -158,7 +163,7 @@ static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac)
/* disable irq */
writel(0, tdmac->reg_base + TDIMR);
- tdmac->status = DMA_SUCCESS;
+ tdmac->status = DMA_COMPLETE;
}
static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac)
@@ -228,8 +233,31 @@ static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac)
return -EINVAL;
}
} else if (tdmac->type == PXA910_SQU) {
- tdcr |= TDCR_BURSTSZ_SQU_32B;
tdcr |= TDCR_SSPMOD;
+
+ switch (tdmac->burst_sz) {
+ case 1:
+ tdcr |= TDCR_BURSTSZ_SQU_1B;
+ break;
+ case 2:
+ tdcr |= TDCR_BURSTSZ_SQU_2B;
+ break;
+ case 4:
+ tdcr |= TDCR_BURSTSZ_SQU_4B;
+ break;
+ case 8:
+ tdcr |= TDCR_BURSTSZ_SQU_8B;
+ break;
+ case 16:
+ tdcr |= TDCR_BURSTSZ_SQU_16B;
+ break;
+ case 32:
+ tdcr |= TDCR_BURSTSZ_SQU_32B;
+ break;
+ default:
+ dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
+ return -EINVAL;
+ }
}
writel(tdcr, tdmac->reg_base + TDCR);
@@ -324,7 +352,7 @@ static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan)
if (tdmac->irq) {
ret = devm_request_irq(tdmac->dev, tdmac->irq,
- mmp_tdma_chan_handler, IRQF_DISABLED, "tdma", tdmac);
+ mmp_tdma_chan_handler, 0, "tdma", tdmac);
if (ret)
return ret;
}
@@ -365,7 +393,7 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
int num_periods = buf_len / period_len;
int i = 0, buf = 0;
- if (tdmac->status != DMA_SUCCESS)
+ if (tdmac->status != DMA_COMPLETE)
return NULL;
if (period_len > TDMA_MAX_XFER_BYTES) {
@@ -499,7 +527,7 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
tdmac->idx = idx;
tdmac->type = type;
tdmac->reg_base = (unsigned long)tdev->base + idx * 4;
- tdmac->status = DMA_SUCCESS;
+ tdmac->status = DMA_COMPLETE;
tdev->tdmac[tdmac->idx] = tdmac;
tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);
@@ -554,7 +582,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
if (irq_num != chan_num) {
irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, irq,
- mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev);
+ mmp_tdma_int_handler, 0, "tdma", tdev);
if (ret)
return ret;
}
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 536dcb8ba5fd..53fb0c8365b0 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -54,20 +54,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
hw_desc->desc_command = (1 << 31);
}
-static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
-{
- struct mv_xor_desc *hw_desc = desc->hw_desc;
- return hw_desc->phy_dest_addr;
-}
-
-static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
- int src_idx)
-{
- struct mv_xor_desc *hw_desc = desc->hw_desc;
- return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)];
-}
-
-
static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
u32 byte_count)
{
@@ -278,42 +264,9 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
desc->async_tx.callback(
desc->async_tx.callback_param);
- /* unmap dma addresses
- * (unmap_single vs unmap_page?)
- */
- if (desc->group_head && desc->unmap_len) {
- struct mv_xor_desc_slot *unmap = desc->group_head;
- struct device *dev = mv_chan_to_devp(mv_chan);
- u32 len = unmap->unmap_len;
- enum dma_ctrl_flags flags = desc->async_tx.flags;
- u32 src_cnt;
- dma_addr_t addr;
- dma_addr_t dest;
-
- src_cnt = unmap->unmap_src_cnt;
- dest = mv_desc_get_dest_addr(unmap);
- if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- enum dma_data_direction dir;
-
- if (src_cnt > 1) /* is xor ? */
- dir = DMA_BIDIRECTIONAL;
- else
- dir = DMA_FROM_DEVICE;
- dma_unmap_page(dev, dest, len, dir);
- }
-
- if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- while (src_cnt--) {
- addr = mv_desc_get_src_addr(unmap,
- src_cnt);
- if (addr == dest)
- continue;
- dma_unmap_page(dev, addr, len,
- DMA_TO_DEVICE);
- }
- }
+ dma_descriptor_unmap(&desc->async_tx);
+ if (desc->group_head)
desc->group_head = NULL;
- }
}
/* run dependent operations */
@@ -749,7 +702,7 @@ static enum dma_status mv_xor_status(struct dma_chan *chan,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS) {
+ if (ret == DMA_COMPLETE) {
mv_xor_clean_completed_slots(mv_chan);
return ret;
}
@@ -828,7 +781,6 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
/*
* Perform a transaction to verify the HW works.
*/
-#define MV_XOR_TEST_SIZE 2000
static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
@@ -838,20 +790,21 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
struct dma_chan *dma_chan;
dma_cookie_t cookie;
struct dma_async_tx_descriptor *tx;
+ struct dmaengine_unmap_data *unmap;
int err = 0;
- src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+ src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
if (!src)
return -ENOMEM;
- dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+ dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
if (!dest) {
kfree(src);
return -ENOMEM;
}
/* Fill in src buffer */
- for (i = 0; i < MV_XOR_TEST_SIZE; i++)
+ for (i = 0; i < PAGE_SIZE; i++)
((u8 *) src)[i] = (u8)i;
dma_chan = &mv_chan->dmachan;
@@ -860,21 +813,33 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
goto out;
}
- dest_dma = dma_map_single(dma_chan->device->dev, dest,
- MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
+ unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
+ if (!unmap) {
+ err = -ENOMEM;
+ goto free_resources;
+ }
+
+ src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
+ PAGE_SIZE, DMA_TO_DEVICE);
+ unmap->to_cnt = 1;
+ unmap->addr[0] = src_dma;
- src_dma = dma_map_single(dma_chan->device->dev, src,
- MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
+ dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ unmap->from_cnt = 1;
+ unmap->addr[1] = dest_dma;
+
+ unmap->len = PAGE_SIZE;
tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
- MV_XOR_TEST_SIZE, 0);
+ PAGE_SIZE, 0);
cookie = mv_xor_tx_submit(tx);
mv_xor_issue_pending(dma_chan);
async_tx_ack(tx);
msleep(1);
if (mv_xor_status(dma_chan, cookie, NULL) !=
- DMA_SUCCESS) {
+ DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test copy timed out, disabling\n");
err = -ENODEV;
@@ -882,8 +847,8 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
}
dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
- MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
- if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (memcmp(src, dest, PAGE_SIZE)) {
dev_err(dma_chan->device->dev,
"Self-test copy failed compare, disabling\n");
err = -ENODEV;
@@ -891,6 +856,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
}
free_resources:
+ dmaengine_unmap_put(unmap);
mv_xor_free_chan_resources(dma_chan);
out:
kfree(src);
@@ -908,13 +874,15 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
dma_addr_t dest_dma;
struct dma_async_tx_descriptor *tx;
+ struct dmaengine_unmap_data *unmap;
struct dma_chan *dma_chan;
dma_cookie_t cookie;
u8 cmp_byte = 0;
u32 cmp_word;
int err = 0;
+ int src_count = MV_XOR_NUM_SRC_TEST;
- for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+ for (src_idx = 0; src_idx < src_count; src_idx++) {
xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
if (!xor_srcs[src_idx]) {
while (src_idx--)
@@ -931,13 +899,13 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
}
/* Fill in src buffers */
- for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+ for (src_idx = 0; src_idx < src_count; src_idx++) {
u8 *ptr = page_address(xor_srcs[src_idx]);
for (i = 0; i < PAGE_SIZE; i++)
ptr[i] = (1 << src_idx);
}
- for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
+ for (src_idx = 0; src_idx < src_count; src_idx++)
cmp_byte ^= (u8) (1 << src_idx);
cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
@@ -951,16 +919,29 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
goto out;
}
+ unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
+ GFP_KERNEL);
+ if (!unmap) {
+ err = -ENOMEM;
+ goto free_resources;
+ }
+
/* test xor */
- dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
- DMA_FROM_DEVICE);
+ for (i = 0; i < src_count; i++) {
+ unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
+ 0, PAGE_SIZE, DMA_TO_DEVICE);
+ dma_srcs[i] = unmap->addr[i];
+ unmap->to_cnt++;
+ }
- for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
- dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
- 0, PAGE_SIZE, DMA_TO_DEVICE);
+ unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ dest_dma = unmap->addr[src_count];
+ unmap->from_cnt = 1;
+ unmap->len = PAGE_SIZE;
tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
- MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
+ src_count, PAGE_SIZE, 0);
cookie = mv_xor_tx_submit(tx);
mv_xor_issue_pending(dma_chan);
@@ -968,7 +949,7 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
msleep(8);
if (mv_xor_status(dma_chan, cookie, NULL) !=
- DMA_SUCCESS) {
+ DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test xor timed out, disabling\n");
err = -ENODEV;
@@ -989,9 +970,10 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
}
free_resources:
+ dmaengine_unmap_put(unmap);
mv_xor_free_chan_resources(dma_chan);
out:
- src_idx = MV_XOR_NUM_SRC_TEST;
+ src_idx = src_count;
while (src_idx--)
__free_page(xor_srcs[src_idx]);
__free_page(dest);
@@ -1076,10 +1058,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
}
mv_chan->mmr_base = xordev->xor_base;
- if (!mv_chan->mmr_base) {
- ret = -ENOMEM;
- goto err_free_dma;
- }
+ mv_chan->mmr_high_base = xordev->xor_high_base;
tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
mv_chan);
@@ -1138,7 +1117,7 @@ static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
const struct mbus_dram_target_info *dram)
{
- void __iomem *base = xordev->xor_base;
+ void __iomem *base = xordev->xor_high_base;
u32 win_enable = 0;
int i;
@@ -1220,6 +1199,7 @@ static int mv_xor_probe(struct platform_device *pdev)
int i = 0;
for_each_child_of_node(pdev->dev.of_node, np) {
+ struct mv_xor_chan *chan;
dma_cap_mask_t cap_mask;
int irq;
@@ -1237,21 +1217,21 @@ static int mv_xor_probe(struct platform_device *pdev)
goto err_channel_add;
}
- xordev->channels[i] =
- mv_xor_channel_add(xordev, pdev, i,
- cap_mask, irq);
- if (IS_ERR(xordev->channels[i])) {
- ret = PTR_ERR(xordev->channels[i]);
- xordev->channels[i] = NULL;
+ chan = mv_xor_channel_add(xordev, pdev, i,
+ cap_mask, irq);
+ if (IS_ERR(chan)) {
+ ret = PTR_ERR(chan);
irq_dispose_mapping(irq);
goto err_channel_add;
}
+ xordev->channels[i] = chan;
i++;
}
} else if (pdata && pdata->channels) {
for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
struct mv_xor_channel_data *cd;
+ struct mv_xor_chan *chan;
int irq;
cd = &pdata->channels[i];
@@ -1266,13 +1246,14 @@ static int mv_xor_probe(struct platform_device *pdev)
goto err_channel_add;
}
- xordev->channels[i] =
- mv_xor_channel_add(xordev, pdev, i,
- cd->cap_mask, irq);
- if (IS_ERR(xordev->channels[i])) {
- ret = PTR_ERR(xordev->channels[i]);
+ chan = mv_xor_channel_add(xordev, pdev, i,
+ cd->cap_mask, irq);
+ if (IS_ERR(chan)) {
+ ret = PTR_ERR(chan);
goto err_channel_add;
}
+
+ xordev->channels[i] = chan;
}
}
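The self-test rework above is the consumer side of the new dmaengine unmap API that also underlies dma_descriptor_unmap(): dmaengine_get_unmap_data() returns a refcounted container that records every mapped page, and a single dmaengine_unmap_put() releases them all, replacing the old DMA_COMPL_SKIP_*_UNMAP bookkeeping. A condensed sketch of the memcpy case, assuming dev, src_page and dst_page already exist:

	struct dmaengine_unmap_data *unmap;

	unmap = dmaengine_get_unmap_data(dev, 2, GFP_KERNEL);
	if (!unmap)
		return -ENOMEM;
	unmap->addr[0] = dma_map_page(dev, src_page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	unmap->to_cnt = 1;
	unmap->addr[1] = dma_map_page(dev, dst_page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;
	/* ... submit the descriptor and wait for completion ... */
	dmaengine_unmap_put(unmap);	/* unmaps both pages, frees the container */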
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index 06b067f24c9b..d0749229c875 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -34,13 +34,13 @@
#define XOR_OPERATION_MODE_MEMCPY 2
#define XOR_DESCRIPTOR_SWAP BIT(14)
-#define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4))
-#define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4))
-#define XOR_BYTE_COUNT(chan) (chan->mmr_base + 0x220 + (chan->idx * 4))
-#define XOR_DEST_POINTER(chan) (chan->mmr_base + 0x2B0 + (chan->idx * 4))
-#define XOR_BLOCK_SIZE(chan) (chan->mmr_base + 0x2C0 + (chan->idx * 4))
-#define XOR_INIT_VALUE_LOW(chan) (chan->mmr_base + 0x2E0)
-#define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_base + 0x2E4)
+#define XOR_CURR_DESC(chan) (chan->mmr_high_base + 0x10 + (chan->idx * 4))
+#define XOR_NEXT_DESC(chan) (chan->mmr_high_base + 0x00 + (chan->idx * 4))
+#define XOR_BYTE_COUNT(chan) (chan->mmr_high_base + 0x20 + (chan->idx * 4))
+#define XOR_DEST_POINTER(chan) (chan->mmr_high_base + 0xB0 + (chan->idx * 4))
+#define XOR_BLOCK_SIZE(chan) (chan->mmr_high_base + 0xC0 + (chan->idx * 4))
+#define XOR_INIT_VALUE_LOW(chan) (chan->mmr_high_base + 0xE0)
+#define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_high_base + 0xE4)
#define XOR_CONFIG(chan) (chan->mmr_base + 0x10 + (chan->idx * 4))
#define XOR_ACTIVATION(chan) (chan->mmr_base + 0x20 + (chan->idx * 4))
@@ -50,11 +50,11 @@
#define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60)
#define XOR_INTR_MASK_VALUE 0x3F5
-#define WINDOW_BASE(w) (0x250 + ((w) << 2))
-#define WINDOW_SIZE(w) (0x270 + ((w) << 2))
-#define WINDOW_REMAP_HIGH(w) (0x290 + ((w) << 2))
-#define WINDOW_BAR_ENABLE(chan) (0x240 + ((chan) << 2))
-#define WINDOW_OVERRIDE_CTRL(chan) (0x2A0 + ((chan) << 2))
+#define WINDOW_BASE(w) (0x50 + ((w) << 2))
+#define WINDOW_SIZE(w) (0x70 + ((w) << 2))
+#define WINDOW_REMAP_HIGH(w) (0x90 + ((w) << 2))
+#define WINDOW_BAR_ENABLE(chan) (0x40 + ((chan) << 2))
+#define WINDOW_OVERRIDE_CTRL(chan) (0xA0 + ((chan) << 2))
struct mv_xor_device {
void __iomem *xor_base;
@@ -82,6 +82,7 @@ struct mv_xor_chan {
int pending;
spinlock_t lock; /* protects the descriptor slot pool */
void __iomem *mmr_base;
+ void __iomem *mmr_high_base;
unsigned int idx;
int irq;
enum dma_transaction_type current_type;
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index ccd13df841db..ead491346da7 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -27,6 +27,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
+#include <linux/list.h>
#include <asm/irq.h>
@@ -57,6 +58,9 @@
(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70)
#define HW_APBHX_CHn_SEMA(d, n) \
(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70)
+#define HW_APBHX_CHn_BAR(d, n) \
+ (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x070 : 0x130) + (n) * 0x70)
+#define HW_APBX_CHn_DEBUG1(d, n) (0x150 + (n) * 0x70)
/*
* ccw bits definitions
@@ -115,7 +119,9 @@ struct mxs_dma_chan {
int desc_count;
enum dma_status status;
unsigned int flags;
+ bool reset;
#define MXS_DMA_SG_LOOP (1 << 0)
+#define MXS_DMA_USE_SEMAPHORE (1 << 1)
};
#define MXS_DMA_CHANNELS 16
@@ -201,12 +207,47 @@ static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int chan_id = mxs_chan->chan.chan_id;
- if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
+ /*
+ * mxs dma channel resets can cause a channel stall. To recover from a
+ * channel stall, we have to reset the whole DMA engine. To avoid this,
+	 * we use cyclic DMA with semaphores, which are incremented in
+ * mxs_dma_int_handler. To reset the channel, we can simply stop writing
+ * into the semaphore counter.
+ */
+ if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE &&
+ mxs_chan->flags & MXS_DMA_SG_LOOP) {
+ mxs_chan->reset = true;
+ } else if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) {
writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
- else
+ } else {
+ unsigned long elapsed = 0;
+ const unsigned long max_wait = 50000; /* 50ms */
+ void __iomem *reg_dbg1 = mxs_dma->base +
+ HW_APBX_CHn_DEBUG1(mxs_dma, chan_id);
+
+ /*
+ * On i.MX28 APBX, the DMA channel can stop working if we reset
+ * the channel while it is in READ_FLUSH (0x08) state.
+	 * We wait here until the channel leaves that state, then trigger
+	 * the reset. The wait is capped at 50ms, so a stuck channel cannot
+	 * stall the kernel indefinitely.
+ */
+ while ((readl(reg_dbg1) & 0xf) == 0x8 && elapsed < max_wait) {
+ udelay(100);
+ elapsed += 100;
+ }
+
+ if (elapsed >= max_wait)
+ dev_err(&mxs_chan->mxs_dma->pdev->dev,
+ "Failed waiting for the DMA channel %d to leave state READ_FLUSH, trying to reset channel in READ_FLUSH state now\n",
+ chan_id);
+
writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
+ }
+
+ mxs_chan->status = DMA_COMPLETE;
}
static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
@@ -219,12 +260,21 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));
/* write 1 to SEMA to kick off the channel */
- writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
+ if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE &&
+ mxs_chan->flags & MXS_DMA_SG_LOOP) {
+		/*
+		 * A cyclic DMA consists of at least 2 segments, so initialize
+		 * the semaphore to 2; that leaves enough time to add 1 to the
+		 * semaphore when the interrupt handler needs to.
+		 */
+ writel(2, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
+ } else {
+ writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
+ }
+ mxs_chan->reset = false;
}
static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
{
- mxs_chan->status = DMA_SUCCESS;
+ mxs_chan->status = DMA_COMPLETE;
}
static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
@@ -272,58 +322,88 @@ static void mxs_dma_tasklet(unsigned long data)
mxs_chan->desc.callback(mxs_chan->desc.callback_param);
}
+static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq)
+{
+ int i;
+
+ for (i = 0; i != mxs_dma->nr_channels; ++i)
+ if (mxs_dma->mxs_chans[i].chan_irq == irq)
+ return i;
+
+ return -EINVAL;
+}
+
static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
{
struct mxs_dma_engine *mxs_dma = dev_id;
- u32 stat1, stat2;
+ struct mxs_dma_chan *mxs_chan;
+ u32 completed;
+ u32 err;
+ int chan = mxs_dma_irq_to_chan(mxs_dma, irq);
+
+ if (chan < 0)
+ return IRQ_NONE;
/* completion status */
- stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1);
- stat1 &= MXS_DMA_CHANNELS_MASK;
- writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);
+ completed = readl(mxs_dma->base + HW_APBHX_CTRL1);
+ completed = (completed >> chan) & 0x1;
+
+ /* Clear interrupt */
+ writel((1 << chan),
+ mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);
/* error status */
- stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2);
- writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);
+ err = readl(mxs_dma->base + HW_APBHX_CTRL2);
+ err &= (1 << (MXS_DMA_CHANNELS + chan)) | (1 << chan);
+
+ /*
+ * error status bit is in the upper 16 bits, error irq bit in the lower
+ * 16 bits. We transform it into a simpler error code:
+ * err: 0x00 = no error, 0x01 = TERMINATION, 0x02 = BUS_ERROR
+ */
+ err = (err >> (MXS_DMA_CHANNELS + chan)) + (err >> chan);
+
+ /* Clear error irq */
+ writel((1 << chan),
+ mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);
/*
* When both completion and error of termination bits set at the
* same time, we do not take it as an error. IOW, it only becomes
- * an error we need to handle here in case of either it's (1) a bus
- * error or (2) a termination error with no completion.
+	 * an error we need to handle here only when it is either a bus
+	 * error or a termination error with no completion. 0x01 is a
+	 * termination error, so subtracting (err & completed) leaves only
+	 * the real error cases.
*/
- stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
- (~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */
-
- /* combine error and completion status for checking */
- stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1;
- while (stat1) {
- int channel = fls(stat1) - 1;
- struct mxs_dma_chan *mxs_chan =
- &mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS];
-
- if (channel >= MXS_DMA_CHANNELS) {
- dev_dbg(mxs_dma->dma_device.dev,
- "%s: error in channel %d\n", __func__,
- channel - MXS_DMA_CHANNELS);
- mxs_chan->status = DMA_ERROR;
- mxs_dma_reset_chan(mxs_chan);
- } else {
- if (mxs_chan->flags & MXS_DMA_SG_LOOP)
- mxs_chan->status = DMA_IN_PROGRESS;
- else
- mxs_chan->status = DMA_SUCCESS;
- }
+ err -= err & completed;
- stat1 &= ~(1 << channel);
+ mxs_chan = &mxs_dma->mxs_chans[chan];
- if (mxs_chan->status == DMA_SUCCESS)
- dma_cookie_complete(&mxs_chan->desc);
+ if (err) {
+ dev_dbg(mxs_dma->dma_device.dev,
+ "%s: error in channel %d\n", __func__,
+ chan);
+ mxs_chan->status = DMA_ERROR;
+ mxs_dma_reset_chan(mxs_chan);
+ } else if (mxs_chan->status != DMA_COMPLETE) {
+ if (mxs_chan->flags & MXS_DMA_SG_LOOP) {
+ mxs_chan->status = DMA_IN_PROGRESS;
+ if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE)
+ writel(1, mxs_dma->base +
+ HW_APBHX_CHn_SEMA(mxs_dma, chan));
+ } else {
+ mxs_chan->status = DMA_COMPLETE;
+ }
+ }
- /* schedule tasklet on this channel */
- tasklet_schedule(&mxs_chan->tasklet);
+ if (mxs_chan->status == DMA_COMPLETE) {
+ if (mxs_chan->reset)
+ return IRQ_HANDLED;
+ dma_cookie_complete(&mxs_chan->desc);
}
+ /* schedule tasklet on this channel */
+ tasklet_schedule(&mxs_chan->tasklet);
+
return IRQ_HANDLED;
}
@@ -523,6 +603,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
mxs_chan->status = DMA_IN_PROGRESS;
mxs_chan->flags |= MXS_DMA_SG_LOOP;
+ mxs_chan->flags |= MXS_DMA_USE_SEMAPHORE;
if (num_periods > NUM_CCW) {
dev_err(mxs_dma->dma_device.dev,
@@ -554,6 +635,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
ccw->bits |= CCW_IRQ;
ccw->bits |= CCW_HALT_ON_TERM;
ccw->bits |= CCW_TERM_FLUSH;
+ ccw->bits |= CCW_DEC_SEM;
ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);
@@ -599,8 +681,24 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+ u32 residue = 0;
+
+ if (mxs_chan->status == DMA_IN_PROGRESS &&
+ mxs_chan->flags & MXS_DMA_SG_LOOP) {
+ struct mxs_dma_ccw *last_ccw;
+ u32 bar;
+
+ last_ccw = &mxs_chan->ccw[mxs_chan->desc_count - 1];
+ residue = last_ccw->xfer_bytes + last_ccw->bufaddr;
+
+ bar = readl(mxs_dma->base +
+ HW_APBHX_CHn_BAR(mxs_dma, chan->chan_id));
+ residue -= bar;
+ }
- dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);
+ dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
+ residue);
return mxs_chan->status;
}
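A worked example for the cyclic residue computation added above: the last ccw describes the final segment of the ring, so last_ccw->bufaddr + last_ccw->xfer_bytes is the end address of the whole buffer. With a 4 KiB ring starting at 0x1000 (end address 0x2000) and the channel's Buffer Address Register (HW_APBHX_CHn_BAR) currently reading 0x1800, the residue is 0x2000 - 0x1800 = 0x800 bytes still to be transferred in this pass.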
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index ec3fc4fd9160..2f66cf4e54fe 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -248,7 +248,7 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
unsigned long flags;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS || !txstate)
+ if (ret == DMA_COMPLETE || !txstate)
return ret;
spin_lock_irqsave(&c->vc.lock, flags);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index df8b10fd1726..536632f6479c 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2268,6 +2268,8 @@ static void pl330_tasklet(unsigned long data)
list_move_tail(&desc->node, &pch->dmac->desc_pool);
}
+ dma_descriptor_unmap(&desc->txd);
+
if (callback) {
spin_unlock_irqrestore(&pch->lock, flags);
callback(callback_param);
@@ -2314,7 +2316,7 @@ bool pl330_filter(struct dma_chan *chan, void *param)
return false;
peri_id = chan->private;
- return *peri_id == (unsigned)param;
+ return *peri_id == (unsigned long)param;
}
EXPORT_SYMBOL(pl330_filter);
@@ -2490,12 +2492,9 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
static inline void _init_desc(struct dma_pl330_desc *desc)
{
- desc->pchan = NULL;
desc->req.x = &desc->px;
desc->req.token = desc;
desc->rqcfg.swap = SWAP_NO;
- desc->rqcfg.privileged = 0;
- desc->rqcfg.insnaccess = 0;
desc->rqcfg.scctl = SCCTRL0;
desc->rqcfg.dcctl = DCCTRL0;
desc->req.cfg = &desc->rqcfg;
@@ -2515,7 +2514,7 @@ static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
if (!pdmac)
return 0;
- desc = kmalloc(count * sizeof(*desc), flg);
+ desc = kcalloc(count, sizeof(*desc), flg);
if (!desc)
return 0;
@@ -2926,16 +2925,23 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
amba_set_drvdata(adev, pdmac);
- irq = adev->irq[0];
- ret = request_irq(irq, pl330_irq_handler, 0,
- dev_name(&adev->dev), pi);
- if (ret)
- return ret;
+ for (i = 0; i < AMBA_NR_IRQS; i++) {
+ irq = adev->irq[i];
+ if (irq) {
+ ret = devm_request_irq(&adev->dev, irq,
+ pl330_irq_handler, 0,
+ dev_name(&adev->dev), pi);
+ if (ret)
+ return ret;
+ } else {
+ break;
+ }
+ }
pi->pcfg.periph_id = adev->periphid;
ret = pl330_add(pi);
if (ret)
- goto probe_err1;
+ return ret;
INIT_LIST_HEAD(&pdmac->desc_pool);
spin_lock_init(&pdmac->pool_lock);
@@ -3033,8 +3039,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
probe_err3:
- amba_set_drvdata(adev, NULL);
-
/* Idle the DMAC */
list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
chan.device_node) {
@@ -3048,8 +3052,6 @@ probe_err3:
}
probe_err2:
pl330_del(pi);
-probe_err1:
- free_irq(irq, pi);
return ret;
}
@@ -3059,7 +3061,6 @@ static int pl330_remove(struct amba_device *adev)
struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
struct dma_pl330_chan *pch, *_p;
struct pl330_info *pi;
- int irq;
if (!pdmac)
return 0;
@@ -3068,7 +3069,6 @@ static int pl330_remove(struct amba_device *adev)
of_dma_controller_free(adev->dev.of_node);
dma_async_device_unregister(&pdmac->ddma);
- amba_set_drvdata(adev, NULL);
/* Idle the DMAC */
list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
@@ -3086,9 +3086,6 @@ static int pl330_remove(struct amba_device *adev)
pl330_del(pi);
- irq = adev->irq[0];
- free_irq(irq, pi);
-
return 0;
}
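
Besides moving to devm_request_irq(), the pl330 hunks replace kmalloc(count * sizeof(*desc)) with kcalloc(count, sizeof(*desc)), which zeroes the allocation and fails cleanly if the multiplication would overflow. A userspace analogue of the same trade-off, with calloc standing in for kcalloc:

#include <stdlib.h>

struct desc { int cookie; void *payload; };

/* malloc(n * size) can silently wrap when n is externally controlled;
 * calloc(n, size) detects the overflow, fails, and returns zeroed
 * storage, so no separate memset is needed. */
static struct desc *alloc_descs(size_t count)
{
	return calloc(count, sizeof(struct desc));
}

int main(void)
{
	struct desc *d = alloc_descs(16);

	if (!d)
		return 1;
	free(d);
	return 0;
}
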
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index e24b5ef486b5..8bba298535b0 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -533,29 +533,6 @@ static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
}
/**
- * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation
- */
-static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc,
- int value, unsigned long flags)
-{
- struct dma_cdb *hw_desc = desc->hw_desc;
-
- memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
- desc->hw_next = NULL;
- desc->src_cnt = 1;
- desc->dst_cnt = 1;
-
- if (flags & DMA_PREP_INTERRUPT)
- set_bit(PPC440SPE_DESC_INT, &desc->flags);
- else
- clear_bit(PPC440SPE_DESC_INT, &desc->flags);
-
- hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value);
- hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value);
- hw_desc->opc = DMA_CDB_OPC_DFILL128;
-}
-
-/**
* ppc440spe_desc_set_src_addr - set source address into the descriptor
*/
static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc,
@@ -804,218 +781,6 @@ static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan,
}
/**
- * ppc440spe_desc_get_src_addr - extract the source address from the descriptor
- */
-static u32 ppc440spe_desc_get_src_addr(struct ppc440spe_adma_desc_slot *desc,
- struct ppc440spe_adma_chan *chan, int src_idx)
-{
- struct dma_cdb *dma_hw_desc;
- struct xor_cb *xor_hw_desc;
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- dma_hw_desc = desc->hw_desc;
- /* May have 0, 1, 2, or 3 sources */
- switch (dma_hw_desc->opc) {
- case DMA_CDB_OPC_NO_OP:
- case DMA_CDB_OPC_DFILL128:
- return 0;
- case DMA_CDB_OPC_DCHECK128:
- if (unlikely(src_idx)) {
- printk(KERN_ERR "%s: try to get %d source for"
- " DCHECK128\n", __func__, src_idx);
- BUG();
- }
- return le32_to_cpu(dma_hw_desc->sg1l);
- case DMA_CDB_OPC_MULTICAST:
- case DMA_CDB_OPC_MV_SG1_SG2:
- if (unlikely(src_idx > 2)) {
- printk(KERN_ERR "%s: try to get %d source from"
- " DMA descr\n", __func__, src_idx);
- BUG();
- }
- if (src_idx) {
- if (le32_to_cpu(dma_hw_desc->sg1u) &
- DMA_CUED_XOR_WIN_MSK) {
- u8 region;
-
- if (src_idx == 1)
- return le32_to_cpu(
- dma_hw_desc->sg1l) +
- desc->unmap_len;
-
- region = (le32_to_cpu(
- dma_hw_desc->sg1u)) >>
- DMA_CUED_REGION_OFF;
-
- region &= DMA_CUED_REGION_MSK;
- switch (region) {
- case DMA_RXOR123:
- return le32_to_cpu(
- dma_hw_desc->sg1l) +
- (desc->unmap_len << 1);
- case DMA_RXOR124:
- return le32_to_cpu(
- dma_hw_desc->sg1l) +
- (desc->unmap_len * 3);
- case DMA_RXOR125:
- return le32_to_cpu(
- dma_hw_desc->sg1l) +
- (desc->unmap_len << 2);
- default:
- printk(KERN_ERR
- "%s: try to"
- " get src3 for region %02x"
- "PPC440SPE_DESC_RXOR12?\n",
- __func__, region);
- BUG();
- }
- } else {
- printk(KERN_ERR
- "%s: try to get %d"
- " source for non-cued descr\n",
- __func__, src_idx);
- BUG();
- }
- }
- return le32_to_cpu(dma_hw_desc->sg1l);
- default:
- printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
- __func__, dma_hw_desc->opc);
- BUG();
- }
- return le32_to_cpu(dma_hw_desc->sg1l);
- case PPC440SPE_XOR_ID:
- /* May have up to 16 sources */
- xor_hw_desc = desc->hw_desc;
- return xor_hw_desc->ops[src_idx].l;
- }
- return 0;
-}
-
-/**
- * ppc440spe_desc_get_dest_addr - extract the destination address from the
- * descriptor
- */
-static u32 ppc440spe_desc_get_dest_addr(struct ppc440spe_adma_desc_slot *desc,
- struct ppc440spe_adma_chan *chan, int idx)
-{
- struct dma_cdb *dma_hw_desc;
- struct xor_cb *xor_hw_desc;
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- dma_hw_desc = desc->hw_desc;
-
- if (likely(!idx))
- return le32_to_cpu(dma_hw_desc->sg2l);
- return le32_to_cpu(dma_hw_desc->sg3l);
- case PPC440SPE_XOR_ID:
- xor_hw_desc = desc->hw_desc;
- return xor_hw_desc->cbtal;
- }
- return 0;
-}
-
-/**
- * ppc440spe_desc_get_src_num - extract the number of source addresses from
- * the descriptor
- */
-static u32 ppc440spe_desc_get_src_num(struct ppc440spe_adma_desc_slot *desc,
- struct ppc440spe_adma_chan *chan)
-{
- struct dma_cdb *dma_hw_desc;
- struct xor_cb *xor_hw_desc;
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- dma_hw_desc = desc->hw_desc;
-
- switch (dma_hw_desc->opc) {
- case DMA_CDB_OPC_NO_OP:
- case DMA_CDB_OPC_DFILL128:
- return 0;
- case DMA_CDB_OPC_DCHECK128:
- return 1;
- case DMA_CDB_OPC_MV_SG1_SG2:
- case DMA_CDB_OPC_MULTICAST:
- /*
- * Only for RXOR operations we have more than
- * one source
- */
- if (le32_to_cpu(dma_hw_desc->sg1u) &
- DMA_CUED_XOR_WIN_MSK) {
- /* RXOR op, there are 2 or 3 sources */
- if (((le32_to_cpu(dma_hw_desc->sg1u) >>
- DMA_CUED_REGION_OFF) &
- DMA_CUED_REGION_MSK) == DMA_RXOR12) {
- /* RXOR 1-2 */
- return 2;
- } else {
- /* RXOR 1-2-3/1-2-4/1-2-5 */
- return 3;
- }
- }
- return 1;
- default:
- printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
- __func__, dma_hw_desc->opc);
- BUG();
- }
- case PPC440SPE_XOR_ID:
- /* up to 16 sources */
- xor_hw_desc = desc->hw_desc;
- return xor_hw_desc->cbc & XOR_CDCR_OAC_MSK;
- default:
- BUG();
- }
- return 0;
-}
-
-/**
- * ppc440spe_desc_get_dst_num - get the number of destination addresses in
- * this descriptor
- */
-static u32 ppc440spe_desc_get_dst_num(struct ppc440spe_adma_desc_slot *desc,
- struct ppc440spe_adma_chan *chan)
-{
- struct dma_cdb *dma_hw_desc;
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- /* May be 1 or 2 destinations */
- dma_hw_desc = desc->hw_desc;
- switch (dma_hw_desc->opc) {
- case DMA_CDB_OPC_NO_OP:
- case DMA_CDB_OPC_DCHECK128:
- return 0;
- case DMA_CDB_OPC_MV_SG1_SG2:
- case DMA_CDB_OPC_DFILL128:
- return 1;
- case DMA_CDB_OPC_MULTICAST:
- if (desc->dst_cnt == 2)
- return 2;
- else
- return 1;
- default:
- printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
- __func__, dma_hw_desc->opc);
- BUG();
- }
- case PPC440SPE_XOR_ID:
- /* Always only 1 destination */
- return 1;
- default:
- BUG();
- }
- return 0;
-}
-
-/**
* ppc440spe_desc_get_link - get the address of the descriptor that
* follows this one
*/
@@ -1707,43 +1472,6 @@ static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot,
}
}
-static void ppc440spe_adma_unmap(struct ppc440spe_adma_chan *chan,
- struct ppc440spe_adma_desc_slot *desc)
-{
- u32 src_cnt, dst_cnt;
- dma_addr_t addr;
-
- /*
- * get the number of sources & destination
- * included in this descriptor and unmap
- * them all
- */
- src_cnt = ppc440spe_desc_get_src_num(desc, chan);
- dst_cnt = ppc440spe_desc_get_dst_num(desc, chan);
-
- /* unmap destinations */
- if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- while (dst_cnt--) {
- addr = ppc440spe_desc_get_dest_addr(
- desc, chan, dst_cnt);
- dma_unmap_page(chan->device->dev,
- addr, desc->unmap_len,
- DMA_FROM_DEVICE);
- }
- }
-
- /* unmap sources */
- if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- while (src_cnt--) {
- addr = ppc440spe_desc_get_src_addr(
- desc, chan, src_cnt);
- dma_unmap_page(chan->device->dev,
- addr, desc->unmap_len,
- DMA_TO_DEVICE);
- }
- }
-}
-
/**
* ppc440spe_adma_run_tx_complete_actions - call functions to be called
* upon completion
@@ -1753,8 +1481,6 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
struct ppc440spe_adma_chan *chan,
dma_cookie_t cookie)
{
- int i;
-
BUG_ON(desc->async_tx.cookie < 0);
if (desc->async_tx.cookie > 0) {
cookie = desc->async_tx.cookie;
@@ -1767,26 +1493,7 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
desc->async_tx.callback(
desc->async_tx.callback_param);
- /* unmap dma addresses
- * (unmap_single vs unmap_page?)
- *
- * actually, ppc's dma_unmap_page() functions are empty, so
- * the following code is just for the sake of completeness
- */
- if (chan && chan->needs_unmap && desc->group_head &&
- desc->unmap_len) {
- struct ppc440spe_adma_desc_slot *unmap =
- desc->group_head;
- /* assume 1 slot per op always */
- u32 slot_count = unmap->slot_cnt;
-
- /* Run through the group list and unmap addresses */
- for (i = 0; i < slot_count; i++) {
- BUG_ON(!unmap);
- ppc440spe_adma_unmap(chan, unmap);
- unmap = unmap->hw_next;
- }
- }
+ dma_descriptor_unmap(&desc->async_tx);
}
/* run dependent operations */
@@ -3893,7 +3600,7 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
ppc440spe_chan = to_ppc440spe_adma_chan(chan);
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
ppc440spe_adma_slot_cleanup(ppc440spe_chan);
@@ -4166,7 +3873,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
ppc440spe_adma_prep_dma_interrupt;
}
pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
- "( %s%s%s%s%s%s%s)\n",
+ "( %s%s%s%s%s%s)\n",
dev_name(adev->dev),
dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 4cb127978636..4eddedb6eb7d 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -628,42 +628,13 @@ retry:
s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
}
-static void s3c24xx_dma_unmap_buffers(struct s3c24xx_txd *txd)
-{
- struct device *dev = txd->vd.tx.chan->device->dev;
- struct s3c24xx_sg *dsg;
-
- if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
- list_for_each_entry(dsg, &txd->dsg_list, node)
- dma_unmap_single(dev, dsg->src_addr, dsg->len,
- DMA_TO_DEVICE);
- else {
- list_for_each_entry(dsg, &txd->dsg_list, node)
- dma_unmap_page(dev, dsg->src_addr, dsg->len,
- DMA_TO_DEVICE);
- }
- }
-
- if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
- list_for_each_entry(dsg, &txd->dsg_list, node)
- dma_unmap_single(dev, dsg->dst_addr, dsg->len,
- DMA_FROM_DEVICE);
- else
- list_for_each_entry(dsg, &txd->dsg_list, node)
- dma_unmap_page(dev, dsg->dst_addr, dsg->len,
- DMA_FROM_DEVICE);
- }
-}
-
static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd)
{
struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan);
if (!s3cchan->slave)
- s3c24xx_dma_unmap_buffers(txd);
+ dma_descriptor_unmap(&vd->tx);
s3c24xx_dma_free_txd(txd);
}
@@ -795,7 +766,7 @@ static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,
spin_lock_irqsave(&s3cchan->vc.lock, flags);
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS) {
+ if (ret == DMA_COMPLETE) {
spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
return ret;
}
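
This s3c24xx hunk is one of several in the series (see also ppc4xx above, and timb_dma and txx9dmac below) that delete per-driver unmap loops in favour of one call to the core's new dma_descriptor_unmap() helper at completion time. A stubbed sketch of the resulting completion-path shape (types reduced to the minimum; the real helper replays unmap data recorded at prep time):

#include <stddef.h>

struct dma_async_tx_descriptor {
	void (*callback)(void *param);
	void *callback_param;
};

/* Stub: the real core helper unmaps the addresses recorded with the
 * descriptor, replacing the DMA_COMPL_SKIP_*_UNMAP flag checks and
 * dma_unmap_single()/dma_unmap_page() loops each driver used to carry. */
static void dma_descriptor_unmap(struct dma_async_tx_descriptor *txd)
{
	(void)txd;
}

static void complete_descriptor(struct dma_async_tx_descriptor *txd)
{
	dma_descriptor_unmap(txd);	/* one call instead of driver code */
	if (txd->callback)
		txd->callback(txd->callback_param);
}

int main(void)
{
	struct dma_async_tx_descriptor txd = { NULL, NULL };

	complete_descriptor(&txd);
	return 0;
}
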
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 461a91ab70bb..ab26d46bbe15 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -436,7 +436,7 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
enum dma_status ret;
ret = dma_cookie_status(&c->vc.chan, cookie, state);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
if (!state)
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
index ebad84591a6e..3083d901a414 100644
--- a/drivers/dma/sh/rcar-hpbdma.c
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -60,6 +60,7 @@
#define HPB_DMAE_DSTPR_DMSTP BIT(0)
/* DMA status register (DSTSR) bits */
+#define HPB_DMAE_DSTSR_DQSTS BIT(2)
#define HPB_DMAE_DSTSR_DMSTS BIT(0)
/* DMA common registers */
@@ -286,6 +287,9 @@ static void hpb_dmae_halt(struct shdma_chan *schan)
ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR);
ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR);
+
+ chan->plane_idx = 0;
+ chan->first_desc = true;
}
static const struct hpb_dmae_slave_config *
@@ -385,7 +389,10 @@ static bool hpb_dmae_channel_busy(struct shdma_chan *schan)
struct hpb_dmae_chan *chan = to_chan(schan);
u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR);
- return (dstsr & HPB_DMAE_DSTSR_DMSTS) == HPB_DMAE_DSTSR_DMSTS;
+ if (chan->xfer_mode == XFER_DOUBLE)
+ return dstsr & HPB_DMAE_DSTSR_DQSTS;
+ else
+ return dstsr & HPB_DMAE_DSTSR_DMSTS;
}
static int
@@ -510,6 +517,8 @@ static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
}
schan = &new_hpb_chan->shdma_chan;
+ schan->max_xfer_len = HPB_DMA_TCR_MAX;
+
shdma_chan_probe(sdev, schan, id);
if (pdev->id >= 0)
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index d94ab592cc1b..2e7b394def80 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -724,7 +724,7 @@ static enum dma_status shdma_tx_status(struct dma_chan *chan,
* If we don't find cookie on the queue, it has been aborted and we have
* to report error
*/
- if (status != DMA_SUCCESS) {
+ if (status != DMA_COMPLETE) {
struct shdma_desc *sdesc;
status = DMA_ERROR;
list_for_each_entry(sdesc, &schan->ld_queue, node)
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 1069e8869f20..0d765c0e21ec 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -685,7 +685,7 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
static int sh_dmae_probe(struct platform_device *pdev)
{
const struct sh_dmae_pdata *pdata;
- unsigned long irqflags = IRQF_DISABLED,
+ unsigned long irqflags = 0,
chan_flag[SH_DMAE_MAX_CHANNELS] = {};
int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
@@ -838,7 +838,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
IORESOURCE_IRQ_SHAREABLE)
chan_flag[irq_cnt] = IRQF_SHARED;
else
- chan_flag[irq_cnt] = IRQF_DISABLED;
+ chan_flag[irq_cnt] = 0;
dev_dbg(&pdev->dev,
"Found IRQ %d for channel %d\n",
i, irq_cnt);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 82d2b97ad942..b8c031b7de4e 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/log2.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
@@ -2626,7 +2627,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
}
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_SUCCESS)
+ if (ret != DMA_COMPLETE)
dma_set_residue(txstate, stedma40_residue(chan));
if (d40_is_paused(d40c))
@@ -2796,8 +2797,8 @@ static int d40_set_runtime_config(struct dma_chan *chan,
src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
- ((src_addr_width > 1) && (src_addr_width & 1)) ||
- ((dst_addr_width > 1) && (dst_addr_width & 1)))
+ !is_power_of_2(src_addr_width) ||
+ !is_power_of_2(dst_addr_width))
return -EINVAL;
cfg->src_info.data_width = src_addr_width;
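
The old ste_dma40 bus-width check only rejected odd widths greater than one, so an invalid width such as 6 slipped through; is_power_of_2() catches it. A quick demonstration of the two predicates:

#include <stdbool.h>
#include <stdio.h>

static bool old_check_ok(unsigned int w)	/* the replaced test */
{
	return !((w > 1) && (w & 1));
}

static bool is_power_of_2(unsigned int w)	/* what the hunk switches to */
{
	return w != 0 && (w & (w - 1)) == 0;
}

int main(void)
{
	/* width 6 is not a valid bus width, yet the old test accepted it */
	printf("old: %d, new: %d\n", old_check_ok(6), is_power_of_2(6));
	/* both agree on the legitimate widths 1, 2, 4 and 8 */
	printf("old: %d, new: %d\n", old_check_ok(4), is_power_of_2(4));
	return 0;
}
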
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 5d4986e5f5fa..73654e33f13b 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -570,7 +570,7 @@ static void handle_once_dma_done(struct tegra_dma_channel *tdc,
list_del(&sgreq->node);
if (sgreq->last_sg) {
- dma_desc->dma_status = DMA_SUCCESS;
+ dma_desc->dma_status = DMA_COMPLETE;
dma_cookie_complete(&dma_desc->txd);
if (!dma_desc->cb_count)
list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
@@ -768,7 +768,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
unsigned int residual;
ret = dma_cookie_status(dc, cookie, txstate);
- if (ret == DMA_SUCCESS)
+ if (ret == DMA_COMPLETE)
return ret;
spin_lock_irqsave(&tdc->lock, flags);
@@ -1018,7 +1018,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
return &dma_desc->txd;
}
-struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
+static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
unsigned long flags, void *context)
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 28af214fce04..4506a7b4f972 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -154,38 +154,6 @@ static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
return done;
}
-static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
- bool single)
-{
- dma_addr_t addr;
- int len;
-
- addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) |
- dma_desc[4];
-
- len = (dma_desc[3] << 8) | dma_desc[2];
-
- if (single)
- dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
- DMA_TO_DEVICE);
- else
- dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
- DMA_TO_DEVICE);
-}
-
-static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
-{
- struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan,
- struct timb_dma_chan, chan);
- u8 *descs;
-
- for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) {
- __td_unmap_desc(td_chan, descs, single);
- if (descs[0] & 0x02)
- break;
- }
-}
-
static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
struct scatterlist *sg, bool last)
{
@@ -293,10 +261,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
list_move(&td_desc->desc_node, &td_chan->free_list);
- if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
- __td_unmap_descs(td_desc,
- txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);
-
+ dma_descriptor_unmap(txd);
/*
* The API requires that no submissions are done from a
* callback, so we don't need to drop the lock here
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 71e8e775189e..17686caf64d5 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -406,7 +406,6 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
dma_async_tx_callback callback;
void *param;
struct dma_async_tx_descriptor *txd = &desc->txd;
- struct txx9dmac_slave *ds = dc->chan.private;
dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
txd->cookie, desc);
@@ -419,30 +418,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
list_splice_init(&desc->tx_list, &dc->free_list);
list_move(&desc->desc_node, &dc->free_list);
- if (!ds) {
- dma_addr_t dmaaddr;
- if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- dmaaddr = is_dmac64(dc) ?
- desc->hwdesc.DAR : desc->hwdesc32.DAR;
- if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
- dma_unmap_single(chan2parent(&dc->chan),
- dmaaddr, desc->len, DMA_FROM_DEVICE);
- else
- dma_unmap_page(chan2parent(&dc->chan),
- dmaaddr, desc->len, DMA_FROM_DEVICE);
- }
- if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- dmaaddr = is_dmac64(dc) ?
- desc->hwdesc.SAR : desc->hwdesc32.SAR;
- if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
- dma_unmap_single(chan2parent(&dc->chan),
- dmaaddr, desc->len, DMA_TO_DEVICE);
- else
- dma_unmap_page(chan2parent(&dc->chan),
- dmaaddr, desc->len, DMA_TO_DEVICE);
- }
- }
-
+ dma_descriptor_unmap(txd);
/*
* The API requires that no submissions are done from a
* callback, so we don't need to drop the lock here
@@ -962,8 +938,8 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS)
- return DMA_SUCCESS;
+ if (ret == DMA_COMPLETE)
+ return DMA_COMPLETE;
spin_lock_bh(&dc->lock);
txx9dmac_scan_descriptors(dc);
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 8472405c5586..d7f1b57bd3be 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -945,7 +945,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
u32 tad_offset;
u32 rir_way;
u32 mb, kb;
- u64 ch_addr, offset, limit, prv = 0;
+ u64 ch_addr, offset, limit = 0, prv = 0;
/*
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 3c55ec856e39..a287cece0593 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -1082,7 +1082,7 @@ static void arizona_micd_set_level(struct arizona *arizona, int index,
static int arizona_extcon_probe(struct platform_device *pdev)
{
struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
- struct arizona_pdata *pdata;
+ struct arizona_pdata *pdata = &arizona->pdata;
struct arizona_extcon_info *info;
unsigned int val;
int jack_irq_fall, jack_irq_rise;
@@ -1091,8 +1091,6 @@ static int arizona_extcon_probe(struct platform_device *pdev)
if (!arizona->dapm || !arizona->dapm->card)
return -EPROBE_DEFER;
- pdata = dev_get_platdata(arizona->dev);
-
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info) {
dev_err(&pdev->dev, "Failed to allocate memory\n");
diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c
index 15443d3b6be1..76322330cbd7 100644
--- a/drivers/extcon/extcon-class.c
+++ b/drivers/extcon/extcon-class.c
@@ -792,6 +792,8 @@ void extcon_dev_unregister(struct extcon_dev *edev)
return;
}
+ device_unregister(&edev->dev);
+
if (edev->mutually_exclusive && edev->max_supported) {
for (index = 0; edev->mutually_exclusive[index];
index++)
@@ -812,7 +814,6 @@ void extcon_dev_unregister(struct extcon_dev *edev)
if (switch_class)
class_compat_remove_link(switch_class, &edev->dev, NULL);
#endif
- device_unregister(&edev->dev);
put_device(&edev->dev);
}
EXPORT_SYMBOL_GPL(extcon_dev_unregister);
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 299fad6b5867..5373dc5b6011 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
obj-$(CONFIG_EFI) += efi/
+obj-$(CONFIG_UEFI_CPER) += efi/
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 3150aa4874e8..6aecbc86ec94 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -36,7 +36,7 @@ config EFI_VARS_PSTORE_DEFAULT_DISABLE
backend for pstore by default. This setting can be overridden
using the efivars module's pstore_disable parameter.
-config UEFI_CPER
- def_bool n
-
endmenu
+
+config UEFI_CPER
+ bool
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 9ba156d3c775..6c2a41ec21ba 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -1,7 +1,7 @@
#
# Makefile for linux kernel
#
-obj-y += efi.o vars.o
+obj-$(CONFIG_EFI) += efi.o vars.o
obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o
obj-$(CONFIG_UEFI_CPER) += cper.o
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 5002d50e3781..4b9dc836dcf9 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -18,14 +18,12 @@ module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
static int efi_pstore_open(struct pstore_info *psi)
{
- efivar_entry_iter_begin();
psi->data = NULL;
return 0;
}
static int efi_pstore_close(struct pstore_info *psi)
{
- efivar_entry_iter_end();
psi->data = NULL;
return 0;
}
@@ -39,6 +37,12 @@ struct pstore_read_data {
char **buf;
};
+static inline u64 generic_id(unsigned long timestamp,
+ unsigned int part, int count)
+{
+ return (timestamp * 100 + part) * 1000 + count;
+}
+
static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
{
efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
@@ -57,7 +61,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
if (sscanf(name, "dump-type%u-%u-%d-%lu-%c",
cb_data->type, &part, &cnt, &time, &data_type) == 5) {
- *cb_data->id = part;
+ *cb_data->id = generic_id(time, part, cnt);
*cb_data->count = cnt;
cb_data->timespec->tv_sec = time;
cb_data->timespec->tv_nsec = 0;
@@ -67,7 +71,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
*cb_data->compressed = false;
} else if (sscanf(name, "dump-type%u-%u-%d-%lu",
cb_data->type, &part, &cnt, &time) == 4) {
- *cb_data->id = part;
+ *cb_data->id = generic_id(time, part, cnt);
*cb_data->count = cnt;
cb_data->timespec->tv_sec = time;
cb_data->timespec->tv_nsec = 0;
@@ -79,7 +83,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
* which doesn't support holding
* multiple logs, remains.
*/
- *cb_data->id = part;
+ *cb_data->id = generic_id(time, part, 0);
*cb_data->count = 0;
cb_data->timespec->tv_sec = time;
cb_data->timespec->tv_nsec = 0;
@@ -91,19 +95,125 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
__efivar_entry_get(entry, &entry->var.Attributes,
&entry->var.DataSize, entry->var.Data);
size = entry->var.DataSize;
- *cb_data->buf = kmemdup(entry->var.Data, size, GFP_KERNEL);
- if (*cb_data->buf == NULL)
- return -ENOMEM;
+ memcpy(*cb_data->buf, entry->var.Data,
+ (size_t)min_t(unsigned long, EFIVARS_DATA_SIZE_MAX, size));
return size;
}
+/**
+ * efi_pstore_scan_sysfs_enter
+ * @pos: scanning entry
+ * @next: next entry
+ * @head: list head
+ */
+static void efi_pstore_scan_sysfs_enter(struct efivar_entry *pos,
+ struct efivar_entry *next,
+ struct list_head *head)
+{
+ pos->scanning = true;
+ if (&next->list != head)
+ next->scanning = true;
+}
+
+/**
+ * __efi_pstore_scan_sysfs_exit
+ * @entry: deleting entry
+ * @turn_off_scanning: Check if a scanning flag should be turned off
+ */
+static inline void __efi_pstore_scan_sysfs_exit(struct efivar_entry *entry,
+ bool turn_off_scanning)
+{
+ if (entry->deleting) {
+ list_del(&entry->list);
+ efivar_entry_iter_end();
+ efivar_unregister(entry);
+ efivar_entry_iter_begin();
+ } else if (turn_off_scanning)
+ entry->scanning = false;
+}
+
+/**
+ * efi_pstore_scan_sysfs_exit
+ * @pos: scanning entry
+ * @next: next entry
+ * @head: list head
+ * @stop: a flag checking if scanning will stop
+ */
+static void efi_pstore_scan_sysfs_exit(struct efivar_entry *pos,
+ struct efivar_entry *next,
+ struct list_head *head, bool stop)
+{
+ __efi_pstore_scan_sysfs_exit(pos, true);
+ if (stop)
+ __efi_pstore_scan_sysfs_exit(next, &next->list != head);
+}
+
+/**
+ * efi_pstore_sysfs_entry_iter
+ *
+ * @data: function-specific data to pass to callback
+ * @pos: entry to begin iterating from
+ *
+ * You MUST call efivar_entry_iter_begin() before this function, and
+ * efivar_entry_iter_end() afterwards.
+ *
+ * It is possible to begin iteration from an arbitrary entry within
+ * the list by passing @pos. On return, @pos is updated to point to
+ * the entry following the last one passed to efi_pstore_read_func().
+ * To begin iterating from the start of the list, @pos must be %NULL.
+ */
+static int efi_pstore_sysfs_entry_iter(void *data, struct efivar_entry **pos)
+{
+ struct efivar_entry *entry, *n;
+ struct list_head *head = &efivar_sysfs_list;
+ int size = 0;
+
+ if (!*pos) {
+ list_for_each_entry_safe(entry, n, head, list) {
+ efi_pstore_scan_sysfs_enter(entry, n, head);
+
+ size = efi_pstore_read_func(entry, data);
+ efi_pstore_scan_sysfs_exit(entry, n, head, size < 0);
+ if (size)
+ break;
+ }
+ *pos = n;
+ return size;
+ }
+
+ list_for_each_entry_safe_from((*pos), n, head, list) {
+ efi_pstore_scan_sysfs_enter((*pos), n, head);
+
+ size = efi_pstore_read_func((*pos), data);
+ efi_pstore_scan_sysfs_exit((*pos), n, head, size < 0);
+ if (size)
+ break;
+ }
+ *pos = n;
+ return size;
+}
+
+/**
+ * efi_pstore_read
+ *
+ * This function returns the size of an NVRAM entry logged via efi_pstore_write().
+ * The meaning and behavior of efi_pstore/pstore are as below.
+ *
+ * size > 0: Got data of an entry logged via efi_pstore_write() successfully,
+ * and pstore filesystem will continue reading subsequent entries.
+ * size == 0: Entry was not logged via efi_pstore_write(),
+ * and efi_pstore driver will continue reading subsequent entries.
+ * size < 0: Failed to get the data of an entry logged via efi_pstore_write(),
+ * and pstore will stop reading entries.
+ */
static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
int *count, struct timespec *timespec,
char **buf, bool *compressed,
struct pstore_info *psi)
{
struct pstore_read_data data;
+ ssize_t size;
data.id = id;
data.type = type;
@@ -112,8 +222,17 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
data.compressed = compressed;
data.buf = buf;
- return __efivar_entry_iter(efi_pstore_read_func, &efivar_sysfs_list, &data,
- (struct efivar_entry **)&psi->data);
+ *data.buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL);
+ if (!*data.buf)
+ return -ENOMEM;
+
+ efivar_entry_iter_begin();
+ size = efi_pstore_sysfs_entry_iter(&data,
+ (struct efivar_entry **)&psi->data);
+ efivar_entry_iter_end();
+ if (size <= 0)
+ kfree(*data.buf);
+ return size;
}
static int efi_pstore_write(enum pstore_type_id type,
@@ -184,9 +303,17 @@ static int efi_pstore_erase_func(struct efivar_entry *entry, void *data)
return 0;
}
+ if (entry->scanning) {
+ /*
+ * Skip deletion because this entry will be deleted
+ * after scanning is completed.
+ */
+ entry->deleting = true;
+ } else
+ list_del(&entry->list);
+
/* found */
__efivar_entry_delete(entry);
- list_del(&entry->list);
return 1;
}
@@ -199,14 +326,16 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
char name[DUMP_NAME_LEN];
efi_char16_t efi_name[DUMP_NAME_LEN];
int found, i;
+ unsigned int part;
- sprintf(name, "dump-type%u-%u-%d-%lu", type, (unsigned int)id, count,
- time.tv_sec);
+ do_div(id, 1000);
+ part = do_div(id, 100);
+ sprintf(name, "dump-type%u-%u-%d-%lu", type, part, count, time.tv_sec);
for (i = 0; i < DUMP_NAME_LEN; i++)
efi_name[i] = name[i];
- edata.id = id;
+ edata.id = part;
edata.type = type;
edata.count = count;
edata.time = time;
@@ -214,10 +343,12 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
efivar_entry_iter_begin();
found = __efivar_entry_iter(efi_pstore_erase_func, &efivar_sysfs_list, &edata, &entry);
- efivar_entry_iter_end();
- if (found)
+ if (found && !entry->scanning) {
+ efivar_entry_iter_end();
efivar_unregister(entry);
+ } else
+ efivar_entry_iter_end();
return 0;
}
@@ -225,6 +356,7 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
static struct pstore_info efi_pstore_info = {
.owner = THIS_MODULE,
.name = "efi",
+ .flags = PSTORE_FLAGS_FRAGILE,
.open = efi_pstore_open,
.close = efi_pstore_close,
.read = efi_pstore_read,
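
The new generic_id() helper packs timestamp, part and count into a single 64-bit pstore id as (timestamp * 100 + part) * 1000 + count, and efi_pstore_erase() unpacks it with two do_div() calls (do_div() divides in place and returns the remainder). The round trip in plain C, valid as long as part < 100 and count < 1000:

#include <assert.h>
#include <stdint.h>

static uint64_t generic_id(uint64_t timestamp, unsigned int part, int count)
{
	return (timestamp * 100 + part) * 1000 + count;
}

int main(void)
{
	uint64_t id = generic_id(1386000000, 7, 42);

	/* equivalent of: do_div(id, 1000); part = do_div(id, 100); */
	id /= 1000;			/* drops the count */
	unsigned int part = id % 100;	/* remainder: the part number */
	id /= 100;			/* what remains is the timestamp */

	assert(part == 7 && id == 1386000000);
	return 0;
}
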
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 933eb027d527..3dc248239197 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -383,12 +383,16 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
else if (__efivar_entry_delete(entry))
err = -EIO;
- efivar_entry_iter_end();
-
- if (err)
+ if (err) {
+ efivar_entry_iter_end();
return err;
+ }
- efivar_unregister(entry);
+ if (!entry->scanning) {
+ efivar_entry_iter_end();
+ efivar_unregister(entry);
+ } else
+ efivar_entry_iter_end();
/* It's dead Jim.... */
return count;
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 391c67b182d9..b22659cccca4 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -683,8 +683,16 @@ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
if (!found)
return NULL;
- if (remove)
- list_del(&entry->list);
+ if (remove) {
+ if (entry->scanning) {
+ /*
+ * The entry will be deleted
+ * after scanning is completed.
+ */
+ entry->deleting = true;
+ } else
+ list_del(&entry->list);
+ }
return entry;
}
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 72c927dc3be1..54c18c220a60 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -158,7 +158,7 @@ static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio)
spin_unlock_irqrestore(&kona_gpio->lock, flags);
/* return the specified bit status */
- return !!(val & bit);
+ return !!(val & BIT(bit));
}
static int bcm_kona_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
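
The bcm-kona change fixes a classic masking bug: val & bit tests against the bit number rather than the bit mask, so GPIO 4 (mask 0x10) is never seen, while an unrelated bit 2 would satisfy the test. A two-line demonstration:

#include <assert.h>

#define BIT(n) (1U << (n))

int main(void)
{
	unsigned int val = 0x10;	/* only GPIO 4's data bit is set */
	unsigned int bit = 4;

	assert(!!(val & bit) == 0);		/* buggy test: misses it */
	assert(!!(val & BIT(bit)) == 1);	/* fixed test: sees it */
	return 0;
}
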
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 8847adf392b7..84be70157ad6 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -327,7 +327,7 @@ static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset)
* NOTE: we assume for now that only irqs in the first gpio_chip
* can provide direct-mapped IRQs to AINTC (up to 32 GPIOs).
*/
- if (offset < d->irq_base)
+ if (offset < d->gpio_unbanked)
return d->gpio_irq + offset;
else
return -ENODEV;
@@ -419,6 +419,8 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
/* pass "bank 0" GPIO IRQs to AINTC */
chips[0].chip.to_irq = gpio_to_irq_unbanked;
+ chips[0].gpio_irq = bank_irq;
+ chips[0].gpio_unbanked = pdata->gpio_unbanked;
binten = BIT(0);
/* AINTC handles mask/unmask; GPIO handles triggering */
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 914e859e3eda..d7d6d72eba33 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -70,10 +70,14 @@ static int mpc8572_gpio_get(struct gpio_chip *gc, unsigned int gpio)
u32 val;
struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
+ u32 out_mask, out_shadow;
- val = in_be32(mm->regs + GPIO_DAT) & ~in_be32(mm->regs + GPIO_DIR);
- return (val | mpc8xxx_gc->data) & mpc8xxx_gpio2mask(gpio);
+ out_mask = in_be32(mm->regs + GPIO_DIR);
+ val = in_be32(mm->regs + GPIO_DAT) & ~out_mask;
+ out_shadow = mpc8xxx_gc->data & out_mask;
+
+ return (val | out_shadow) & mpc8xxx_gpio2mask(gpio);
}
static int mpc8xxx_gpio_get(struct gpio_chip *gc, unsigned int gpio)
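
The mpc8572 read fix keeps using a driver-side shadow of the output latch but now masks it with the direction register before merging, so stale shadow bits for pins that are currently inputs can no longer leak into the result. The combining step in isolation, with hypothetical register values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t dir    = 0x0000ff00; /* bits 8..15 configured as outputs */
	uint32_t dat    = 0x00000a0f; /* read-back; output bits unreliable */
	uint32_t shadow = 0xffff5500; /* output latch, plus stale bits left
	                               * over from pins since reconfigured */

	uint32_t buggy = (dat & ~dir) | shadow;         /* old: stale bits leak */
	uint32_t fixed = (dat & ~dir) | (shadow & dir); /* new: outputs only */

	assert(buggy == 0xffff550f);
	assert(fixed == 0x0000550f);
	return 0;
}
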
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
index f7a0cc4da950..2baf0ddf7e02 100644
--- a/drivers/gpio/gpio-msm-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -102,7 +102,7 @@ struct msm_gpio_dev {
DECLARE_BITMAP(wake_irqs, MAX_NR_GPIO);
DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
struct irq_domain *domain;
- unsigned int summary_irq;
+ int summary_irq;
void __iomem *msm_tlmm_base;
};
@@ -252,7 +252,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
spin_lock_irqsave(&tlmm_lock, irq_flags);
writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
- clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
+ clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
__clear_bit(gpio, msm_gpio.enabled_irqs);
spin_unlock_irqrestore(&tlmm_lock, irq_flags);
}
@@ -264,7 +264,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
spin_lock_irqsave(&tlmm_lock, irq_flags);
__set_bit(gpio, msm_gpio.enabled_irqs);
- set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
+ set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio));
spin_unlock_irqrestore(&tlmm_lock, irq_flags);
}
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 3c3321f94053..db3129043e63 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -79,7 +79,7 @@ struct mvebu_gpio_chip {
spinlock_t lock;
void __iomem *membase;
void __iomem *percpu_membase;
- unsigned int irqbase;
+ int irqbase;
struct irq_domain *domain;
int soc_variant;
};
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index f22f7f3e2e53..b4d42112d02d 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -286,11 +286,6 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
if (!chip->base)
return -ENOMEM;
- chip->domain = irq_domain_add_simple(adev->dev.of_node, PL061_GPIO_NR,
- irq_base, &pl061_domain_ops, chip);
- if (!chip->domain)
- return -ENODEV;
-
spin_lock_init(&chip->lock);
chip->gc.request = pl061_gpio_request;
@@ -320,6 +315,11 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
irq_set_chained_handler(irq, pl061_irq_handler);
irq_set_handler_data(irq, chip);
+ chip->domain = irq_domain_add_simple(adev->dev.of_node, PL061_GPIO_NR,
+ irq_base, &pl061_domain_ops, chip);
+ if (!chip->domain)
+ return -ENODEV;
+
for (i = 0; i < PL061_GPIO_NR; i++) {
if (pdata) {
if (pdata->directions & (1 << i))
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index d3f15ae93bd3..8b7e719a68c3 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -169,7 +169,8 @@ static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
u32 pending;
unsigned int offset, irqs_handled = 0;
- while ((pending = gpio_rcar_read(p, INTDT))) {
+ while ((pending = gpio_rcar_read(p, INTDT) &
+ gpio_rcar_read(p, INTMSK))) {
offset = __ffs(pending);
gpio_rcar_write(p, INTCLR, BIT(offset));
generic_handle_irq(irq_find_mapping(p->irq_domain, offset));
@@ -381,7 +382,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
if (!p->irq_domain) {
ret = -ENXIO;
dev_err(&pdev->dev, "cannot initialize irq domain\n");
- goto err1;
+ goto err0;
}
if (devm_request_irq(&pdev->dev, irq->start,
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 0502b9a041a5..da071ddbad99 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -132,6 +132,7 @@ static int tb10x_gpio_direction_out(struct gpio_chip *chip,
int mask = BIT(offset);
int val = TB10X_GPIO_DIR_OUT << offset;
+ tb10x_gpio_set(chip, offset, value);
tb10x_set_bits(tb10x_gpio, OFFSET_TO_REG_DDR, mask, val);
return 0;
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index 0c7e891c8651..f9996899c1f2 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -300,7 +300,7 @@ static int twl_direction_in(struct gpio_chip *chip, unsigned offset)
if (offset < TWL4030_GPIO_MAX)
ret = twl4030_set_gpio_direction(offset, 1);
else
- ret = -EINVAL;
+ ret = -EINVAL; /* LED outputs can't be set as input */
if (!ret)
priv->direction &= ~BIT(offset);
@@ -354,17 +354,27 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
{
struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
+ int ret = 0;
mutex_lock(&priv->mutex);
- if (offset < TWL4030_GPIO_MAX)
- twl4030_set_gpio_dataout(offset, value);
+ if (offset < TWL4030_GPIO_MAX) {
+ ret = twl4030_set_gpio_direction(offset, 0);
+ if (ret) {
+ mutex_unlock(&priv->mutex);
+ return ret;
+ }
+ }
+
+ /*
+ * LED GPIOs, i.e. offset >= TWL4030_GPIO_MAX, are always outputs
+ */
priv->direction |= BIT(offset);
mutex_unlock(&priv->mutex);
twl_set(chip, offset, value);
- return 0;
+ return ret;
}
static int twl_to_irq(struct gpio_chip *chip, unsigned offset)
@@ -435,7 +445,8 @@ static int gpio_twl4030_debounce(u32 debounce, u8 mmc_cd)
static int gpio_twl4030_remove(struct platform_device *pdev);
-static struct twl4030_gpio_platform_data *of_gpio_twl4030(struct device *dev)
+static struct twl4030_gpio_platform_data *of_gpio_twl4030(struct device *dev,
+ struct twl4030_gpio_platform_data *pdata)
{
struct twl4030_gpio_platform_data *omap_twl_info;
@@ -443,6 +454,9 @@ static struct twl4030_gpio_platform_data *of_gpio_twl4030(struct device *dev)
if (!omap_twl_info)
return NULL;
+ if (pdata)
+ *omap_twl_info = *pdata;
+
omap_twl_info->use_leds = of_property_read_bool(dev->of_node,
"ti,use-leds");
@@ -500,7 +514,7 @@ no_irqs:
mutex_init(&priv->mutex);
if (node)
- pdata = of_gpio_twl4030(&pdev->dev);
+ pdata = of_gpio_twl4030(&pdev->dev, pdata);
if (pdata == NULL) {
dev_err(&pdev->dev, "Platform data is missing\n");
diff --git a/drivers/gpio/gpio-ucb1400.c b/drivers/gpio/gpio-ucb1400.c
index 1a605f2a0f55..06fb5cf99ded 100644
--- a/drivers/gpio/gpio-ucb1400.c
+++ b/drivers/gpio/gpio-ucb1400.c
@@ -105,3 +105,4 @@ module_platform_driver(ucb1400_gpio_driver);
MODULE_DESCRIPTION("Philips UCB1400 GPIO driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ucb1400_gpio");
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 7dd446150294..85f772c0b26a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -13,6 +13,8 @@
#include <linux/acpi_gpio.h>
#include <linux/idr.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/gpio/driver.h>
#define CREATE_TRACE_POINTS
#include <trace/events/gpio.h>
@@ -1307,6 +1309,18 @@ struct gpio_chip *gpiochip_find(void *data,
}
EXPORT_SYMBOL_GPL(gpiochip_find);
+static int gpiochip_match_name(struct gpio_chip *chip, void *data)
+{
+ const char *name = data;
+
+ return !strcmp(chip->label, name);
+}
+
+static struct gpio_chip *find_chip_by_name(const char *name)
+{
+ return gpiochip_find((void *)name, gpiochip_match_name);
+}
+
#ifdef CONFIG_PINCTRL
/**
@@ -1340,8 +1354,10 @@ int gpiochip_add_pingroup_range(struct gpio_chip *chip,
ret = pinctrl_get_group_pins(pctldev, pin_group,
&pin_range->range.pins,
&pin_range->range.npins);
- if (ret < 0)
+ if (ret < 0) {
+ kfree(pin_range);
return ret;
+ }
pinctrl_add_gpio_range(pctldev, &pin_range->range);
@@ -2259,26 +2275,10 @@ void gpiod_add_table(struct gpiod_lookup *table, size_t size)
mutex_unlock(&gpio_lookup_lock);
}
-/*
- * Caller must have a acquired gpio_lookup_lock
- */
-static struct gpio_chip *find_chip_by_name(const char *name)
-{
- struct gpio_chip *chip = NULL;
-
- list_for_each_entry(chip, &gpio_lookup_list, list) {
- if (chip->label == NULL)
- continue;
- if (!strcmp(chip->label, name))
- break;
- }
-
- return chip;
-}
-
#ifdef CONFIG_OF
static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
- unsigned int idx, unsigned long *flags)
+ unsigned int idx,
+ enum gpio_lookup_flags *flags)
{
char prop_name[32]; /* 32 is max size of property name */
enum of_gpio_flags of_flags;
@@ -2296,20 +2296,22 @@ static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
return desc;
if (of_flags & OF_GPIO_ACTIVE_LOW)
- *flags |= GPIOF_ACTIVE_LOW;
+ *flags |= GPIO_ACTIVE_LOW;
return desc;
}
#else
static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
- unsigned int idx, unsigned long *flags)
+ unsigned int idx,
+ enum gpio_lookup_flags *flags)
{
return ERR_PTR(-ENODEV);
}
#endif
static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
- unsigned int idx, unsigned long *flags)
+ unsigned int idx,
+ enum gpio_lookup_flags *flags)
{
struct acpi_gpio_info info;
struct gpio_desc *desc;
@@ -2319,13 +2321,14 @@ static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
return desc;
if (info.gpioint && info.active_low)
- *flags |= GPIOF_ACTIVE_LOW;
+ *flags |= GPIO_ACTIVE_LOW;
return desc;
}
static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
- unsigned int idx, unsigned long *flags)
+ unsigned int idx,
+ enum gpio_lookup_flags *flags)
{
const char *dev_id = dev ? dev_name(dev) : NULL;
struct gpio_desc *desc = ERR_PTR(-ENODEV);
@@ -2365,7 +2368,7 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
continue;
}
- if (chip->ngpio >= p->chip_hwnum) {
+ if (chip->ngpio <= p->chip_hwnum) {
dev_warn(dev, "GPIO chip %s has %d GPIOs\n",
chip->label, chip->ngpio);
continue;
@@ -2415,9 +2418,9 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx)
{
- struct gpio_desc *desc;
+ struct gpio_desc *desc = NULL;
int status;
- unsigned long flags = 0;
+ enum gpio_lookup_flags flags = 0;
dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id);
@@ -2428,13 +2431,23 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
} else if (IS_ENABLED(CONFIG_ACPI) && dev && ACPI_HANDLE(dev)) {
dev_dbg(dev, "using ACPI for GPIO lookup\n");
desc = acpi_find_gpio(dev, con_id, idx, &flags);
- } else {
+ }
+
+ /*
+ * Either we are not using DT or ACPI, or their lookup did not return
+ * a result. In that case, use platform lookup as a fallback.
+ */
+ if (!desc || IS_ERR(desc)) {
+ struct gpio_desc *pdesc;
dev_dbg(dev, "using lookup tables for GPIO lookup");
- desc = gpiod_find(dev, con_id, idx, &flags);
+ pdesc = gpiod_find(dev, con_id, idx, &flags);
+ /* If used as fallback, do not replace the previous error */
+ if (!IS_ERR(pdesc) || !desc)
+ desc = pdesc;
}
if (IS_ERR(desc)) {
- dev_warn(dev, "lookup for GPIO %s failed\n", con_id);
+ dev_dbg(dev, "lookup for GPIO %s failed\n", con_id);
return desc;
}
@@ -2443,8 +2456,12 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
if (status < 0)
return ERR_PTR(status);
- if (flags & GPIOF_ACTIVE_LOW)
+ if (flags & GPIO_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ if (flags & GPIO_OPEN_DRAIN)
+ set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+ if (flags & GPIO_OPEN_SOURCE)
+ set_bit(FLAG_OPEN_SOURCE, &desc->flags);
return desc;
}
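
The gpiod_get_index() rework turns the platform lookup table into a fallback for DT/ACPI rather than an exclusive alternative, with one subtlety visible in the "do not replace the previous error" comment: a failed fallback must not clobber a more specific error from the primary lookup. The precedence rule, reduced to plain integers standing in for ERR_PTR values (negative = error, zero = no lookup ran, positive = a descriptor; purely illustrative):

#include <assert.h>

/* Return the primary result if it succeeded; otherwise take the
 * fallback, but keep the primary's error when the fallback also fails
 * and a primary lookup actually ran. Mirrors the gpiod_get_index()
 * logic: if (!IS_ERR(pdesc) || !desc) desc = pdesc; */
static int resolve(int primary, int fallback)
{
	if (primary > 0)
		return primary;
	if (fallback > 0 || primary == 0)
		return fallback;
	return primary;
}

int main(void)
{
	assert(resolve(42, -2) == 42);	/* DT/ACPI hit wins outright */
	assert(resolve(-5,  7) ==  7);	/* fallback rescues a DT miss */
	assert(resolve(-5, -2) == -5);	/* keep the more specific error */
	assert(resolve( 0, -2) == -2);	/* no DT/ACPI: fallback error stands */
	return 0;
}
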
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index eef09ec9a5ff..a72cae03b99b 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -103,6 +103,7 @@ void armada_drm_queue_unref_work(struct drm_device *,
extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
int armada_fbdev_init(struct drm_device *);
+void armada_fbdev_lastclose(struct drm_device *);
void armada_fbdev_fini(struct drm_device *);
int armada_overlay_plane_create(struct drm_device *, unsigned long);
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 4f2b28354915..62d0ff3efddf 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -321,6 +321,11 @@ static struct drm_ioctl_desc armada_ioctls[] = {
DRM_UNLOCKED),
};
+static void armada_drm_lastclose(struct drm_device *dev)
+{
+ armada_fbdev_lastclose(dev);
+}
+
static const struct file_operations armada_drm_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
@@ -337,7 +342,7 @@ static struct drm_driver armada_drm_driver = {
.open = NULL,
.preclose = NULL,
.postclose = NULL,
- .lastclose = NULL,
+ .lastclose = armada_drm_lastclose,
.unload = armada_drm_unload,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = armada_drm_enable_vblank,
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index dd5ea77dac96..948cb14c561e 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -105,9 +105,9 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
- DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n",
- dfb->fb.width, dfb->fb.height,
- dfb->fb.bits_per_pixel, obj->phys_addr);
+ DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n",
+ dfb->fb.width, dfb->fb.height, dfb->fb.bits_per_pixel,
+ (unsigned long long)obj->phys_addr);
return 0;
@@ -177,6 +177,16 @@ int armada_fbdev_init(struct drm_device *dev)
return ret;
}
+void armada_fbdev_lastclose(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+
+ drm_modeset_lock_all(dev);
+ if (priv->fbdev)
+ drm_fb_helper_restore_fbdev_mode(priv->fbdev);
+ drm_modeset_unlock_all(dev);
+}
+
void armada_fbdev_fini(struct drm_device *dev)
{
struct armada_private *priv = dev->dev_private;
@@ -192,11 +202,11 @@ void armada_fbdev_fini(struct drm_device *dev)
framebuffer_release(info);
}
+ drm_fb_helper_fini(fbh);
+
if (fbh->fb)
fbh->fb->funcs->destroy(fbh->fb);
- drm_fb_helper_fini(fbh);
-
priv->fbdev = NULL;
}
}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 9f2356bae7fd..887816f43476 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -172,8 +172,9 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
obj->dev_addr = obj->linear->start;
}
- DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n",
- obj, obj->phys_addr, obj->dev_addr);
+ DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
+ (unsigned long long)obj->phys_addr,
+ (unsigned long long)obj->dev_addr);
return 0;
}
@@ -557,7 +558,6 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
* refcount on the gem object itself.
*/
drm_gem_object_reference(obj);
- dma_buf_put(buf);
return obj;
}
}
@@ -573,6 +573,7 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
}
dobj->obj.import_attach = attach;
+ get_dma_buf(buf);
/*
* Don't call dma_buf_map_attachment() here - it maps the
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index fb7cf0e796f6..8835dcddfac3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -68,6 +68,8 @@
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
/* Force reduced-blanking timings for detailed modes */
#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
+/* Force 8bpc */
+#define EDID_QUIRK_FORCE_8BPC (1 << 8)
struct detailed_mode_closure {
struct drm_connector *connector;
@@ -128,6 +130,9 @@ static struct edid_quirk {
/* Medion MD 30217 PG */
{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
+
+ /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
+ { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
};
/*
@@ -2674,7 +2679,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
int modes = 0;
u8 cea_mode;
- if (video_db == NULL || video_index > video_len)
+ if (video_db == NULL || video_index >= video_len)
return 0;
/* CEA modes are numbered 1..127 */
@@ -2701,7 +2706,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
if (structure & (1 << 8)) {
newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
if (newmode) {
- newmode->flags = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
+ newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
drm_mode_probed_add(connector, newmode);
modes++;
}
@@ -3435,6 +3440,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
drm_add_display_info(edid, &connector->display_info);
+ if (quirks & EDID_QUIRK_FORCE_8BPC)
+ connector->display_info.bpc = 8;
+
return num_modes;
}
EXPORT_SYMBOL(drm_add_edid_modes);
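
Two easy-to-miss fixes in drm_edid.c: the bounds check becomes >= so an index equal to the array length is rejected, and the 3D flag is now OR-ed into newmode->flags rather than assigned, which previously wiped whatever flags the duplicated CEA mode already carried (interlace, sync polarity and so on). The flag fix in miniature (flag values are illustrative, not the DRM constants):

#include <assert.h>

#define FLAG_INTERLACE		(1u << 0)
#define FLAG_3D_SBS_HALF	(1u << 5)

int main(void)
{
	unsigned int flags = FLAG_INTERLACE;	/* set when the mode is duplicated */

	unsigned int assigned = FLAG_3D_SBS_HALF;	/* '=' drops interlace */
	flags |= FLAG_3D_SBS_HALF;			/* '|=' preserves it */

	assert(assigned == FLAG_3D_SBS_HALF);
	assert(flags == (FLAG_INTERLACE | FLAG_3D_SBS_HALF));
	return 0;
}
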
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index f53d5246979c..66dd3a001cf1 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -566,11 +566,11 @@ err_unload:
if (dev->driver->unload)
dev->driver->unload(dev);
err_primary_node:
- drm_put_minor(dev->primary);
+ drm_unplug_minor(dev->primary);
err_render_node:
- drm_put_minor(dev->render);
+ drm_unplug_minor(dev->render);
err_control_node:
- drm_put_minor(dev->control);
+ drm_unplug_minor(dev->control);
err_agp:
if (dev->driver->bus->agp_destroy)
dev->driver->bus->agp_destroy(dev);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 1a35ea53106b..c22c3097c3e8 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -489,6 +489,11 @@ void drm_sysfs_hotplug_event(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_sysfs_hotplug_event);
+static void drm_sysfs_release(struct device *dev)
+{
+ kfree(dev);
+}
+
/**
* drm_sysfs_device_add - adds a class device to sysfs for a character driver
* @dev: DRM device to be added
@@ -501,6 +506,7 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
int drm_sysfs_device_add(struct drm_minor *minor)
{
char *minor_str;
+ int r;
if (minor->type == DRM_MINOR_CONTROL)
minor_str = "controlD%d";
@@ -509,14 +515,34 @@ int drm_sysfs_device_add(struct drm_minor *minor)
else
minor_str = "card%d";
- minor->kdev = device_create(drm_class, minor->dev->dev,
- MKDEV(DRM_MAJOR, minor->index),
- minor, minor_str, minor->index);
- if (IS_ERR(minor->kdev)) {
- DRM_ERROR("device create failed %ld\n", PTR_ERR(minor->kdev));
- return PTR_ERR(minor->kdev);
+ minor->kdev = kzalloc(sizeof(*minor->kdev), GFP_KERNEL);
+ if (!minor->kdev) {
+ r = -ENOMEM;
+ goto error;
}
+
+ device_initialize(minor->kdev);
+ minor->kdev->devt = MKDEV(DRM_MAJOR, minor->index);
+ minor->kdev->class = drm_class;
+ minor->kdev->type = &drm_sysfs_device_minor;
+ minor->kdev->parent = minor->dev->dev;
+ minor->kdev->release = drm_sysfs_release;
+ dev_set_drvdata(minor->kdev, minor);
+
+ r = dev_set_name(minor->kdev, minor_str, minor->index);
+ if (r < 0)
+ goto error;
+
+ r = device_add(minor->kdev);
+ if (r < 0)
+ goto error;
+
return 0;
+
+error:
+ DRM_ERROR("device create failed %d\n", r);
+ put_device(minor->kdev);
+ return r;
}
/**
@@ -529,7 +555,7 @@ int drm_sysfs_device_add(struct drm_minor *minor)
void drm_sysfs_device_remove(struct drm_minor *minor)
{
if (minor->kdev)
- device_destroy(drm_class, MKDEV(DRM_MAJOR, minor->index));
+ device_unregister(minor->kdev);
minor->kdev = NULL;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index b676006a95a0..22b8f5eced80 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -173,28 +173,37 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
static void exynos_drm_preclose(struct drm_device *dev,
struct drm_file *file)
{
+ exynos_drm_subdrv_close(dev, file);
+}
+
+static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
+{
struct exynos_drm_private *private = dev->dev_private;
- struct drm_pending_vblank_event *e, *t;
+ struct drm_pending_vblank_event *v, *vt;
+ struct drm_pending_event *e, *et;
unsigned long flags;
- /* release events of current file */
+ if (!file->driver_priv)
+ return;
+
+ /* Release all events not handled by the page flip handler. */
spin_lock_irqsave(&dev->event_lock, flags);
- list_for_each_entry_safe(e, t, &private->pageflip_event_list,
+ list_for_each_entry_safe(v, vt, &private->pageflip_event_list,
base.link) {
- if (e->base.file_priv == file) {
- list_del(&e->base.link);
- e->base.destroy(&e->base);
+ if (v->base.file_priv == file) {
+ list_del(&v->base.link);
+ drm_vblank_put(dev, v->pipe);
+ v->base.destroy(&v->base);
}
}
- spin_unlock_irqrestore(&dev->event_lock, flags);
- exynos_drm_subdrv_close(dev, file);
-}
+ /* Release all events handled by page flip handler but not freed. */
+ list_for_each_entry_safe(e, et, &file->event_list, link) {
+ list_del(&e->link);
+ e->destroy(e);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
-static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
-{
- if (!file->driver_priv)
- return;
kfree(file->driver_priv);
file->driver_priv = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 23da72b5eae9..a61878bf5dcd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -31,7 +31,7 @@
#include "exynos_drm_iommu.h"
/*
- * FIMD is stand for Fully Interactive Mobile Display and
+ * FIMD stands for Fully Interactive Mobile Display and
* as a display controller, it transfers contents drawn on memory
to an LCD Panel through Display Interfaces such as RGB or
* CPU Interface.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 3271fd4b1724..7bccedca487a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -383,6 +383,8 @@ out:
g2d_userptr->npages,
g2d_userptr->vma);
+ exynos_gem_put_vma(g2d_userptr->vma);
+
if (!g2d_userptr->out_of_list)
list_del_init(&g2d_userptr->list);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0cab2d045135..5c648425c1e0 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -83,6 +83,14 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
+ /*
+ * The dri breadcrumb update races against the drm master disappearing.
+ * Instead of trying to fix this (it is far from the only UMS issue)
+ * just don't do the update in kms mode.
+ */
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
if (dev->primary->master) {
master_priv = dev->primary->master->driver_priv;
if (master_priv->sarea_priv)
@@ -1490,16 +1498,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
mutex_init(&dev_priv->dpio_lock);
- mutex_init(&dev_priv->rps.hw_lock);
mutex_init(&dev_priv->modeset_restore_lock);
- mutex_init(&dev_priv->pc8.lock);
- dev_priv->pc8.requirements_met = false;
- dev_priv->pc8.gpu_idle = false;
- dev_priv->pc8.irqs_disabled = false;
- dev_priv->pc8.enabled = false;
- dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
- INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
+ intel_pm_setup(dev);
intel_display_crc_init(dev);
@@ -1603,7 +1604,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
}
intel_irq_init(dev);
- intel_pm_init(dev);
intel_uncore_sanitize(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
@@ -1848,8 +1848,10 @@ void i915_driver_lastclose(struct drm_device * dev)
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
+ mutex_lock(&dev->struct_mutex);
i915_gem_context_close(dev, file_priv);
i915_gem_release(dev, file_priv);
+ mutex_unlock(&dev->struct_mutex);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 989be12cdd6e..5b7b7e06cb3a 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -534,8 +534,10 @@ static int i915_drm_freeze(struct drm_device *dev)
* Disable CRTCs directly since we want to preserve sw state
* for _thaw.
*/
+ mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
dev_priv->display.crtc_disable(crtc);
+ mutex_unlock(&dev->mode_config.mutex);
intel_modeset_suspend_hw(dev);
}
@@ -649,6 +651,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
intel_modeset_init_hw(dev);
drm_modeset_lock_all(dev);
+ drm_mode_config_reset(dev);
intel_modeset_setup_hw_state(dev, true);
drm_modeset_unlock_all(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8600c315b4c4..90fcccba17b0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1755,8 +1755,13 @@ struct drm_i915_file_private {
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0xFF00) == 0x0C00)
-#define IS_ULT(dev) (IS_HASWELL(dev) && \
+#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
+ (((dev)->pdev->device & 0xf) == 0x2 || \
+ ((dev)->pdev->device & 0xf) == 0x6 || \
+ ((dev)->pdev->device & 0xf) == 0xe))
+#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0xFF00) == 0x0A00)
+#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0x00F0) == 0x0020)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
@@ -1816,6 +1821,7 @@ struct drm_i915_file_private {
#define HAS_POWER_WELL(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
+#define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -1900,9 +1906,7 @@ void i915_queue_hangcheck(struct drm_device *dev);
void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 12bbd5eac70d..76d3d1ab73c6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2343,15 +2343,24 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
kfree(request);
}
-static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
- struct intel_ring_buffer *ring)
+static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
{
- u32 completed_seqno;
- u32 acthd;
+ u32 completed_seqno = ring->get_seqno(ring, false);
+ u32 acthd = intel_ring_get_active_head(ring);
+ struct drm_i915_gem_request *request;
+
+ list_for_each_entry(request, &ring->request_list, list) {
+ if (i915_seqno_passed(completed_seqno, request->seqno))
+ continue;
- acthd = intel_ring_get_active_head(ring);
- completed_seqno = ring->get_seqno(ring, false);
+ i915_set_reset_status(ring, request, acthd);
+ }
+}
+static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
+{
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
@@ -2359,9 +2368,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
struct drm_i915_gem_request,
list);
- if (request->seqno > completed_seqno)
- i915_set_reset_status(ring, request, acthd);
-
i915_gem_free_request(request);
}
@@ -2403,8 +2409,16 @@ void i915_gem_reset(struct drm_device *dev)
struct intel_ring_buffer *ring;
int i;
+ /*
+ * Before we free the objects from the requests, we need to inspect
+ * them to find the guilty party. As the requests only borrow
+ * their reference to the objects, the inspection must be done first.
+ */
+ for_each_ring(ring, dev_priv, i)
+ i915_gem_reset_ring_status(dev_priv, ring);
+
for_each_ring(ring, dev_priv, i)
- i915_gem_reset_ring_lists(dev_priv, ring);
+ i915_gem_reset_ring_cleanup(dev_priv, ring);
i915_gem_cleanup_ringbuffer(dev);
@@ -4442,10 +4456,9 @@ i915_gem_init_hw(struct drm_device *dev)
if (dev_priv->ellc_size)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
- if (IS_HSW_GT3(dev))
- I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED);
- else
- I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
+ if (IS_HASWELL(dev))
+ I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
+ LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
if (HAS_PCH_NOP(dev)) {
u32 temp = I915_READ(GEN7_MSG_CTL);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 72a3df32292f..b0f42b9ca037 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -347,10 +347,8 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- mutex_lock(&dev->struct_mutex);
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
- mutex_unlock(&dev->struct_mutex);
}
static struct i915_hw_context *
@@ -423,11 +421,21 @@ static int do_switch(struct i915_hw_context *to)
if (ret)
return ret;
- /* Clear this page out of any CPU caches for coherent swap-in/out. Note
+ /*
+ * Pin can switch back to the default context if we end up calling into
+ * evict_everything - as a last ditch gtt defrag effort that also
+ * switches to the default context. Hence we need to reload from here.
+ */
+ from = ring->last_context;
+
+ /*
+ * Clear this page out of any CPU caches for coherent swap-in/out. Note
* that thanks to write = false in this call and us not setting any gpu
* write domains when putting a context object onto the active list
* (when switching away from it), this won't block.
- * XXX: We need a real interface to do this instead of trickery. */
+ *
+ * XXX: We need a real interface to do this instead of trickery.
+ */
ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
if (ret) {
i915_gem_object_unpin(to->obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 7d5752fda5f1..9bb533e0d762 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -125,13 +125,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
ret = i915_gem_object_get_pages(obj);
if (ret)
- goto error;
+ goto err;
+
+ i915_gem_object_pin_pages(obj);
ret = -ENOMEM;
pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
if (pages == NULL)
- goto error;
+ goto err_unpin;
i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
@@ -141,15 +143,16 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
drm_free_large(pages);
if (!obj->dma_buf_vmapping)
- goto error;
+ goto err_unpin;
obj->vmapping_count = 1;
- i915_gem_object_pin_pages(obj);
out_unlock:
mutex_unlock(&dev->struct_mutex);
return obj->dma_buf_vmapping;
-error:
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+err:
mutex_unlock(&dev->struct_mutex);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index b7376533633d..8f3adc7d0dc8 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -88,6 +88,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
} else
drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
+search_again:
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(vma, &vm->inactive_list, mm_list) {
if (mark_free(vma, &unwind_list))
@@ -115,10 +116,17 @@ none:
list_del_init(&vma->exec_list);
}
- /* We expect the caller to unpin, evict all and try again, or give up.
- * So calling i915_gem_evict_vm() is unnecessary.
+ /* Can we unpin some objects such as idle hw contexts,
+ * or pending flips?
*/
- return -ENOSPC;
+ ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev);
+ if (ret)
+ return ret;
+
+ /* Only idle the GPU and repeat the search once */
+ i915_gem_retire_requests(dev);
+ nonblocking = true;
+ goto search_again;
found:
/* drm_mm doesn't allow any other operations while
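
The hunk above turns a hard -ENOSPC into a bounded retry: idle the GPU, retire completed requests so their objects move to the inactive list, then rescan exactly once. The control-flow shape, as a sketch with hypothetical helpers (find_space, idle_hw and retire stand in for the driver-specific steps):

static int evict_something(void)
{
	bool second_pass = false;
	int ret;

search_again:
	if (find_space() == 0)
		return 0;		/* found a hole, done */

	if (second_pass)
		return -ENOSPC;		/* already idled once; give up */

	ret = idle_hw();		/* wait for outstanding GPU work */
	if (ret)
		return ret;
	retire();			/* move now-idle objects to inactive */

	second_pass = true;
	goto search_again;		/* single bounded retry */
}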
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 885d595e0e02..a3ba9a8cd687 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,6 +33,9 @@
#include "intel_drv.h"
#include <linux/dma_remapping.h>
+#define __EXEC_OBJECT_HAS_PIN (1<<31)
+#define __EXEC_OBJECT_HAS_FENCE (1<<30)
+
struct eb_vmas {
struct list_head vmas;
int and;
@@ -90,7 +93,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
{
struct drm_i915_gem_object *obj;
struct list_head objects;
- int i, ret = 0;
+ int i, ret;
INIT_LIST_HEAD(&objects);
spin_lock(&file->table_lock);
@@ -103,7 +106,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
ret = -ENOENT;
- goto out;
+ goto err;
}
if (!list_empty(&obj->obj_exec_link)) {
@@ -111,7 +114,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
ret = -EINVAL;
- goto out;
+ goto err;
}
drm_gem_object_reference(&obj->base);
@@ -120,9 +123,13 @@ eb_lookup_vmas(struct eb_vmas *eb,
spin_unlock(&file->table_lock);
i = 0;
- list_for_each_entry(obj, &objects, obj_exec_link) {
+ while (!list_empty(&objects)) {
struct i915_vma *vma;
+ obj = list_first_entry(&objects,
+ struct drm_i915_gem_object,
+ obj_exec_link);
+
/*
* NOTE: We can leak any vmas created here when something fails
* later on. But that's no issue since vma_unbind can deal with
@@ -135,10 +142,12 @@ eb_lookup_vmas(struct eb_vmas *eb,
if (IS_ERR(vma)) {
DRM_DEBUG("Failed to lookup VMA\n");
ret = PTR_ERR(vma);
- goto out;
+ goto err;
}
+ /* Transfer ownership from the objects list to the vmas list. */
list_add_tail(&vma->exec_list, &eb->vmas);
+ list_del_init(&obj->obj_exec_link);
vma->exec_entry = &exec[i];
if (eb->and < 0) {
@@ -152,16 +161,22 @@ eb_lookup_vmas(struct eb_vmas *eb,
++i;
}
+ return 0;
-out:
+
+err:
while (!list_empty(&objects)) {
obj = list_first_entry(&objects,
struct drm_i915_gem_object,
obj_exec_link);
list_del_init(&obj->obj_exec_link);
- if (ret)
- drm_gem_object_unreference(&obj->base);
+ drm_gem_object_unreference(&obj->base);
}
+ /*
+ * Objects already transferred to the vmas list will be unreferenced by
+ * eb_destroy.
+ */
+
return ret;
}
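
The restructured loop above pops each object off the temporary list as soon as its vma exists, so on failure the two lists cleanly partition ownership: the error path unreferences what is still on objects, and eb_destroy() unreferences what already moved to vmas. The transfer idiom on its own (struct my_obj and setup() are hypothetical):

static int transfer_all(struct list_head *src, struct list_head *dst)
{
	while (!list_empty(src)) {
		struct my_obj *obj =
			list_first_entry(src, struct my_obj, link);

		if (setup(obj))
			return -EINVAL;	/* obj and the rest stay on src */

		/* list_move_tail() = list_del() + list_add_tail() */
		list_move_tail(&obj->link, dst);
	}
	return 0;
}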
@@ -187,7 +202,28 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
}
}
-static void eb_destroy(struct eb_vmas *eb) {
+static void
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
+{
+ struct drm_i915_gem_exec_object2 *entry;
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ if (!drm_mm_node_allocated(&vma->node))
+ return;
+
+ entry = vma->exec_entry;
+
+ if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+ i915_gem_object_unpin_fence(obj);
+
+ if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+ i915_gem_object_unpin(obj);
+
+ entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+}
+
+static void eb_destroy(struct eb_vmas *eb)
+{
while (!list_empty(&eb->vmas)) {
struct i915_vma *vma;
@@ -195,6 +231,7 @@ static void eb_destroy(struct eb_vmas *eb) {
struct i915_vma,
exec_list);
list_del_init(&vma->exec_list);
+ i915_gem_execbuffer_unreserve_vma(vma);
drm_gem_object_unreference(&vma->obj->base);
}
kfree(eb);
@@ -478,9 +515,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb,
return ret;
}
-#define __EXEC_OBJECT_HAS_PIN (1<<31)
-#define __EXEC_OBJECT_HAS_FENCE (1<<30)
-
static int
need_reloc_mappable(struct i915_vma *vma)
{
@@ -552,26 +586,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
return 0;
}
-static void
-i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
-{
- struct drm_i915_gem_exec_object2 *entry;
- struct drm_i915_gem_object *obj = vma->obj;
-
- if (!drm_mm_node_allocated(&vma->node))
- return;
-
- entry = vma->exec_entry;
-
- if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
- i915_gem_object_unpin_fence(obj);
-
- if (entry->flags & __EXEC_OBJECT_HAS_PIN)
- i915_gem_object_unpin(obj);
-
- entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
-}
-
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct list_head *vmas,
@@ -670,13 +684,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
goto err;
}
-err: /* Decrement pin count for bound objects */
- list_for_each_entry(vma, vmas, exec_list)
- i915_gem_execbuffer_unreserve_vma(vma);
-
+err:
if (ret != -ENOSPC || retry++)
return ret;
+ /* Decrement pin count for bound objects */
+ list_for_each_entry(vma, vmas, exec_list)
+ i915_gem_execbuffer_unreserve_vma(vma);
+
ret = i915_gem_evict_vm(vm, true);
if (ret)
return ret;
@@ -708,6 +723,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
while (!list_empty(&eb->vmas)) {
vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
list_del_init(&vma->exec_list);
+ i915_gem_execbuffer_unreserve_vma(vma);
drm_gem_object_unreference(&vma->obj->base);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 3620a1b0a73c..c79dd2b1f70e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -57,7 +57,9 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
+#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
+#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
@@ -185,10 +187,10 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
case I915_CACHE_NONE:
break;
case I915_CACHE_WT:
- pte |= HSW_WT_ELLC_LLC_AGE0;
+ pte |= HSW_WT_ELLC_LLC_AGE3;
break;
default:
- pte |= HSW_WB_ELLC_LLC_AGE0;
+ pte |= HSW_WB_ELLC_LLC_AGE3;
break;
}
@@ -335,8 +337,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
kfree(ppgtt->gen8_pt_dma_addr[i]);
}
- __free_pages(ppgtt->gen8_pt_pages, ppgtt->num_pt_pages << PAGE_SHIFT);
- __free_pages(ppgtt->pd_pages, ppgtt->num_pd_pages << PAGE_SHIFT);
+ __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
+ __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
}
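
The fix above matters because __free_pages() takes an allocation order, not a byte count, so the old code handed the page allocator a nonsense order. get_order() maps a size in bytes to the smallest order whose block covers it. A userspace model of that mapping, assuming 4 KiB pages (plain C, illustrative only):

#include <stdio.h>

/* Model of the kernel's get_order() for PAGE_SHIFT == 12:
 * smallest n such that (4096 << n) >= size.
 */
static int get_order_4k(unsigned long size)
{
	int order = 0;

	while ((4096UL << order) < size)
		order++;
	return order;
}

int main(void)
{
	printf("%d\n", get_order_4k(4096));	/* 0: one page             */
	printf("%d\n", get_order_4k(16384));	/* 2: four pages           */
	printf("%d\n", get_order_4k(20000));	/* 3: rounds up to 8 pages */
	return 0;
}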
/**
@@ -1239,6 +1241,11 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
if (bdw_gmch_ctl)
bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+ if (bdw_gmch_ctl > 4) {
+ WARN_ON(!i915_preliminary_hw_support);
+ return 4<<20;
+ }
+
return bdw_gmch_ctl << 20;
}
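
To make the decode concrete: the masked GGMS field is an exponent in MiB, so a field value of 2 means 1 << 2 = 4 MiB, and the new clamp caps anything larger at 4 MiB pending real hardware support. A standalone model of the function above (plain C, illustrative):

#include <stdio.h>

static unsigned int bdw_ggtt_bytes(unsigned int field)
{
	unsigned int mib = field ? 1u << field : 0;

	if (mib > 4)		/* the preliminary-hw clamp from the hunk */
		mib = 4;
	return mib << 20;	/* MiB -> bytes */
}

int main(void)
{
	printf("%u\n", bdw_ggtt_bytes(1));	/* 2097152: 2 MiB */
	printf("%u\n", bdw_ggtt_bytes(2));	/* 4194304: 4 MiB */
	printf("%u\n", bdw_ggtt_bytes(3));	/* 4194304: clamped from 8 MiB */
	return 0;
}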
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f9eafb6ed523..ee2742122a02 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -235,6 +235,7 @@
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
+#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
#define MI_FLUSH_DW_STORE_INDEX (1<<21)
#define MI_INVALIDATE_TLB (1<<18)
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index 43959edd4291..dfff0907f70e 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -196,7 +196,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev)
acpi_handle dhandle;
int ret;
- dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 6dd622d733b9..e4fba39631a5 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -790,7 +790,12 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
/* Default to using SSC */
dev_priv->vbt.lvds_use_ssc = 1;
- dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
+ /*
+ * Core/SandyBridge/IvyBridge use alternative (120MHz) reference
+ * clock for LVDS.
+ */
+ dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev,
+ !HAS_PCH_SPLIT(dev));
DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
for (port = PORT_A; port < I915_MAX_PORTS; port++) {
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 1591576a6101..526c8ded16b0 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -173,7 +173,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
ddi_translations = ddi_translations_dp;
break;
case PORT_D:
- if (intel_dpd_is_edp(dev))
+ if (intel_dp_is_edp(dev, PORT_D))
ddi_translations = ddi_translations_edp;
else
ddi_translations = ddi_translations_dp;
@@ -1158,9 +1158,10 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
- if (type == INTEL_OUTPUT_EDP) {
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
ironlake_edp_panel_vdd_on(intel_dp);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
ironlake_edp_panel_off(intel_dp);
}
@@ -1406,6 +1407,26 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
default:
break;
}
+
+ if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
+ pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
+ /*
+ * This is a big fat ugly hack.
+ *
+ * Some machines in UEFI boot mode provide us a VBT that has 18
+ * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
+ * unknown we fail to light up. Yet the same BIOS boots up with
+ * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
+ * max, not what it tells us to use.
+ *
+ * Note: This will still be broken if the eDP panel is not lit
+ * up by the BIOS, and thus we can't get the mode at module
+ * load.
+ */
+ DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+ pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
+ dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
+ }
}
static void intel_ddi_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3cddd508d110..54e82a80cf50 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5815,7 +5815,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
uint16_t postoff = 0;
if (intel_crtc->config.limited_color_range)
- postoff = (16 * (1 << 13) / 255) & 0x1fff;
+ postoff = (16 * (1 << 12) / 255) & 0x1fff;
I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
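
The one-bit change above halves the post-offset. The register encodes 1.0 as 1 << 12, so the limited-range black level of 16/255 should come out to 16 * 4096 / 255 = 257; the old 1 << 13 scale yielded 514, a doubled 32/255 offset. A quick arithmetic check (plain C, illustrative only):

#include <stdio.h>

int main(void)
{
	printf("%d\n", (16 * (1 << 12) / 255) & 0x1fff);	/* 257: correct    */
	printf("%d\n", (16 * (1 << 13) / 255) & 0x1fff);	/* 514: 2x too big */
	return 0;
}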
@@ -6303,7 +6303,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
uint32_t val;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
- WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
+ WARN(crtc->active, "CRTC for pipe %c enabled\n",
pipe_name(crtc->pipe));
WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
@@ -6402,7 +6402,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
/* Make sure we're not in PC8 state before disabling PC8, otherwise
* we'll hang the machine! */
- dev_priv->uncore.funcs.force_wake_get(dev_priv);
+ gen6_gt_force_wake_get(dev_priv);
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -6436,7 +6436,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
DRM_ERROR("Switching back to LCPLL failed\n");
}
- dev_priv->uncore.funcs.force_wake_put(dev_priv);
+ gen6_gt_force_wake_put(dev_priv);
}
void hsw_enable_pc8_work(struct work_struct *__work)
@@ -6518,6 +6518,9 @@ static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
{
+ if (!HAS_PC8(dev_priv->dev))
+ return;
+
mutex_lock(&dev_priv->pc8.lock);
__hsw_enable_package_c8(dev_priv);
mutex_unlock(&dev_priv->pc8.lock);
@@ -6525,6 +6528,9 @@ void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
{
+ if (!HAS_PC8(dev_priv->dev))
+ return;
+
mutex_lock(&dev_priv->pc8.lock);
__hsw_disable_package_c8(dev_priv);
mutex_unlock(&dev_priv->pc8.lock);
@@ -6562,6 +6568,9 @@ static void hsw_update_package_c8(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
bool allow;
+ if (!HAS_PC8(dev_priv->dev))
+ return;
+
if (!i915_enable_pc8)
return;
@@ -6585,18 +6594,28 @@ done:
static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
{
+ if (!HAS_PC8(dev_priv->dev))
+ return;
+
+ mutex_lock(&dev_priv->pc8.lock);
if (!dev_priv->pc8.gpu_idle) {
dev_priv->pc8.gpu_idle = true;
- hsw_enable_package_c8(dev_priv);
+ __hsw_enable_package_c8(dev_priv);
}
+ mutex_unlock(&dev_priv->pc8.lock);
}
static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
{
+ if (!HAS_PC8(dev_priv->dev))
+ return;
+
+ mutex_lock(&dev_priv->pc8.lock);
if (dev_priv->pc8.gpu_idle) {
dev_priv->pc8.gpu_idle = false;
- hsw_disable_package_c8(dev_priv);
+ __hsw_disable_package_c8(dev_priv);
}
+ mutex_unlock(&dev_priv->pc8.lock);
}
#define for_each_power_domain(domain, mask) \
@@ -7184,7 +7203,9 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
intel_crtc->cursor_visible = visible;
}
/* and commit changes on next vblank */
+ POSTING_READ(CURCNTR(pipe));
I915_WRITE(CURBASE(pipe), base);
+ POSTING_READ(CURBASE(pipe));
}
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
@@ -7213,7 +7234,9 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
intel_crtc->cursor_visible = visible;
}
/* and commit changes on next vblank */
+ POSTING_READ(CURCNTR_IVB(pipe));
I915_WRITE(CURBASE_IVB(pipe), base);
+ POSTING_READ(CURBASE_IVB(pipe));
}
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
@@ -8331,7 +8354,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE));
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1));
+ intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
+ MI_SRM_LRM_GLOBAL_GTT);
intel_ring_emit(ring, DERRMR);
intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
}
@@ -9111,7 +9135,7 @@ intel_pipe_config_compare(struct drm_device *dev,
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
- if (!IS_HASWELL(dev)) {
+ if (!HAS_DDI(dev)) {
PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
}
@@ -9248,8 +9272,7 @@ check_crtc_state(struct drm_device *dev)
enum pipe pipe;
if (encoder->base.crtc != &crtc->base)
continue;
- if (encoder->get_config &&
- encoder->get_hw_state(encoder, &pipe))
+ if (encoder->get_hw_state(encoder, &pipe))
encoder->get_config(encoder, &pipe_config);
}
@@ -10027,7 +10050,7 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_ddi_init(dev, PORT_D);
} else if (HAS_PCH_SPLIT(dev)) {
int found;
- dpd_is_edp = intel_dpd_is_edp(dev);
+ dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
if (has_edp_a(dev))
intel_dp_init(dev, DP_A, PORT_A);
@@ -10064,8 +10087,7 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
PORT_C);
if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
- intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C,
- PORT_C);
+ intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
}
intel_dsi_init(dev);
@@ -10909,8 +10931,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
if (encoder->get_hw_state(encoder, &pipe)) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
encoder->base.crtc = &crtc->base;
- if (encoder->get_config)
- encoder->get_config(encoder, &crtc->config);
+ encoder->get_config(encoder, &crtc->config);
} else {
encoder->base.crtc = NULL;
}
@@ -11015,8 +11036,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
}
intel_modeset_check_state(dev);
-
- drm_mode_config_reset(dev);
}
void intel_modeset_gem_init(struct drm_device *dev)
@@ -11025,7 +11044,10 @@ void intel_modeset_gem_init(struct drm_device *dev)
intel_setup_overlay(dev);
+ drm_modeset_lock_all(dev);
+ drm_mode_config_reset(dev);
intel_modeset_setup_hw_state(dev, false);
+ drm_modeset_unlock_all(dev);
}
void intel_modeset_cleanup(struct drm_device *dev)
@@ -11104,14 +11126,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
u16 gmch_ctrl;
- pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
+ pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
if (state)
gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
else
gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
- pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
+ pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
return 0;
}
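
The fix above only swaps in the right config-space register for gen6+ bridges; the surrounding logic is the standard PCI read-modify-write. As a hedged sketch (bridge, MY_CTRL_REG and MY_VGA_DISABLE are illustrative names, not i915 ones):

static void set_vga_state(struct pci_dev *bridge, bool enable)
{
	u16 ctrl;

	pci_read_config_word(bridge, MY_CTRL_REG, &ctrl);
	if (enable)
		ctrl &= ~MY_VGA_DISABLE;	/* clear the disable bit */
	else
		ctrl |= MY_VGA_DISABLE;
	pci_write_config_word(bridge, MY_CTRL_REG, ctrl);
}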
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index eb8139da9763..30c627c7b7ba 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1774,7 +1774,7 @@ static void intel_disable_dp(struct intel_encoder *encoder)
* ensure that we have vdd while we switch off the panel. */
ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_backlight_off(intel_dp);
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
ironlake_edp_panel_off(intel_dp);
/* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled. */
@@ -3326,11 +3326,19 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
}
/* check the VBT to see whether the eDP is on DP-D port */
-bool intel_dpd_is_edp(struct drm_device *dev)
+bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
union child_device_config *p_child;
int i;
+ static const short port_mapping[] = {
+ [PORT_B] = PORT_IDPB,
+ [PORT_C] = PORT_IDPC,
+ [PORT_D] = PORT_IDPD,
+ };
+
+ if (port == PORT_A)
+ return true;
if (!dev_priv->vbt.child_dev_num)
return false;
@@ -3338,7 +3346,7 @@ bool intel_dpd_is_edp(struct drm_device *dev)
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
p_child = dev_priv->vbt.child_dev + i;
- if (p_child->common.dvo_port == PORT_IDPD &&
+ if (p_child->common.dvo_port == port_mapping[port] &&
(p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
(DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
return true;
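
The new lookup relies on a sparse, enum-indexed table built with designated initializers; any port not listed decodes to 0, and PORT_A is special-cased before the table is consulted because it is always eDP on these platforms. The table idiom by itself (illustrative values, not the real PORT_IDPx codes):

#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D };

static const short vbt_dvo_port[] = {
	[PORT_B] = 5,	/* made-up VBT codes for illustration */
	[PORT_C] = 6,
	[PORT_D] = 7,
};

int main(void)
{
	printf("%d\n", vbt_dvo_port[PORT_D]);	/* 7 */
	printf("%d\n", vbt_dvo_port[PORT_A]);	/* 0: unlisted slots are zero */
	return 0;
}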
@@ -3616,26 +3624,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
- type = DRM_MODE_CONNECTOR_DisplayPort;
- /*
- * FIXME : We need to initialize built-in panels before external panels.
- * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
- */
- switch (port) {
- case PORT_A:
+ if (intel_dp_is_edp(dev, port))
type = DRM_MODE_CONNECTOR_eDP;
- break;
- case PORT_C:
- if (IS_VALLEYVIEW(dev))
- type = DRM_MODE_CONNECTOR_eDP;
- break;
- case PORT_D:
- if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
- type = DRM_MODE_CONNECTOR_eDP;
- break;
- default: /* silence GCC warning */
- break;
- }
+ else
+ type = DRM_MODE_CONNECTOR_DisplayPort;
/*
* For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1e49aa8f5377..79f91f26e288 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -708,7 +708,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder);
void intel_dp_check_link_status(struct intel_dp *intel_dp);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
-bool intel_dpd_is_edp(struct drm_device *dev);
+bool intel_dp_is_edp(struct drm_device *dev, enum port port);
void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
void ironlake_edp_panel_on(struct intel_dp *intel_dp);
@@ -821,6 +821,7 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
uint32_t sprite_width, int pixel_size,
bool enabled, bool scaled);
void intel_init_pm(struct drm_device *dev);
+void intel_pm_setup(struct drm_device *dev);
bool intel_fbc_enabled(struct drm_device *dev);
void intel_update_fbc(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 1b2f41c3f191..6d69a9bad865 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -638,7 +638,7 @@ static void intel_didl_outputs(struct drm_device *dev)
u32 temp;
int i = 0;
- handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+ handle = ACPI_HANDLE(&dev->pdev->dev);
if (!handle || acpi_bus_get_device(handle, &acpi_dev))
return;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index f161ac02c4f6..e6f782d1c669 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -451,7 +451,9 @@ static u32 intel_panel_get_backlight(struct drm_device *dev,
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_BROADWELL(dev)) {
+ val = I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+ } else if (HAS_PCH_SPLIT(dev)) {
val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
} else {
if (IS_VALLEYVIEW(dev))
@@ -479,6 +481,13 @@ static u32 intel_panel_get_backlight(struct drm_device *dev,
return val;
}
+static void intel_bdw_panel_set_backlight(struct drm_device *dev, u32 level)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
+}
+
static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -496,7 +505,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev,
DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
level = intel_panel_compute_brightness(dev, pipe, level);
- if (HAS_PCH_SPLIT(dev))
+ if (IS_BROADWELL(dev))
+ return intel_bdw_panel_set_backlight(dev, level);
+ else if (HAS_PCH_SPLIT(dev))
return intel_pch_panel_set_backlight(dev, level);
if (is_backlight_combination_mode(dev)) {
@@ -666,7 +677,16 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
POSTING_READ(reg);
I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
- if (HAS_PCH_SPLIT(dev) &&
+ if (IS_BROADWELL(dev)) {
+ /*
+ * Broadwell requires PCH override to drive the PCH
+ * backlight pin. The above will configure the CPU
+ * backlight pin, which we don't plan to use.
+ */
+ tmp = I915_READ(BLC_PWM_PCH_CTL1);
+ tmp |= BLM_PCH_OVERRIDE_ENABLE | BLM_PCH_PWM_ENABLE;
+ I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
+ } else if (HAS_PCH_SPLIT(dev) &&
!(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
tmp = I915_READ(BLC_PWM_PCH_CTL1);
tmp |= BLM_PCH_PWM_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 0a07d7c9cafc..26c29c173221 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1180,7 +1180,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
clock = adjusted_mode->crtc_clock;
- htotal = adjusted_mode->htotal;
+ htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->fb->bits_per_pixel / 8;
@@ -1267,7 +1267,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
crtc = intel_get_crtc_for_plane(dev, plane);
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
clock = adjusted_mode->crtc_clock;
- htotal = adjusted_mode->htotal;
+ htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->fb->bits_per_pixel / 8;
@@ -1498,7 +1498,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
const struct drm_display_mode *adjusted_mode =
&to_intel_crtc(crtc)->config.adjusted_mode;
int clock = adjusted_mode->crtc_clock;
- int htotal = adjusted_mode->htotal;
+ int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
int pixel_size = crtc->fb->bits_per_pixel / 8;
unsigned long line_time_us;
@@ -1624,8 +1624,8 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
const struct drm_display_mode *adjusted_mode =
&to_intel_crtc(enabled)->config.adjusted_mode;
int clock = adjusted_mode->crtc_clock;
- int htotal = adjusted_mode->htotal;
- int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
+ int htotal = adjusted_mode->crtc_htotal;
+ int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
int pixel_size = enabled->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
@@ -1776,7 +1776,7 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
crtc = intel_get_crtc_for_plane(dev, plane);
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
clock = adjusted_mode->crtc_clock;
- htotal = adjusted_mode->htotal;
+ htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->fb->bits_per_pixel / 8;
@@ -2469,8 +2469,9 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
/* The WM are computed based on how long it takes to fill a single
* row at the given clock rate, multiplied by 8.
* */
- linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock);
- ips_linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8,
+ linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
+ mode->crtc_clock);
+ ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
intel_ddi_get_cdclk_freq(dev_priv));
return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
@@ -3888,7 +3889,7 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
- if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev))
I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
else
I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
@@ -5684,8 +5685,11 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
bool is_enabled, enable_requested;
+ unsigned long irqflags;
uint32_t tmp;
+ WARN_ON(dev_priv->pc8.enabled);
+
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
@@ -5701,9 +5705,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
HSW_PWR_WELL_STATE_ENABLED), 20))
DRM_ERROR("Timeout enabling power well\n");
}
+
+ if (IS_BROADWELL(dev)) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
+ dev_priv->de_irq_mask[PIPE_B]);
+ I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
+ ~dev_priv->de_irq_mask[PIPE_B] |
+ GEN8_PIPE_VBLANK);
+ I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
+ dev_priv->de_irq_mask[PIPE_C]);
+ I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
+ ~dev_priv->de_irq_mask[PIPE_C] |
+ GEN8_PIPE_VBLANK);
+ POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
} else {
if (enable_requested) {
- unsigned long irqflags;
enum pipe p;
I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
@@ -5730,16 +5749,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
static void __intel_power_well_get(struct drm_device *dev,
struct i915_power_well *power_well)
{
- if (!power_well->count++)
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!power_well->count++) {
+ hsw_disable_package_c8(dev_priv);
__intel_set_power_well(dev, true);
+ }
}
static void __intel_power_well_put(struct drm_device *dev,
struct i915_power_well *power_well)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
WARN_ON(!power_well->count);
- if (!--power_well->count && i915_disable_power_well)
+ if (!--power_well->count && i915_disable_power_well) {
__intel_set_power_well(dev, false);
+ hsw_enable_package_c8(dev_priv);
+ }
}
void intel_display_power_get(struct drm_device *dev,
@@ -6129,10 +6156,19 @@ int vlv_freq_opcode(int ddr_freq, int val)
return val;
}
-void intel_pm_init(struct drm_device *dev)
+void intel_pm_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ mutex_init(&dev_priv->rps.hw_lock);
+
+ mutex_init(&dev_priv->pc8.lock);
+ dev_priv->pc8.requirements_met = false;
+ dev_priv->pc8.gpu_idle = false;
+ dev_priv->pc8.irqs_disabled = false;
+ dev_priv->pc8.enabled = false;
+ dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
+ INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b620337e6d67..c2f09d456300 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -965,6 +965,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
} else if (IS_GEN6(ring->dev)) {
mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
} else {
+ /* XXX: gen8 returns to sanity */
mmio = RING_HWS_PGA(ring->mmio_base);
}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 18c406246a2d..22cf0f4ba248 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -902,6 +902,13 @@ intel_tv_mode_valid(struct drm_connector *connector,
}
+static void
+intel_tv_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+}
+
static bool
intel_tv_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
@@ -1621,6 +1628,7 @@ intel_tv_init(struct drm_device *dev)
DRM_MODE_ENCODER_TVDAC);
intel_encoder->compute_config = intel_tv_compute_config;
+ intel_encoder->get_config = intel_tv_get_config;
intel_encoder->mode_set = intel_tv_mode_set;
intel_encoder->enable = intel_enable_tv;
intel_encoder->disable = intel_disable_tv;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index f9883ceff946..25cbe073c388 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -217,6 +217,19 @@ static void gen6_force_wake_work(struct work_struct *work)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
+static void intel_uncore_forcewake_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_VALLEYVIEW(dev)) {
+ vlv_force_wake_reset(dev_priv);
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ __gen6_gt_force_wake_reset(dev_priv);
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+ __gen6_gt_force_wake_mt_reset(dev_priv);
+ }
+}
+
void intel_uncore_early_sanitize(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -234,19 +247,8 @@ void intel_uncore_early_sanitize(struct drm_device *dev)
dev_priv->ellc_size = 128;
DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
}
-}
-static void intel_uncore_forcewake_reset(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_VALLEYVIEW(dev)) {
- vlv_force_wake_reset(dev_priv);
- } else if (INTEL_INFO(dev)->gen >= 6) {
- __gen6_gt_force_wake_reset(dev_priv);
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
- __gen6_gt_force_wake_mt_reset(dev_priv);
- }
+ intel_uncore_forcewake_reset(dev);
}
void intel_uncore_sanitize(struct drm_device *dev)
@@ -782,6 +784,7 @@ static int gen6_do_reset(struct drm_device *dev)
int intel_gpu_reset(struct drm_device *dev)
{
switch (INTEL_INFO(dev)->gen) {
+ case 8:
case 7:
case 6: return gen6_do_reset(dev);
case 5: return ironlake_do_reset(dev);
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index edcf801613e6..b3fa1ba191b7 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -59,6 +59,7 @@ nouveau-y += core/subdev/clock/nv40.o
nouveau-y += core/subdev/clock/nv50.o
nouveau-y += core/subdev/clock/nv84.o
nouveau-y += core/subdev/clock/nva3.o
+nouveau-y += core/subdev/clock/nvaa.o
nouveau-y += core/subdev/clock/nvc0.o
nouveau-y += core/subdev/clock/nve0.o
nouveau-y += core/subdev/clock/pllnv04.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index db139827047c..db3fc7be856a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -283,7 +283,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
@@ -311,7 +311,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 5f555788121c..e6352bd5b4ff 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -33,6 +33,7 @@
#include <engine/dmaobj.h>
#include <engine/fifo.h>
+#include "nv04.h"
#include "nv50.h"
/*******************************************************************************
@@ -460,6 +461,8 @@ nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_subdev(priv)->intr = nv04_fifo_intr;
nv_engine(priv)->cclass = &nv50_fifo_cclass;
nv_engine(priv)->sclass = nv50_fifo_sclass;
+ priv->base.pause = nv04_fifo_pause;
+ priv->base.start = nv04_fifo_start;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 0908dc834c84..fe0f41e65d9b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -35,6 +35,7 @@
#include <engine/dmaobj.h>
#include <engine/fifo.h>
+#include "nv04.h"
#include "nv50.h"
/*******************************************************************************
@@ -432,6 +433,8 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_subdev(priv)->intr = nv04_fifo_intr;
nv_engine(priv)->cclass = &nv84_fifo_cclass;
nv_engine(priv)->sclass = nv84_fifo_sclass;
+ priv->base.pause = nv04_fifo_pause;
+ priv->base.start = nv04_fifo_start;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index b574dd4bb828..5ce686ee729e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -176,7 +176,7 @@ nv50_software_context_ctor(struct nouveau_object *parent,
if (ret)
return ret;
- chan->vblank.nr_event = pdisp->vblank->index_nr;
+ chan->vblank.nr_event = pdisp ? pdisp->vblank->index_nr : 0;
chan->vblank.event = kzalloc(chan->vblank.nr_event *
sizeof(*chan->vblank.event), GFP_KERNEL);
if (!chan->vblank.event)
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
index e2675bc0edba..8f4ced75444a 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -14,6 +14,9 @@ enum nv_clk_src {
nv_clk_src_hclk,
nv_clk_src_hclkm3,
nv_clk_src_hclkm3d2,
+ nv_clk_src_hclkm2d3, /* NVAA */
+ nv_clk_src_hclkm4, /* NVAA */
+ nv_clk_src_cclk, /* NVAA */
nv_clk_src_host,
@@ -127,6 +130,7 @@ extern struct nouveau_oclass nv04_clock_oclass;
extern struct nouveau_oclass nv40_clock_oclass;
extern struct nouveau_oclass *nv50_clock_oclass;
extern struct nouveau_oclass *nv84_clock_oclass;
+extern struct nouveau_oclass *nvaa_clock_oclass;
extern struct nouveau_oclass nva3_clock_oclass;
extern struct nouveau_oclass nvc0_clock_oclass;
extern struct nouveau_oclass nve0_clock_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
index da50c1b12928..30c1f3a4158e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
@@ -69,6 +69,11 @@ nv04_clock_pll_prog(struct nouveau_clock *clk, u32 reg1,
return 0;
}
+static struct nouveau_clocks
+nv04_domain[] = {
+ { nv_clk_src_max }
+};
+
static int
nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -77,7 +82,7 @@ nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv04_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, NULL, &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
new file mode 100644
index 000000000000..7a723b4f564d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
@@ -0,0 +1,445 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/fifo.h>
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+#include <subdev/timer.h>
+#include <subdev/clock.h>
+
+#include "pll.h"
+
+struct nvaa_clock_priv {
+ struct nouveau_clock base;
+ enum nv_clk_src csrc, ssrc, vsrc;
+ u32 cctrl, sctrl;
+ u32 ccoef, scoef;
+ u32 cpost, spost;
+ u32 vdiv;
+};
+
+static u32
+read_div(struct nouveau_clock *clk)
+{
+ return nv_rd32(clk, 0x004600);
+}
+
+static u32
+read_pll(struct nouveau_clock *clk, u32 base)
+{
+ u32 ctrl = nv_rd32(clk, base + 0);
+ u32 coef = nv_rd32(clk, base + 4);
+ u32 ref = clk->read(clk, nv_clk_src_href);
+ u32 post_div = 0;
+ u32 clock = 0;
+ int N1, M1;
+
+ switch (base) {
+ case 0x4020:
+ post_div = 1 << ((nv_rd32(clk, 0x4070) & 0x000f0000) >> 16);
+ break;
+ case 0x4028:
+ post_div = (nv_rd32(clk, 0x4040) & 0x000f0000) >> 16;
+ break;
+ default:
+ break;
+ }
+
+ N1 = (coef & 0x0000ff00) >> 8;
+ M1 = (coef & 0x000000ff);
+ if ((ctrl & 0x80000000) && M1) {
+ clock = ref * N1 / M1;
+ clock = clock / post_div;
+ }
+
+ return clock;
+}
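
read_pll() above recovers a frequency from the coefficient register as ref * N1 / M1, then divides by the post divider read from the control registers. With made-up values, a 100 MHz reference (stored as 100000 kHz), N1 = 84, M1 = 4 and a post divider of 4 gives 525000 kHz. A standalone check (plain C, illustrative coefficients):

#include <stdio.h>

int main(void)
{
	unsigned int ref = 100000;		/* href: 100 MHz in kHz */
	unsigned int coef = (84 << 8) | 4;	/* N1 = 84, M1 = 4 */
	unsigned int post_div = 4;
	unsigned int N1 = (coef & 0x0000ff00) >> 8;
	unsigned int M1 = coef & 0x000000ff;

	printf("%u kHz\n", ref * N1 / M1 / post_div);	/* 525000 kHz */
	return 0;
}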
+
+static int
+nvaa_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+{
+ struct nvaa_clock_priv *priv = (void *)clk;
+ u32 mast = nv_rd32(clk, 0x00c054);
+ u32 P = 0;
+
+ switch (src) {
+ case nv_clk_src_crystal:
+ return nv_device(priv)->crystal;
+ case nv_clk_src_href:
+ return 100000; /* PCIE reference clock */
+ case nv_clk_src_hclkm4:
+ return clk->read(clk, nv_clk_src_href) * 4;
+ case nv_clk_src_hclkm2d3:
+ return clk->read(clk, nv_clk_src_href) * 2 / 3;
+ case nv_clk_src_host:
+ switch (mast & 0x000c0000) {
+ case 0x00000000: return clk->read(clk, nv_clk_src_hclkm2d3);
+ case 0x00040000: break;
+ case 0x00080000: return clk->read(clk, nv_clk_src_hclkm4);
+ case 0x000c0000: return clk->read(clk, nv_clk_src_cclk);
+ }
+ break;
+ case nv_clk_src_core:
+ P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16;
+
+ switch (mast & 0x00000003) {
+ case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
+ case 0x00000001: return 0;
+ case 0x00000002: return clk->read(clk, nv_clk_src_hclkm4) >> P;
+ case 0x00000003: return read_pll(clk, 0x004028) >> P;
+ }
+ break;
+ case nv_clk_src_cclk:
+ if ((mast & 0x03000000) != 0x03000000)
+ return clk->read(clk, nv_clk_src_core);
+
+ if ((mast & 0x00000200) == 0x00000000)
+ return clk->read(clk, nv_clk_src_core);
+
+ switch (mast & 0x00000c00) {
+ case 0x00000000: return clk->read(clk, nv_clk_src_href);
+ case 0x00000400: return clk->read(clk, nv_clk_src_hclkm4);
+ case 0x00000800: return clk->read(clk, nv_clk_src_hclkm2d3);
+ default: return 0;
+ }
+ case nv_clk_src_shader:
+ P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16;
+ switch (mast & 0x00000030) {
+ case 0x00000000:
+ if (mast & 0x00000040)
+ return clk->read(clk, nv_clk_src_href) >> P;
+ return clk->read(clk, nv_clk_src_crystal) >> P;
+ case 0x00000010: break;
+ case 0x00000020: return read_pll(clk, 0x004028) >> P;
+ case 0x00000030: return read_pll(clk, 0x004020) >> P;
+ }
+ break;
+ case nv_clk_src_mem:
+ return 0;
+ break;
+ case nv_clk_src_vdec:
+ P = (read_div(clk) & 0x00000700) >> 8;
+
+ switch (mast & 0x00400000) {
+ case 0x00400000:
+ return clk->read(clk, nv_clk_src_core) >> P;
+ break;
+ default:
+ return 500000 >> P;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
+ return 0;
+}
+
+static u32
+calc_pll(struct nvaa_clock_priv *priv, u32 reg,
+ u32 clock, int *N, int *M, int *P)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_pll pll;
+ struct nouveau_clock *clk = &priv->base;
+ int ret;
+
+ ret = nvbios_pll_parse(bios, reg, &pll);
+ if (ret)
+ return 0;
+
+ pll.vco2.max_freq = 0;
+ pll.refclk = clk->read(clk, nv_clk_src_href);
+ if (!pll.refclk)
+ return 0;
+
+ return nv04_pll_calc(nv_subdev(priv), &pll, clock, N, M, NULL, NULL, P);
+}
+
+static inline u32
+calc_P(u32 src, u32 target, int *div)
+{
+ u32 clk0 = src, clk1 = src;
+ for (*div = 0; *div <= 7; (*div)++) {
+ if (clk0 <= target) {
+ clk1 = clk0 << (*div ? 1 : 0);
+ break;
+ }
+ clk0 >>= 1;
+ }
+
+ if (target - clk0 <= clk1 - target)
+ return clk0;
+ (*div)--;
+ return clk1;
+}
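
calc_P() above searches the right-shift dividers 0..7 for the result nearest the target, comparing the first value at or below the target against its neighbour one shift up and keeping whichever is closer. A userspace copy you can exercise directly (plain C):

#include <stdio.h>

/* Same shape as calc_P() above: nearest src >> div to target, div in 0..7 */
static unsigned int calc_P(unsigned int src, unsigned int target, int *div)
{
	unsigned int clk0 = src, clk1 = src;

	for (*div = 0; *div <= 7; (*div)++) {
		if (clk0 <= target) {
			clk1 = clk0 << (*div ? 1 : 0);
			break;
		}
		clk0 >>= 1;
	}

	if (target - clk0 <= clk1 - target)
		return clk0;
	(*div)--;
	return clk1;
}

int main(void)
{
	int div;

	printf("%u div=%d\n", calc_P(1000000, 400000, &div), div); /* 500000 div=1 */
	printf("%u div=%d\n", calc_P(1000000, 300000, &div), div); /* 250000 div=2 */
	return 0;
}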
+
+static int
+nvaa_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+{
+ struct nvaa_clock_priv *priv = (void *)clk;
+ const int shader = cstate->domain[nv_clk_src_shader];
+ const int core = cstate->domain[nv_clk_src_core];
+ const int vdec = cstate->domain[nv_clk_src_vdec];
+ u32 out = 0, clock = 0;
+ int N, M, P1, P2 = 0;
+ int divs = 0;
+
+ /* cclk: find suitable source, disable PLL if we can */
+ if (core < clk->read(clk, nv_clk_src_hclkm4))
+ out = calc_P(clk->read(clk, nv_clk_src_hclkm4), core, &divs);
+
+ /* Calculate clock * 2, so shader clock can use it too */
+ clock = calc_pll(priv, 0x4028, (core << 1), &N, &M, &P1);
+
+ if (abs(core - out) <=
+ abs(core - (clock >> 1))) {
+ priv->csrc = nv_clk_src_hclkm4;
+ priv->cctrl = divs << 16;
+ } else {
+ /* NVCTRL is actually used _after_ NVPOST, and after what we
+ * call NVPLL. To make matters worse, NVPOST is an integer
+ * divider instead of a right-shift number. */
+ if (P1 > 2) {
+ P2 = P1 - 2;
+ P1 = 2;
+ }
+
+ priv->csrc = nv_clk_src_core;
+ priv->ccoef = (N << 8) | M;
+
+ priv->cctrl = (P2 + 1) << 16;
+ priv->cpost = (1 << P1) << 16;
+ }
+
+ /* sclk: nvpll + divisor, href or spll */
+ out = 0;
+ if (shader == clk->read(clk, nv_clk_src_href)) {
+ priv->ssrc = nv_clk_src_href;
+ } else {
+ clock = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
+ if (priv->csrc == nv_clk_src_core) {
+ out = calc_P((core << 1), shader, &divs);
+ }
+
+ if (abs(shader - out) <=
+ abs(shader - clock) &&
+ (divs + P2) <= 7) {
+ priv->ssrc = nv_clk_src_core;
+ priv->sctrl = (divs + P2) << 16;
+ } else {
+ priv->ssrc = nv_clk_src_shader;
+ priv->scoef = (N << 8) | M;
+ priv->sctrl = P1 << 16;
+ }
+ }
+
+ /* vclk */
+ out = calc_P(core, vdec, &divs);
+ clock = calc_P(500000, vdec, &P1);
+ if (abs(vdec - out) <=
+ abs(vdec - clock)) {
+ priv->vsrc = nv_clk_src_cclk;
+ priv->vdiv = divs << 16;
+ } else {
+ priv->vsrc = nv_clk_src_vdec;
+ priv->vdiv = P1 << 16;
+ }
+
+ /* Print strategy! */
+ nv_debug(priv, "nvpll: %08x %08x %08x\n",
+ priv->ccoef, priv->cpost, priv->cctrl);
+ nv_debug(priv, " spll: %08x %08x %08x\n",
+ priv->scoef, priv->spost, priv->sctrl);
+ nv_debug(priv, " vdiv: %08x\n", priv->vdiv);
+ if (priv->csrc == nv_clk_src_hclkm4)
+ nv_debug(priv, "core: hrefm4\n");
+ else
+ nv_debug(priv, "core: nvpll\n");
+
+ if (priv->ssrc == nv_clk_src_hclkm4)
+ nv_debug(priv, "shader: hrefm4\n");
+ else if (priv->ssrc == nv_clk_src_core)
+ nv_debug(priv, "shader: nvpll\n");
+ else
+ nv_debug(priv, "shader: spll\n");
+
+ if (priv->vsrc == nv_clk_src_hclkm4)
+ nv_debug(priv, "vdec: 500MHz\n");
+ else
+ nv_debug(priv, "vdec: core\n");
+
+ return 0;
+}
+
+static int
+nvaa_clock_prog(struct nouveau_clock *clk)
+{
+ struct nvaa_clock_priv *priv = (void *)clk;
+ struct nouveau_fifo *pfifo = nouveau_fifo(clk);
+ unsigned long flags;
+ u32 pllmask = 0, mast, ptherm_gate;
+ int ret = -EBUSY;
+
+ /* halt and idle execution engines */
+ ptherm_gate = nv_mask(clk, 0x020060, 0x00070000, 0x00000000);
+ nv_mask(clk, 0x002504, 0x00000001, 0x00000001);
+ /* Wait until the interrupt handler is finished */
+ if (!nv_wait(clk, 0x000100, 0xffffffff, 0x00000000))
+ goto resume;
+
+ if (pfifo)
+ pfifo->pause(pfifo, &flags);
+
+ if (!nv_wait(clk, 0x002504, 0x00000010, 0x00000010))
+ goto resume;
+ if (!nv_wait(clk, 0x00251c, 0x0000003f, 0x0000003f))
+ goto resume;
+
+ /* First switch to safe clocks: href */
+ mast = nv_mask(clk, 0xc054, 0x03400e70, 0x03400640);
+ mast &= ~0x00400e73;
+ mast |= 0x03000000;
+
+ switch (priv->csrc) {
+ case nv_clk_src_hclkm4:
+ nv_mask(clk, 0x4028, 0x00070000, priv->cctrl);
+ mast |= 0x00000002;
+ break;
+ case nv_clk_src_core:
+ nv_wr32(clk, 0x402c, priv->ccoef);
+ nv_wr32(clk, 0x4028, 0x80000000 | priv->cctrl);
+ nv_wr32(clk, 0x4040, priv->cpost);
+ pllmask |= (0x3 << 8);
+ mast |= 0x00000003;
+ break;
+ default:
+ nv_warn(priv,"Reclocking failed: unknown core clock\n");
+ goto resume;
+ }
+
+ switch (priv->ssrc) {
+ case nv_clk_src_href:
+ nv_mask(clk, 0x4020, 0x00070000, 0x00000000);
+ /* mast |= 0x00000000; */
+ break;
+ case nv_clk_src_core:
+ nv_mask(clk, 0x4020, 0x00070000, priv->sctrl);
+ mast |= 0x00000020;
+ break;
+ case nv_clk_src_shader:
+ nv_wr32(clk, 0x4024, priv->scoef);
+ nv_wr32(clk, 0x4020, 0x80000000 | priv->sctrl);
+ nv_wr32(clk, 0x4070, priv->spost);
+ pllmask |= (0x3 << 12);
+ mast |= 0x00000030;
+ break;
+ default:
+ nv_warn(priv,"Reclocking failed: unknown sclk clock\n");
+ goto resume;
+ }
+
+ if (!nv_wait(clk, 0x004080, pllmask, pllmask)) {
+ nv_warn(priv,"Reclocking failed: unstable PLLs\n");
+ goto resume;
+ }
+
+ switch (priv->vsrc) {
+ case nv_clk_src_cclk:
+ mast |= 0x00400000;
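+ /* fall through */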
+ default:
+ nv_wr32(clk, 0x4600, priv->vdiv);
+ }
+
+ nv_wr32(clk, 0xc054, mast);
+ ret = 0;
+
+resume:
+ if (pfifo)
+ pfifo->start(pfifo, &flags);
+
+ nv_mask(clk, 0x002504, 0x00000001, 0x00000000);
+ nv_wr32(clk, 0x020060, ptherm_gate);
+
+ /* Disable some PLLs and dividers when unused */
+ if (priv->csrc != nv_clk_src_core) {
+ nv_wr32(clk, 0x4040, 0x00000000);
+ nv_mask(clk, 0x4028, 0x80000000, 0x00000000);
+ }
+
+ if (priv->ssrc != nv_clk_src_shader) {
+ nv_wr32(clk, 0x4070, 0x00000000);
+ nv_mask(clk, 0x4020, 0x80000000, 0x00000000);
+ }
+
+ return ret;
+}
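nvaa_clock_prog() above depends on nv_mask() being a read-modify-write that hands back the register's previous contents; that is why the code can capture mast from the 0xc054 write, strip the mux bits, and rebuild the value before the final nv_wr32(). A sketch of that assumed behaviour, with placeholder MMIO accessors rather than the driver's own:

	#include <stdint.h>

	extern uint32_t reg_read(uint32_t addr);		/* placeholder MMIO read */
	extern void reg_write(uint32_t addr, uint32_t val);	/* placeholder MMIO write */

	/* Clear the bits in 'mask', set the bits in 'data', and return the
	 * value the register held before the write. */
	static uint32_t reg_mask(uint32_t addr, uint32_t mask, uint32_t data)
	{
		uint32_t old = reg_read(addr);

		reg_write(addr, (old & ~mask) | data);
		return old;
	}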
+
+static void
+nvaa_clock_tidy(struct nouveau_clock *clk)
+{
+}
+
+static struct nouveau_clocks
+nvaa_domains[] = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_href , 0xff },
+ { nv_clk_src_core , 0xff, 0, "core", 1000 },
+ { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+ { nv_clk_src_vdec , 0xff, 0, "vdec", 1000 },
+ { nv_clk_src_max }
+};
+
+static int
+nvaa_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvaa_clock_priv *priv;
+ int ret;
+
+ ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.read = nvaa_clock_read;
+ priv->base.calc = nvaa_clock_calc;
+ priv->base.prog = nvaa_clock_prog;
+ priv->base.tidy = nvaa_clock_tidy;
+ return 0;
+}
+
+struct nouveau_oclass *
+nvaa_clock_oclass = &(struct nouveau_oclass) {
+ .handle = NV_SUBDEV(CLOCK, 0xaa),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvaa_clock_ctor,
+ .dtor = _nouveau_clock_dtor,
+ .init = _nouveau_clock_init,
+ .fini = _nouveau_clock_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
index e286e132c7e7..129120473f6c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
@@ -116,7 +116,7 @@ mxm_shadow_dsm(struct nouveau_mxm *mxm, u8 version)
acpi_handle handle;
int ret;
- handle = DEVICE_ACPI_HANDLE(&device->pdev->dev);
+ handle = ACPI_HANDLE(&device->pdev->dev);
if (!handle)
return false;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 3618ac6b6316..32e7064b819b 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -58,8 +58,8 @@ struct nouveau_plane {
};
static uint32_t formats[] = {
- DRM_FORMAT_NV12,
DRM_FORMAT_UYVY,
+ DRM_FORMAT_NV12,
};
/* Sine can be approximated with
@@ -99,13 +99,28 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_bo *cur = nv_plane->cur;
bool flip = nv_plane->flip;
- int format = ALIGN(src_w * 4, 0x100);
int soff = NV_PCRTC0_SIZE * nv_crtc->index;
int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index;
- int ret;
+ int format, ret;
+
+ /* Source parameters given in 16.16 fixed point, ignore fractional. */
+ src_x >>= 16;
+ src_y >>= 16;
+ src_w >>= 16;
+ src_h >>= 16;
+
+ format = ALIGN(src_w * 4, 0x100);
if (format > 0xffff)
- return -EINVAL;
+ return -ERANGE;
+
+ if (dev->chipset >= 0x30) {
+ if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1))
+ return -ERANGE;
+ } else {
+ if (crtc_w < (src_w >> 3) || crtc_h < (src_h >> 3))
+ return -ERANGE;
+ }
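The hunk above moves the 16.16 fixed-point conversion ahead of the pitch computation and adds per-chipset downscale limits. A self-contained sketch of that validation, using illustrative names and the limits from the checks above:

	#include <stdbool.h>
	#include <stdint.h>

	/* Drop the fractional part of a 16.16 fixed-point dimension, then
	 * enforce the maximum downscale: 1:2 on chipsets >= 0x30 in the
	 * hunk above, 1:8 on the older ones. */
	static bool overlay_scale_ok(uint32_t src_w_fixed, uint32_t src_h_fixed,
				     uint32_t crtc_w, uint32_t crtc_h, bool new_chip)
	{
		uint32_t src_w = src_w_fixed >> 16;	/* 16.16 -> integer */
		uint32_t src_h = src_h_fixed >> 16;
		unsigned int shift = new_chip ? 1 : 3;

		return crtc_w >= (src_w >> shift) && crtc_h >= (src_h >> shift);
	}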
ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM);
if (ret)
@@ -113,12 +128,6 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
nv_plane->cur = nv_fb->nvbo;
- /* Source parameters given in 16.16 fixed point, ignore fractional. */
- src_x = src_x >> 16;
- src_y = src_y >> 16;
- src_w = src_w >> 16;
- src_h = src_h >> 16;
-
nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY);
nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0);
@@ -245,14 +254,25 @@ nv10_overlay_init(struct drm_device *device)
{
struct nouveau_device *dev = nouveau_dev(device);
struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
+ int num_formats = ARRAY_SIZE(formats);
int ret;
if (!plane)
return;
+ switch (dev->chipset) {
+ case 0x10:
+ case 0x11:
+ case 0x15:
+ case 0x1a:
+ case 0x20:
+ num_formats = 1;
+ break;
+ }
+
ret = drm_plane_init(device, &plane->base, 3 /* both crtc's */,
&nv10_plane_funcs,
- formats, ARRAY_SIZE(formats), false);
+ formats, num_formats, false);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 07273a2ae62f..95c740454049 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -256,7 +256,7 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
acpi_handle dhandle;
int retval = 0;
- dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
@@ -414,7 +414,7 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected)
return false;
- dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
@@ -448,7 +448,7 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
return NULL;
}
- handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+ handle = ACPI_HANDLE(&dev->pdev->dev);
if (!handle)
return NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7809d92183c4..29c3efdfc7dd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -608,6 +608,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
fence = nouveau_fence_ref(new_bo->bo.sync_obj);
spin_unlock(&new_bo->bo.bdev->fence_lock);
ret = nouveau_fence_sync(fence, chan);
+ nouveau_fence_unref(&fence);
if (ret)
return ret;
@@ -701,7 +702,7 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
if (s->event)
- drm_send_vblank_event(dev, -1, s->event);
+ drm_send_vblank_event(dev, s->crtc, s->event);
list_del(&s->head);
if (ps)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 7a3759f1c41a..98a22e6e27a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -858,6 +858,12 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
if (nouveau_runtime_pm == 0)
return -EINVAL;
+ /* are we optimus / dsm enabled? */
+ if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
+ DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+ return -EINVAL;
+ }
+
nv_debug_level(SILENT);
drm_kms_helper_poll_disable(drm_dev);
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 38a4db5bfe21..4aff04fa483c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -630,7 +630,6 @@ error:
hwmon->hwmon = NULL;
return ret;
#else
- hwmon->hwmon = NULL;
return 0;
#endif
}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f8e66c08b11a..4e384a2f99c3 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1265,7 +1265,7 @@ nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
uint32_t start, uint32_t size)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 end = max(start + size, (u32)256);
+ u32 end = min_t(u32, start + size, 256);
u32 i;
for (i = start; i < end; i++) {
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index 037d324bf58f..66ac0ff95f5a 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -8,5 +8,6 @@ config DRM_QXL
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_TTM
+ select CRC32
help
QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting.
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 5e827c29d194..d70aafb83307 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -24,7 +24,7 @@
*/
-#include "linux/crc32.h"
+#include <linux/crc32.h>
#include "qxl_drv.h"
#include "qxl_object.h"
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 0109a9644cb2..821ab7b9409b 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -92,6 +92,7 @@ qxl_release_free(struct qxl_device *qdev,
- DRM_FILE_OFFSET);
qxl_fence_remove_release(&bo->fence, release->id);
qxl_bo_unref(&bo);
+ kfree(entry);
}
spin_lock(&qdev->release_idr_lock);
idr_remove(&qdev->release_idr, release->id);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 80a20120e625..b1970596a782 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1196,7 +1196,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
} else if ((rdev->family == CHIP_TAHITI) ||
(rdev->family == CHIP_PITCAIRN))
fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
- else if (rdev->family == CHIP_VERDE)
+ else if ((rdev->family == CHIP_VERDE) ||
+ (rdev->family == CHIP_OLAND) ||
+ (rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */
fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
switch (radeon_crtc->crtc_id) {
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index deaf98cdca3a..f685035dbe39 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -44,7 +44,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
unsigned char *base;
- u16 out;
+ u16 out = cpu_to_le16(0);
memset(&args, 0, sizeof(args));
@@ -55,9 +55,14 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
return -EINVAL;
}
- args.ucRegIndex = buf[0];
- if (num > 1)
- memcpy(&out, &buf[1], num - 1);
+ if (buf == NULL)
+ args.ucRegIndex = 0;
+ else
+ args.ucRegIndex = buf[0];
+ if (num)
+ num--;
+ if (num)
+ memcpy(&out, &buf[1], num);
args.lpI2CDataOut = cpu_to_le16(out);
} else {
if (num > ATOM_MAX_HW_I2C_READ) {
@@ -94,14 +99,14 @@ int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
struct i2c_msg *p;
int i, remaining, current_count, buffer_offset, max_bytes, ret;
- u8 buf = 0, flags;
+ u8 flags;
/* check for bus probe */
p = &msgs[0];
if ((num == 1) && (p->len == 0)) {
ret = radeon_process_i2c_ch(i2c,
p->addr, HW_I2C_WRITE,
- &buf, 1);
+ NULL, 0);
if (ret)
return ret;
else
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index ae92aa041c6a..b43a3a3c9067 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -1560,17 +1560,17 @@ u32 cik_get_xclk(struct radeon_device *rdev)
* cik_mm_rdoorbell - read a doorbell dword
*
* @rdev: radeon_device pointer
- * @offset: byte offset into the aperture
+ * @index: doorbell index
*
* Returns the value in the doorbell aperture at the
- * requested offset (CIK).
+ * requested doorbell index (CIK).
*/
-u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset)
+u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index)
{
- if (offset < rdev->doorbell.size) {
- return readl(((void __iomem *)rdev->doorbell.ptr) + offset);
+ if (index < rdev->doorbell.num_doorbells) {
+ return readl(rdev->doorbell.ptr + index);
} else {
- DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", offset);
+ DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
return 0;
}
}
@@ -1579,18 +1579,18 @@ u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset)
* cik_mm_wdoorbell - write a doorbell dword
*
* @rdev: radeon_device pointer
- * @offset: byte offset into the aperture
+ * @index: doorbell index
* @v: value to write
*
* Writes @v to the doorbell aperture at the
- * requested offset (CIK).
+ * requested doorbell index (CIK).
*/
-void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v)
+void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v)
{
- if (offset < rdev->doorbell.size) {
- writel(v, ((void __iomem *)rdev->doorbell.ptr) + offset);
+ if (index < rdev->doorbell.num_doorbells) {
+ writel(v, rdev->doorbell.ptr + index);
} else {
- DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", offset);
+ DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
}
}
@@ -2427,6 +2427,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
+ rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else if (num_pipe_configs == 4) {
@@ -2773,6 +2774,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
+ rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else if (num_pipe_configs == 2) {
@@ -2990,6 +2992,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
+ rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else
@@ -3556,17 +3559,24 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, 0);
}
-void cik_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
+/* TODO: figure out why semaphores cause lockups */
+#if 0
uint64_t addr = semaphore->gpu_addr;
unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
+
+ return true;
+#else
+ return false;
+#endif
}
/**
@@ -3609,13 +3619,8 @@ int cik_copy_cpdma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
@@ -4052,7 +4057,7 @@ void cik_compute_ring_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr);
- WDOORBELL32(ring->doorbell_offset, ring->wptr);
+ WDOORBELL32(ring->doorbell_index, ring->wptr);
}
/**
@@ -4393,10 +4398,6 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
return r;
}
- /* doorbell offset */
- rdev->ring[idx].doorbell_offset =
- (rdev->ring[idx].doorbell_page_num * PAGE_SIZE) + 0;
-
/* init the mqd struct */
memset(buf, 0, sizeof(struct bonaire_mqd));
@@ -4508,7 +4509,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_OFFSET_MASK;
mqd->queue_state.cp_hqd_pq_doorbell_control |=
- DOORBELL_OFFSET(rdev->ring[idx].doorbell_offset / 4);
+ DOORBELL_OFFSET(rdev->ring[idx].doorbell_index);
mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
mqd->queue_state.cp_hqd_pq_doorbell_control &=
~(DOORBELL_SOURCE | DOORBELL_HIT);
@@ -7839,14 +7840,14 @@ int cik_init(struct radeon_device *rdev)
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
- r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
+ r = radeon_doorbell_get(rdev, &ring->doorbell_index);
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
- r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
+ r = radeon_doorbell_get(rdev, &ring->doorbell_index);
if (r)
return r;
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 9c9529de20ee..d08b83c6267b 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -130,7 +130,7 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
* Add a DMA semaphore packet to the ring wait on or signal
* other rings (CIK).
*/
-void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@@ -141,6 +141,8 @@ void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
radeon_ring_write(ring, addr & 0xfffffff8);
radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+
+ return true;
}
/**
@@ -443,13 +445,8 @@ int cik_copy_dma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
@@ -461,7 +458,7 @@ int cik_copy_dma(struct radeon_device *rdev,
radeon_ring_write(ring, 0); /* src/dst endian swap */
radeon_ring_write(ring, src_offset & 0xffffffff);
radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
- radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, dst_offset & 0xffffffff);
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
src_offset += cur_size_in_bytes;
dst_offset += cur_size_in_bytes;
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 91bb470de0a3..920e1e4a52c5 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -299,7 +299,9 @@ void cypress_program_response_times(struct radeon_device *rdev)
static int cypress_pcie_performance_request(struct radeon_device *rdev,
u8 perf_req, bool advertise)
{
+#if defined(CONFIG_ACPI)
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+#endif
u32 tmp;
udelay(10);
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 009f46e0ce72..713a5d359901 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -93,11 +93,13 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u32 offset = dig->afmt->offset;
+ u32 offset;
- if (!dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->afmt->pin)
return;
+ offset = dig->afmt->offset;
+
WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
}
@@ -112,7 +114,7 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
struct radeon_connector *radeon_connector = NULL;
u32 tmp = 0, offset;
- if (!dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->afmt->pin)
return;
offset = dig->afmt->pin->offset;
@@ -156,7 +158,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
u8 *sadb;
int sad_count;
- if (!dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->afmt->pin)
return;
offset = dig->afmt->pin->offset;
@@ -172,7 +174,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
- if (sad_count < 0) {
+ if (sad_count <= 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
return;
}
@@ -217,7 +219,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
};
- if (!dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->afmt->pin)
return;
offset = dig->afmt->pin->offset;
@@ -233,7 +235,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
- if (sad_count < 0) {
+ if (sad_count <= 0) {
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
return;
}
@@ -306,7 +308,9 @@ int dce6_audio_init(struct radeon_device *rdev)
rdev->audio.enabled = true;
if (ASIC_IS_DCE8(rdev))
- rdev->audio.num_pins = 7;
+ rdev->audio.num_pins = 6;
+ else if (ASIC_IS_DCE61(rdev))
+ rdev->audio.num_pins = 4;
else
rdev->audio.num_pins = 6;
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index 6a0656d00ed0..a37b54436382 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -131,13 +131,8 @@ int evergreen_copy_dma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index aa695c4feb3d..0c6d5cef4cf1 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -118,7 +118,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
- if (sad_count < 0) {
+ if (sad_count <= 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
return;
}
@@ -173,7 +173,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
- if (sad_count < 0) {
+ if (sad_count <= 0) {
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
return;
}
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 11aab2ab54ce..f59a9e9fccf8 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -895,6 +895,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x999C)) {
rdev->config.cayman.max_simds_per_se = 6;
rdev->config.cayman.max_backends_per_se = 2;
+ rdev->config.cayman.max_hw_contexts = 8;
+ rdev->config.cayman.sx_max_export_size = 256;
+ rdev->config.cayman.sx_max_export_pos_size = 64;
+ rdev->config.cayman.sx_max_export_smx_size = 192;
} else if ((rdev->pdev->device == 0x9903) ||
(rdev->pdev->device == 0x9904) ||
(rdev->pdev->device == 0x990A) ||
@@ -905,6 +909,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x999D)) {
rdev->config.cayman.max_simds_per_se = 4;
rdev->config.cayman.max_backends_per_se = 2;
+ rdev->config.cayman.max_hw_contexts = 8;
+ rdev->config.cayman.sx_max_export_size = 256;
+ rdev->config.cayman.sx_max_export_pos_size = 64;
+ rdev->config.cayman.sx_max_export_smx_size = 192;
} else if ((rdev->pdev->device == 0x9919) ||
(rdev->pdev->device == 0x9990) ||
(rdev->pdev->device == 0x9991) ||
@@ -915,9 +923,17 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x99A0)) {
rdev->config.cayman.max_simds_per_se = 3;
rdev->config.cayman.max_backends_per_se = 1;
+ rdev->config.cayman.max_hw_contexts = 4;
+ rdev->config.cayman.sx_max_export_size = 128;
+ rdev->config.cayman.sx_max_export_pos_size = 32;
+ rdev->config.cayman.sx_max_export_smx_size = 96;
} else {
rdev->config.cayman.max_simds_per_se = 2;
rdev->config.cayman.max_backends_per_se = 1;
+ rdev->config.cayman.max_hw_contexts = 4;
+ rdev->config.cayman.sx_max_export_size = 128;
+ rdev->config.cayman.sx_max_export_pos_size = 32;
+ rdev->config.cayman.sx_max_export_smx_size = 96;
}
rdev->config.cayman.max_texture_channel_caches = 2;
rdev->config.cayman.max_gprs = 256;
@@ -925,10 +941,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.max_gs_threads = 32;
rdev->config.cayman.max_stack_entries = 512;
rdev->config.cayman.sx_num_of_sets = 8;
- rdev->config.cayman.sx_max_export_size = 256;
- rdev->config.cayman.sx_max_export_pos_size = 64;
- rdev->config.cayman.sx_max_export_smx_size = 192;
- rdev->config.cayman.max_hw_contexts = 8;
rdev->config.cayman.sq_num_cf_insts = 2;
rdev->config.cayman.sc_prim_fifo_size = 0x40;
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index f26339028154..49c4d48f54d6 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -785,8 +785,8 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
struct ni_ps *ps = ni_get_ps(rps);
struct radeon_clock_and_voltage_limits *max_limits;
bool disable_mclk_switching;
- u32 mclk, sclk;
- u16 vddc, vddci;
+ u32 mclk;
+ u16 vddci;
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
int i;
@@ -839,24 +839,14 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
/* XXX validate the min clocks required for display */
+ /* adjust low state */
if (disable_mclk_switching) {
- mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
- sclk = ps->performance_levels[0].sclk;
- vddc = ps->performance_levels[0].vddc;
- vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
- } else {
- sclk = ps->performance_levels[0].sclk;
- mclk = ps->performance_levels[0].mclk;
- vddc = ps->performance_levels[0].vddc;
- vddci = ps->performance_levels[0].vddci;
+ ps->performance_levels[0].mclk =
+ ps->performance_levels[ps->performance_level_count - 1].mclk;
+ ps->performance_levels[0].vddci =
+ ps->performance_levels[ps->performance_level_count - 1].vddci;
}
- /* adjusted low state */
- ps->performance_levels[0].sclk = sclk;
- ps->performance_levels[0].mclk = mclk;
- ps->performance_levels[0].vddc = vddc;
- ps->performance_levels[0].vddci = vddci;
-
btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
&ps->performance_levels[0].sclk,
&ps->performance_levels[0].mclk);
@@ -868,11 +858,15 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
}
+ /* adjust remaining states */
if (disable_mclk_switching) {
mclk = ps->performance_levels[0].mclk;
+ vddci = ps->performance_levels[0].vddci;
for (i = 1; i < ps->performance_level_count; i++) {
if (mclk < ps->performance_levels[i].mclk)
mclk = ps->performance_levels[i].mclk;
+ if (vddci < ps->performance_levels[i].vddci)
+ vddci = ps->performance_levels[i].vddci;
}
for (i = 0; i < ps->performance_level_count; i++) {
ps->performance_levels[i].mclk = mclk;
@@ -3445,9 +3439,9 @@ static int ni_enable_smc_cac(struct radeon_device *rdev,
static int ni_pcie_performance_request(struct radeon_device *rdev,
u8 perf_req, bool advertise)
{
+#if defined(CONFIG_ACPI)
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
-#if defined(CONFIG_ACPI)
if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
(perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
if (eg_pi->pcie_performance_request_registered == false)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 784983d78158..10abc4d5a6cc 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -869,13 +869,14 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}
-void r100_semaphore_ring_emit(struct radeon_device *rdev,
+bool r100_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
/* Unused on older asics, since we don't have semaphores or multiple rings */
BUG();
+ return false;
}
int r100_copy_blit(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 4e609e8a8d2b..9ad06732a78b 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2650,7 +2650,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
}
}
-void r600_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@@ -2664,6 +2664,8 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
+
+ return true;
}
/**
@@ -2706,13 +2708,8 @@ int r600_copy_cpdma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index 3b317456512a..7844d15c139f 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -311,7 +311,7 @@ void r600_dma_fence_ring_emit(struct radeon_device *rdev,
* Add a DMA semaphore packet to the ring wait on or signal
* other rings (r6xx-SI).
*/
-void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@@ -322,6 +322,8 @@ void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
radeon_ring_write(ring, addr & 0xfffffffc);
radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+
+ return true;
}
/**
@@ -462,13 +464,8 @@ int r600_copy_dma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 4b89262f3f0e..b7d3ecba43e3 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -304,9 +304,9 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
}
- } else if (ASIC_IS_DCE3(rdev)) {
+ } else {
/* according to the reg specs, this should be DCE3.2 only, but in
- * practice it seems to cover DCE3.0/3.1 as well.
+ * practice it seems to cover DCE2.0/3.0/3.1 as well.
*/
if (dig->dig_encoder == 0) {
WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
@@ -317,10 +317,6 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
}
- } else {
- /* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */
- WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
- AUDIO_DTO_MODULE(clock / 10));
}
}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b9ee99258602..b1f990d0eaa1 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -348,6 +348,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, i
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
+int radeon_fence_wait_locked(struct radeon_fence *fence);
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_any(struct radeon_device *rdev,
@@ -548,17 +549,20 @@ struct radeon_semaphore {
struct radeon_sa_bo *sa_bo;
signed waiters;
uint64_t gpu_addr;
+ struct radeon_fence *sync_to[RADEON_NUM_RINGS];
};
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore);
-void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
-void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
+void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
+ struct radeon_fence *fence);
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
- int signaler, int waiter);
+ int waiting_ring);
void radeon_semaphore_free(struct radeon_device *rdev,
struct radeon_semaphore **semaphore,
struct radeon_fence *fence);
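The new radeon_semaphore_sync_to()/radeon_semaphore_sync_rings() pair replaces the open-coded radeon_fence_need_sync()/note_sync() sequence throughout this patch. A plausible userspace model of the dependency-collection half, assuming the semaphore keeps the newest fence per ring as the added sync_to[RADEON_NUM_RINGS] member suggests (names are illustrative):

	#define NUM_RINGS 8				/* stand-in for RADEON_NUM_RINGS */

	struct fence { int ring; unsigned long seq; };

	struct semaphore_deps {
		struct fence *sync_to[NUM_RINGS];	/* newest dependency per ring */
	};

	/* Model of radeon_semaphore_sync_to(): remember only the later of
	 * the already-recorded fence and the new one for that ring. */
	static void semaphore_sync_to(struct semaphore_deps *sem, struct fence *f)
	{
		struct fence *other;

		if (!f)
			return;

		other = sem->sync_to[f->ring];
		sem->sync_to[f->ring] = (other && other->seq > f->seq) ? other : f;
	}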
@@ -645,13 +649,15 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
/*
* GPU doorbell structures, functions & helpers
*/
+#define RADEON_MAX_DOORBELLS 1024 /* Reserve at most 1024 doorbell slots for radeon-owned rings. */
+
struct radeon_doorbell {
- u32 num_pages;
- bool free[1024];
/* doorbell mmio */
- resource_size_t base;
- resource_size_t size;
- void __iomem *ptr;
+ resource_size_t base;
+ resource_size_t size;
+ u32 __iomem *ptr;
+ u32 num_doorbells; /* Number of doorbells actually reserved for radeon. */
+ unsigned long used[DIV_ROUND_UP(RADEON_MAX_DOORBELLS, BITS_PER_LONG)];
};
int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
@@ -765,7 +771,6 @@ struct radeon_ib {
struct radeon_fence *fence;
struct radeon_vm *vm;
bool is_const_ib;
- struct radeon_fence *sync_to[RADEON_NUM_RINGS];
struct radeon_semaphore *semaphore;
};
@@ -799,8 +804,7 @@ struct radeon_ring {
u32 pipe;
u32 queue;
struct radeon_bo *mqd_obj;
- u32 doorbell_page_num;
- u32 doorbell_offset;
+ u32 doorbell_index;
unsigned wptr_offs;
};
@@ -921,7 +925,6 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, struct radeon_vm *vm,
unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
-void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
struct radeon_ib *const_ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
@@ -1638,7 +1641,7 @@ struct radeon_asic_ring {
/* command emmit functions */
void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
- void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
+ bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
struct radeon_semaphore *semaphore, bool emit_wait);
void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
@@ -1979,6 +1982,7 @@ struct cik_asic {
unsigned tile_config;
uint32_t tile_mode_array[32];
+ uint32_t macrotile_mode_array[16];
};
union radeon_asic_config {
@@ -2239,8 +2243,8 @@ void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
-u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset);
-void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
+u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index);
+void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
/*
* Cast helper
@@ -2303,8 +2307,8 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
-#define RDOORBELL32(offset) cik_mm_rdoorbell(rdev, (offset))
-#define WDOORBELL32(offset, v) cik_mm_wdoorbell(rdev, (offset), (v))
+#define RDOORBELL32(index) cik_mm_rdoorbell(rdev, (index))
+#define WDOORBELL32(index, v) cik_mm_wdoorbell(rdev, (index), (v))
/*
* Indirect registers accessor
@@ -2706,10 +2710,10 @@ void radeon_vm_fence(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_fence *fence);
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
-int radeon_vm_bo_update_pte(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo,
- struct ttm_mem_reg *mem);
+int radeon_vm_bo_update(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo,
+ struct ttm_mem_reg *mem);
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo *bo);
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 10f98c7742d8..98a9074b306b 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -369,7 +369,7 @@ int radeon_atif_handler(struct radeon_device *rdev,
return NOTIFY_DONE;
/* Check pending SBIOS requests */
- handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ handle = ACPI_HANDLE(&rdev->pdev->dev);
count = radeon_atif_get_sbios_requests(handle, &req);
if (count <= 0)
@@ -556,7 +556,7 @@ int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev)
struct radeon_atcs *atcs = &rdev->atcs;
/* Get the device handle */
- handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ handle = ACPI_HANDLE(&rdev->pdev->dev);
if (!handle)
return -EINVAL;
@@ -596,7 +596,7 @@ int radeon_acpi_pcie_performance_request(struct radeon_device *rdev,
u32 retry = 3;
/* Get the device handle */
- handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ handle = ACPI_HANDLE(&rdev->pdev->dev);
if (!handle)
return -EINVAL;
@@ -699,7 +699,7 @@ int radeon_acpi_init(struct radeon_device *rdev)
int ret;
/* Get the device handle */
- handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ handle = ACPI_HANDLE(&rdev->pdev->dev);
/* No need to proceed if we're sure that ATIF is not supported */
if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 50853c0cb49d..c0425bb6223a 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2015,11 +2015,13 @@ static struct radeon_asic ci_asic = {
.bandwidth_update = &dce8_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &evergreen_hdmi_enable,
.hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = NULL,
+ .blit = &cik_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &cik_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2114,11 +2116,13 @@ static struct radeon_asic kv_asic = {
.bandwidth_update = &dce8_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &evergreen_hdmi_enable,
.hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = NULL,
+ .blit = &cik_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &cik_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index f2833ee3a613..c9fd97b58076 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -80,7 +80,7 @@ int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void r100_semaphore_ring_emit(struct radeon_device *rdev,
+bool r100_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
@@ -313,13 +313,13 @@ int r600_cs_parse(struct radeon_cs_parser *p);
int r600_dma_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void r600_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
void r600_dma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
@@ -566,10 +566,6 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev,
*/
void cayman_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait);
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
int cayman_init(struct radeon_device *rdev);
void cayman_fini(struct radeon_device *rdev);
@@ -697,7 +693,7 @@ void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
@@ -717,7 +713,7 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void cik_fence_compute_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void cik_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
@@ -807,7 +803,7 @@ void uvd_v1_0_stop(struct radeon_device *rdev);
int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
-void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
@@ -819,7 +815,7 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
/* uvd v3.1 */
-void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f79ee184ffd5..5c39bf7c3d88 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2918,7 +2918,7 @@ int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
mpll_param->dll_speed = args.ucDllSpeed;
mpll_param->bwcntl = args.ucBWCntl;
mpll_param->vco_mode =
- (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK) ? 1 : 0;
+ (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK);
mpll_param->yclk_sel =
(args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0;
mpll_param->qdr =
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 6153ec18943a..9d302eaeea15 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -8,8 +8,7 @@
*/
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
-#include <acpi/acpi.h>
-#include <acpi/acpi_bus.h>
+#include <linux/acpi.h>
#include <linux/pci.h>
#include "radeon_acpi.h"
@@ -447,7 +446,7 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
acpi_handle dhandle, atpx_handle;
acpi_status status;
- dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
@@ -493,7 +492,7 @@ static int radeon_atpx_init(void)
*/
static int radeon_atpx_get_client_id(struct pci_dev *pdev)
{
- if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+ if (radeon_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev))
return VGA_SWITCHEROO_IGD;
else
return VGA_SWITCHEROO_DIS;
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index c155d6f3fa68..b3633d9a5317 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -185,7 +185,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
return false;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
- dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
continue;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 26ca223d12d6..0b366169d64d 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -159,7 +159,8 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
if (!p->relocs[i].robj)
continue;
- radeon_ib_sync_to(&p->ib, p->relocs[i].robj->tbo.sync_obj);
+ radeon_semaphore_sync_to(p->ib.semaphore,
+ p->relocs[i].robj->tbo.sync_obj);
}
}
@@ -359,13 +360,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
struct radeon_bo *bo;
int r;
- r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
+ r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
if (r) {
return r;
}
list_for_each_entry(lobj, &parser->validated, tv.head) {
bo = lobj->bo;
- r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
+ r = radeon_vm_bo_update(parser->rdev, vm, bo, &bo->tbo.mem);
if (r) {
return r;
}
@@ -411,9 +412,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
goto out;
}
radeon_cs_sync_rings(parser);
- radeon_ib_sync_to(&parser->ib, vm->fence);
- radeon_ib_sync_to(&parser->ib, radeon_vm_grab_id(
- rdev, vm, parser->ring));
+ radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence);
+ radeon_semaphore_sync_to(parser->ib.semaphore,
+ radeon_vm_grab_id(rdev, vm, parser->ring));
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index b9234c43f43d..39b033b441d2 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -251,28 +251,23 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
*/
int radeon_doorbell_init(struct radeon_device *rdev)
{
- int i;
-
/* doorbell bar mapping */
rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
- /* limit to 4 MB for now */
- if (rdev->doorbell.size > (4 * 1024 * 1024))
- rdev->doorbell.size = 4 * 1024 * 1024;
+ rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
+ if (rdev->doorbell.num_doorbells == 0)
+ return -EINVAL;
- rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.size);
+ rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
if (rdev->doorbell.ptr == NULL) {
return -ENOMEM;
}
DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
- rdev->doorbell.num_pages = rdev->doorbell.size / PAGE_SIZE;
+ memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
- for (i = 0; i < rdev->doorbell.num_pages; i++) {
- rdev->doorbell.free[i] = true;
- }
return 0;
}
@@ -290,40 +285,38 @@ void radeon_doorbell_fini(struct radeon_device *rdev)
}
/**
- * radeon_doorbell_get - Allocate a doorbell page
+ * radeon_doorbell_get - Allocate a doorbell entry
*
* @rdev: radeon_device pointer
- * @doorbell: doorbell page number
+ * @doorbell: doorbell index
*
- * Allocate a doorbell page for use by the driver (all asics).
+ * Allocate a doorbell for use by the driver (all asics).
* Returns 0 on success or -EINVAL on failure.
*/
int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
{
- int i;
-
- for (i = 0; i < rdev->doorbell.num_pages; i++) {
- if (rdev->doorbell.free[i]) {
- rdev->doorbell.free[i] = false;
- *doorbell = i;
- return 0;
- }
+ unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
+ if (offset < rdev->doorbell.num_doorbells) {
+ __set_bit(offset, rdev->doorbell.used);
+ *doorbell = offset;
+ return 0;
+ } else {
+ return -EINVAL;
}
- return -EINVAL;
}
/**
- * radeon_doorbell_free - Free a doorbell page
+ * radeon_doorbell_free - Free a doorbell entry
*
* @rdev: radeon_device pointer
- * @doorbell: doorbell page number
+ * @doorbell: doorbell index
*
- * Free a doorbell page allocated for use by the driver (all asics)
+ * Free a doorbell allocated for use by the driver (all asics)
*/
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
{
- if (doorbell < rdev->doorbell.num_pages)
- rdev->doorbell.free[doorbell] = true;
+ if (doorbell < rdev->doorbell.num_doorbells)
+ __clear_bit(doorbell, rdev->doorbell.used);
}
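radeon_doorbell_get()/free() above now track allocations with a bitmap via find_first_zero_bit()/__set_bit()/__clear_bit() instead of a bool-per-page array. A compact userspace model of the same scheme, with the kernel bitmap helpers replaced by plain loops:

	#define MAX_DOORBELLS 1024
	#define BITS_PER_WORD (8 * sizeof(unsigned long))

	struct doorbell_pool {
		unsigned int num;	/* doorbells actually present */
		unsigned long used[(MAX_DOORBELLS + BITS_PER_WORD - 1) / BITS_PER_WORD];
	};

	/* Model of radeon_doorbell_get(): claim the first free index. */
	static int doorbell_get(struct doorbell_pool *p, unsigned int *index)
	{
		unsigned int i;

		for (i = 0; i < p->num; i++) {
			unsigned long *word = &p->used[i / BITS_PER_WORD];
			unsigned long bit = 1UL << (i % BITS_PER_WORD);

			if (!(*word & bit)) {
				*word |= bit;
				*index = i;
				return 0;
			}
		}
		return -1;	/* exhausted; the driver returns -EINVAL */
	}

	/* Model of radeon_doorbell_free(): release a claimed index. */
	static void doorbell_free(struct doorbell_pool *p, unsigned int index)
	{
		if (index < p->num)
			p->used[index / BITS_PER_WORD] &= ~(1UL << (index % BITS_PER_WORD));
	}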
/*
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 1aee32213f66..1958b36ad0e5 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -76,9 +76,10 @@
* 2.32.0 - new info request for rings working
* 2.33.0 - Add SI tiling mode array query
* 2.34.0 - Add CIK tiling mode array query
+ * 2.35.0 - Add CIK macrotile mode array query
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 34
+#define KMS_DRIVER_MINOR 35
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -507,15 +508,6 @@ static const struct file_operations radeon_driver_kms_fops = {
#endif
};
-
-static void
-radeon_pci_shutdown(struct pci_dev *pdev)
-{
- struct drm_device *dev = pci_get_drvdata(pdev);
-
- radeon_driver_unload_kms(dev);
-}
-
static struct drm_driver kms_driver = {
.driver_features =
DRIVER_USE_AGP |
@@ -585,7 +577,6 @@ static struct pci_driver radeon_kms_pci_driver = {
.probe = radeon_pci_probe,
.remove = radeon_pci_remove,
.driver.pm = &radeon_pm_ops,
- .shutdown = radeon_pci_shutdown,
};
static int __init radeon_init(void)
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 543dcfae7e6f..00e0d449021c 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -108,9 +108,10 @@
* 1.31- Add support for num Z pipes from GET_PARAM
* 1.32- fixes for rv740 setup
* 1.33- Add r6xx/r7xx const buffer support
+ * 1.34- fix evergreen/cayman GS register
*/
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 33
+#define DRIVER_MINOR 34
#define DRIVER_PATCHLEVEL 0
long radeon_drm_ioctl(struct file *filp,
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 281d14c22a47..d3a86e43c012 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -472,6 +472,36 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
}
/**
+ * radeon_fence_wait_locked - wait for a fence to signal
+ *
+ * @fence: radeon fence object
+ *
+ * Wait for the requested fence to signal (all asics).
+ * Returns 0 if the fence has passed, error for all other cases.
+ */
+int radeon_fence_wait_locked(struct radeon_fence *fence)
+{
+ uint64_t seq[RADEON_NUM_RINGS] = {};
+ int r;
+
+ if (fence == NULL) {
+ WARN(1, "Querying an invalid fence : %p !\n", fence);
+ return -EINVAL;
+ }
+
+ seq[fence->ring] = fence->seq;
+ if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
+ return 0;
+
+ r = radeon_fence_wait_seq(fence->rdev, seq, false, false);
+ if (r)
+ return r;
+
+ fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+ return 0;
+}
+
+/**
* radeon_fence_wait_next_locked - wait for the next fence to signal
*
* @rdev: radeon device pointer
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 8a83b89d4709..96e440061bdb 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -29,6 +29,7 @@
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_reg.h"
+#include "radeon_trace.h"
/*
* GART
@@ -651,7 +652,7 @@ retry:
radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
0, pd_entries, 0, 0);
- radeon_ib_sync_to(&ib, vm->fence);
+ radeon_semaphore_sync_to(ib.semaphore, vm->fence);
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
radeon_ib_free(rdev, &ib);
@@ -737,6 +738,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
for (i = 0; i < 2; ++i) {
if (choices[i]) {
vm->id = choices[i];
+ trace_radeon_vm_grab_id(vm->id, ring);
return rdev->vm_manager.active[choices[i]];
}
}
@@ -1116,7 +1118,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
}
/**
- * radeon_vm_bo_update_pte - map a bo into the vm page table
+ * radeon_vm_bo_update - map a bo into the vm page table
*
* @rdev: radeon_device pointer
* @vm: requested vm
@@ -1128,10 +1130,10 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
*
* Object have to be reserved & global and local mutex must be locked!
*/
-int radeon_vm_bo_update_pte(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo,
- struct ttm_mem_reg *mem)
+int radeon_vm_bo_update(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo,
+ struct ttm_mem_reg *mem)
{
struct radeon_ib ib;
struct radeon_bo_va *bo_va;
@@ -1176,6 +1178,8 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
bo_va->valid = false;
}
+ trace_radeon_vm_bo_update(bo_va);
+
nptes = radeon_bo_ngpu_pages(bo);
/* assume two extra pdes in case the mapping overlaps the borders */
@@ -1209,6 +1213,8 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
return -ENOMEM;
r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
+ if (r)
+ return r;
ib.length_dw = 0;
r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
@@ -1220,7 +1226,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
addr, radeon_vm_page_flags(bo_va->flags));
- radeon_ib_sync_to(&ib, vm->fence);
+ radeon_semaphore_sync_to(ib.semaphore, vm->fence);
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
radeon_ib_free(rdev, &ib);
@@ -1255,7 +1261,7 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&bo_va->vm->mutex);
if (bo_va->soffset) {
- r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
+ r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
}
mutex_unlock(&rdev->vm_manager.lock);
list_del(&bo_va->vm_list);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index bb8710531a1b..55d0b474bd37 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -340,7 +340,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
break;
case RADEON_INFO_BACKEND_MAP:
if (rdev->family >= CHIP_BONAIRE)
- return -EINVAL;
+ *value = rdev->config.cik.backend_map;
else if (rdev->family >= CHIP_TAHITI)
*value = rdev->config.si.backend_map;
else if (rdev->family >= CHIP_CAYMAN)
@@ -449,6 +449,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return -EINVAL;
}
break;
+ case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY:
+ if (rdev->family >= CHIP_BONAIRE) {
+ value = rdev->config.cik.macrotile_mode_array;
+ value_size = sizeof(uint32_t)*16;
+ } else {
+ DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n");
+ return -EINVAL;
+ }
+ break;
case RADEON_INFO_SI_CP_DMA_COMPUTE:
*value = 1;
break;
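
Userspace reaches the new query through the existing DRM_RADEON_INFO ioctl. A rough libdrm-style sketch (hedged: the helper name is made up, header paths and the RADEON_INFO_CIK_MACROTILE_MODE_ARRAY define come from drm/radeon_drm.h; the value field carries a pointer to a 16-dword array the kernel fills):

	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <radeon_drm.h>

	/* Sketch: fetch the CIK macrotile mode array (16 x uint32_t). */
	static int example_get_macrotile(int fd, uint32_t tile[16])
	{
		struct drm_radeon_info info;

		memset(&info, 0, sizeof(info));
		info.request = RADEON_INFO_CIK_MACROTILE_MODE_ARRAY;
		info.value = (uint64_t)(uintptr_t)tile;
		return drmCommandWriteRead(fd, DRM_RADEON_INFO,
					   &info, sizeof(info));
	}
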
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 0c7b8c66301b..0b158f98d287 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -422,6 +422,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
/* Pin framebuffer & get tiling information */
obj = radeon_fb->obj;
rbo = gem_to_radeon_bo(obj);
+retry:
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@@ -430,6 +431,33 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
&base);
if (unlikely(r != 0)) {
radeon_bo_unreserve(rbo);
+
+ /* On old GPUs like the RN50 with little VRAM, pinning can fail
+ * because the current fb is taking all the space needed. So instead
+ * of unpinning the old buffer after pinning the new one, first unpin
+ * the old one and then retry pinning the new one.
+ *
+ * As only the master can set a mode, only the master can pin, and it
+ * is unlikely the master client will race with itself, especially
+ * on those old GPUs with a single CRTC.
+ *
+ * We don't shut down the display controller because the new buffer
+ * will end up in the same spot.
+ */
+ if (!atomic && fb && fb != crtc->fb) {
+ struct radeon_bo *old_rbo;
+ unsigned long nsize, osize;
+
+ old_rbo = gem_to_radeon_bo(to_radeon_framebuffer(fb)->obj);
+ osize = radeon_bo_size(old_rbo);
+ nsize = radeon_bo_size(rbo);
+ if (nsize <= osize && !radeon_bo_reserve(old_rbo, false)) {
+ radeon_bo_unpin(old_rbo);
+ radeon_bo_unreserve(old_rbo);
+ fb = NULL;
+ goto retry;
+ }
+ }
return -EINVAL;
}
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 866ace070b91..984097b907ef 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -537,8 +537,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct radeon_device *rdev = ddev->dev_private;
+ struct radeon_device *rdev = dev_get_drvdata(dev);
int temp;
if (rdev->asic->pm.get_temperature)
@@ -553,8 +552,7 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct radeon_device *rdev = ddev->dev_private;
+ struct radeon_device *rdev = dev_get_drvdata(dev);
int hyst = to_sensor_dev_attr(attr)->index;
int temp;
@@ -566,23 +564,14 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
-static ssize_t radeon_hwmon_show_name(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "radeon\n");
-}
-
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
-static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr,
&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_name.dev_attr.attr,
NULL
};
@@ -590,8 +579,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct radeon_device *rdev = ddev->dev_private;
+ struct radeon_device *rdev = dev_get_drvdata(dev);
/* Skip limit attributes if DPM is not enabled */
if (rdev->pm.pm_method != PM_METHOD_DPM &&
@@ -607,11 +595,15 @@ static const struct attribute_group hwmon_attrgroup = {
.is_visible = hwmon_attributes_visible,
};
+static const struct attribute_group *hwmon_groups[] = {
+ &hwmon_attrgroup,
+ NULL
+};
+
static int radeon_hwmon_init(struct radeon_device *rdev)
{
int err = 0;
-
- rdev->pm.int_hwmon_dev = NULL;
+ struct device *hwmon_dev;
switch (rdev->pm.int_thermal_type) {
case THERMAL_TYPE_RV6XX:
@@ -624,20 +616,13 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
case THERMAL_TYPE_KV:
if (rdev->asic->pm.get_temperature == NULL)
return err;
- rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
- if (IS_ERR(rdev->pm.int_hwmon_dev)) {
- err = PTR_ERR(rdev->pm.int_hwmon_dev);
+ hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
+ "radeon", rdev,
+ hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ err = PTR_ERR(hwmon_dev);
dev_err(rdev->dev,
"Unable to register hwmon device: %d\n", err);
- break;
- }
- dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev);
- err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj,
- &hwmon_attrgroup);
- if (err) {
- dev_err(rdev->dev,
- "Unable to create hwmon sysfs file: %d\n", err);
- hwmon_device_unregister(rdev->dev);
}
break;
default:
@@ -647,14 +632,6 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
return err;
}
-static void radeon_hwmon_fini(struct radeon_device *rdev)
-{
- if (rdev->pm.int_hwmon_dev) {
- sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup);
- hwmon_device_unregister(rdev->pm.int_hwmon_dev);
- }
-}
-
static void radeon_dpm_thermal_work_handler(struct work_struct *work)
{
struct radeon_device *rdev =
@@ -1252,7 +1229,6 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_RS780:
case CHIP_RS880:
case CHIP_CAYMAN:
- case CHIP_ARUBA:
case CHIP_BONAIRE:
case CHIP_KABINI:
case CHIP_KAVERI:
@@ -1284,6 +1260,7 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_BARTS:
case CHIP_TURKS:
case CHIP_CAICOS:
+ case CHIP_ARUBA:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
@@ -1337,8 +1314,6 @@ static void radeon_pm_fini_old(struct radeon_device *rdev)
if (rdev->pm.power_state)
kfree(rdev->pm.power_state);
-
- radeon_hwmon_fini(rdev);
}
static void radeon_pm_fini_dpm(struct radeon_device *rdev)
@@ -1358,8 +1333,6 @@ static void radeon_pm_fini_dpm(struct radeon_device *rdev)
if (rdev->pm.power_state)
kfree(rdev->pm.power_state);
-
- radeon_hwmon_fini(rdev);
}
void radeon_pm_fini(struct radeon_device *rdev)
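
hwmon_device_register_with_groups() hands the sensor name and attribute groups to the hwmon core in one call, which is why the hand-rolled name attribute and radeon_hwmon_fini() can be deleted: the core now owns both. A minimal sketch of the general pattern (driver and names here are illustrative, not radeon's):

	#include <linux/hwmon.h>

	/* Sketch: groups-based registration.  The hwmon core creates
	 * the "name" attribute itself and applies the group's
	 * .is_visible callback.
	 */
	static struct device *
	example_hwmon_init(struct device *parent, void *drvdata,
			   const struct attribute_group **groups)
	{
		return hwmon_device_register_with_groups(parent, "example",
							 drvdata, groups);
	}
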
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 18254e1c3e71..9214403ae173 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -61,7 +61,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, struct radeon_vm *vm,
unsigned size)
{
- int i, r;
+ int r;
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
if (r) {
@@ -87,8 +87,6 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
}
ib->is_const_ib = false;
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- ib->sync_to[i] = NULL;
return 0;
}
@@ -109,25 +107,6 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
}
/**
- * radeon_ib_sync_to - sync to fence before executing the IB
- *
- * @ib: IB object to add fence to
- * @fence: fence to sync to
- *
- * Sync to the fence before executing the IB
- */
-void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence)
-{
- struct radeon_fence *other;
-
- if (!fence)
- return;
-
- other = ib->sync_to[fence->ring];
- ib->sync_to[fence->ring] = radeon_fence_later(fence, other);
-}
-
-/**
* radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
*
* @rdev: radeon_device pointer
@@ -151,8 +130,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
struct radeon_ib *const_ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
- bool need_sync = false;
- int i, r = 0;
+ int r = 0;
if (!ib->length_dw || !ring->ready) {
/* TODO: Nothings in the ib we should report. */
@@ -166,19 +144,15 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
return r;
}
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- struct radeon_fence *fence = ib->sync_to[i];
- if (radeon_fence_need_sync(fence, ib->ring)) {
- need_sync = true;
- radeon_semaphore_sync_rings(rdev, ib->semaphore,
- fence->ring, ib->ring);
- radeon_fence_note_sync(fence, ib->ring);
- }
- }
- /* immediately free semaphore when we don't need to sync */
- if (!need_sync) {
- radeon_semaphore_free(rdev, &ib->semaphore, NULL);
+
+ /* sync with other rings */
+ r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
+ if (r) {
+ dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
}
+
/* if we can't remember our last VM flush then flush now! */
/* XXX figure out why we have to flush for every IB */
if (ib->vm /*&& !ib->vm->last_flush*/) {
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 8dcc20f53d73..2b42aa1914f2 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -29,12 +29,12 @@
*/
#include <drm/drmP.h>
#include "radeon.h"
-
+#include "radeon_trace.h"
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore)
{
- int r;
+ int i, r;
*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
if (*semaphore == NULL) {
@@ -50,54 +50,121 @@ int radeon_semaphore_create(struct radeon_device *rdev,
(*semaphore)->waiters = 0;
(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
*((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ (*semaphore)->sync_to[i] = NULL;
+
return 0;
}
-void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx,
struct radeon_semaphore *semaphore)
{
- --semaphore->waiters;
- radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ trace_radeon_semaphore_signale(ridx, semaphore);
+
+ if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, false)) {
+ --semaphore->waiters;
+
+ /* for debugging lockup only, used by sysfs debug files */
+ ring->last_semaphore_signal_addr = semaphore->gpu_addr;
+ return true;
+ }
+ return false;
}
-void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
struct radeon_semaphore *semaphore)
{
- ++semaphore->waiters;
- radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ trace_radeon_semaphore_wait(ridx, semaphore);
+
+ if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, true)) {
+ ++semaphore->waiters;
+
+ /* for debugging lockup only, used by sysfs debug files */
+ ring->last_semaphore_wait_addr = semaphore->gpu_addr;
+ return true;
+ }
+ return false;
+}
+
+/**
+ * radeon_semaphore_sync_to - use the semaphore to sync to a fence
+ *
+ * @semaphore: semaphore object to add fence to
+ * @fence: fence to sync to
+ *
+ * Sync to the fence using this semaphore object
+ */
+void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
+ struct radeon_fence *fence)
+{
+ struct radeon_fence *other;
+
+ if (!fence)
+ return;
+
+ other = semaphore->sync_to[fence->ring];
+ semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other);
}
-/* caller must hold ring lock */
+/**
+ * radeon_semaphore_sync_rings - sync ring to all registered fences
+ *
+ * @rdev: radeon_device pointer
+ * @semaphore: semaphore object to use for sync
+ * @ring: ring that needs sync
+ *
+ * Ensure that all registered fences are signaled before letting
+ * the ring continue. The caller must hold the ring lock.
+ */
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
- int signaler, int waiter)
+ int ring)
{
- int r;
+ int i, r;
- /* no need to signal and wait on the same ring */
- if (signaler == waiter) {
- return 0;
- }
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_fence *fence = semaphore->sync_to[i];
- /* prevent GPU deadlocks */
- if (!rdev->ring[signaler].ready) {
- dev_err(rdev->dev, "Trying to sync to a disabled ring!");
- return -EINVAL;
- }
+ /* check if we really need to sync */
+ if (!radeon_fence_need_sync(fence, ring))
+ continue;
- r = radeon_ring_alloc(rdev, &rdev->ring[signaler], 8);
- if (r) {
- return r;
- }
- radeon_semaphore_emit_signal(rdev, signaler, semaphore);
- radeon_ring_commit(rdev, &rdev->ring[signaler]);
+ /* prevent GPU deadlocks */
+ if (!rdev->ring[i].ready) {
+ dev_err(rdev->dev, "Syncing to a disabled ring!");
+ return -EINVAL;
+ }
- /* we assume caller has already allocated space on waiters ring */
- radeon_semaphore_emit_wait(rdev, waiter, semaphore);
+ /* allocate enough space for sync command */
+ r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
+ if (r) {
+ return r;
+ }
- /* for debugging lockup only, used by sysfs debug files */
- rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr;
- rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr;
+ /* emit the signal semaphore */
+ if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
+ /* signaling wasn't successful, wait manually */
+ radeon_ring_undo(&rdev->ring[i]);
+ radeon_fence_wait_locked(fence);
+ continue;
+ }
+
+ /* we assume caller has already allocated space on waiters ring */
+ if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
+ /* waiting wasn't successful, wait manually */
+ radeon_ring_undo(&rdev->ring[i]);
+ radeon_fence_wait_locked(fence);
+ continue;
+ }
+
+ radeon_ring_commit(rdev, &rdev->ring[i]);
+ radeon_fence_note_sync(fence, ring);
+ }
return 0;
}
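
For callers, this rework collapses the old per-pair signal/wait bookkeeping into two calls, as the DMA copy paths further down show. The new idiom, taken from the rv770_copy_dma()/si_copy_dma() hunks in this patch (note those call sites currently ignore the return value of the sync, which can fail):

	/* Register the fence to sync to, then sync the target ring
	 * against every fence registered on the semaphore so far.
	 */
	radeon_semaphore_sync_to(sem, *fence);
	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
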
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index 811bca691b36..0473257d4078 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -47,6 +47,39 @@ TRACE_EVENT(radeon_cs,
__entry->fences)
);
+TRACE_EVENT(radeon_vm_grab_id,
+ TP_PROTO(unsigned vmid, int ring),
+ TP_ARGS(vmid, ring),
+ TP_STRUCT__entry(
+ __field(u32, vmid)
+ __field(u32, ring)
+ ),
+
+ TP_fast_assign(
+ __entry->vmid = vmid;
+ __entry->ring = ring;
+ ),
+ TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
+);
+
+TRACE_EVENT(radeon_vm_bo_update,
+ TP_PROTO(struct radeon_bo_va *bo_va),
+ TP_ARGS(bo_va),
+ TP_STRUCT__entry(
+ __field(u64, soffset)
+ __field(u64, eoffset)
+ __field(u32, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->soffset = bo_va->soffset;
+ __entry->eoffset = bo_va->eoffset;
+ __entry->flags = bo_va->flags;
+ ),
+ TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
+ __entry->soffset, __entry->eoffset, __entry->flags)
+);
+
TRACE_EVENT(radeon_vm_set_page,
TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags),
@@ -111,6 +144,42 @@ DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
TP_ARGS(dev, seqno)
);
+DECLARE_EVENT_CLASS(radeon_semaphore_request,
+
+ TP_PROTO(int ring, struct radeon_semaphore *sem),
+
+ TP_ARGS(ring, sem),
+
+ TP_STRUCT__entry(
+ __field(int, ring)
+ __field(signed, waiters)
+ __field(uint64_t, gpu_addr)
+ ),
+
+ TP_fast_assign(
+ __entry->ring = ring;
+ __entry->waiters = sem->waiters;
+ __entry->gpu_addr = sem->gpu_addr;
+ ),
+
+ TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring,
+ __entry->waiters, __entry->gpu_addr)
+);
+
+DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_signale,
+
+ TP_PROTO(int ring, struct radeon_semaphore *sem),
+
+ TP_ARGS(ring, sem)
+);
+
+DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_wait,
+
+ TP_PROTO(int ring, struct radeon_semaphore *sem),
+
+ TP_ARGS(ring, sem)
+);
+
#endif
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
index a072fa8c46b0..d46b58d078aa 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -21,7 +21,7 @@ cayman 0x9400
0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
0x000089B0 VGT_HS_OFFCHIP_PARAM
0x00008A14 PA_CL_ENHANCE
-0x00008A60 PA_SC_LINE_STIPPLE_VALUE
+0x00008A60 PA_SU_LINE_STIPPLE_VALUE
0x00008B10 PA_SC_LINE_STIPPLE_STATE
0x00008BF0 PA_SC_ENHANCE
0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
@@ -532,7 +532,7 @@ cayman 0x9400
0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
-0x00028B74 VGT_GS_INSTANCE_CNT
+0x00028B90 VGT_GS_INSTANCE_CNT
0x00028BD4 PA_SC_CENTROID_PRIORITY_0
0x00028BD8 PA_SC_CENTROID_PRIORITY_1
0x00028BDC PA_SC_LINE_CNTL
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index b912a37689bf..57745c8761c8 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -22,7 +22,7 @@ evergreen 0x9400
0x000089A4 VGT_COMPUTE_START_Z
0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
0x00008A14 PA_CL_ENHANCE
-0x00008A60 PA_SC_LINE_STIPPLE_VALUE
+0x00008A60 PA_SU_LINE_STIPPLE_VALUE
0x00008B10 PA_SC_LINE_STIPPLE_STATE
0x00008BF0 PA_SC_ENHANCE
0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
@@ -545,7 +545,7 @@ evergreen 0x9400
0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
-0x00028B74 VGT_GS_INSTANCE_CNT
+0x00028B90 VGT_GS_INSTANCE_CNT
0x00028C00 PA_SC_LINE_CNTL
0x00028C08 PA_SU_VTX_CNTL
0x00028C0C PA_CL_GB_VERT_CLIP_ADJ
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 1c560629575a..e7dab069cccf 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -162,6 +162,16 @@ static void rs690_mc_init(struct radeon_device *rdev)
base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
base = G_000100_MC_FB_START(base) << 16;
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+ /* Some boards seem to be configured for 128MB of sideport memory,
+ * but really only have 64MB. Just skip the sideport and use
+ * UMA memory.
+ */
+ if (rdev->mc.igp_sideport_enabled &&
+ (rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
+ base += 128 * 1024 * 1024;
+ rdev->mc.real_vram_size -= 128 * 1024 * 1024;
+ rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+ }
/* Use K8 direct mapping for fast fb access. */
rdev->fastfb_working = false;
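
The arithmetic behind the workaround: a board with 256 MB of UMA plus a phantom 128 MB sideport reports 384 MB, so the FB base is moved up past the sideport range and the usable size shrunk to match. Illustration only, with made-up values:

	/* Illustration: 384 MB reported, of which 128 MB is phantom
	 * sideport memory that must be skipped.
	 */
	uint64_t base = 0;              /* MC FB start              */
	uint64_t vram = 384ULL << 20;   /* reported size in bytes   */

	base += 128ULL << 20;           /* skip the sideport window */
	vram -= 128ULL << 20;           /* 256 MB of UMA remains    */
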
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
index f9b02e3d6830..aca8cbe8a335 100644
--- a/drivers/gpu/drm/radeon/rv770_dma.c
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -66,13 +66,8 @@ int rv770_copy_dma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 913b025ae9b3..374499db20c7 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2328,6 +2328,12 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
ASIC_INTERNAL_MEMORY_SS, 0);
+ /* disable ss, causes hangs on some cayman boards */
+ if (rdev->family == CHIP_CAYMAN) {
+ pi->sclk_ss = false;
+ pi->mclk_ss = false;
+ }
+
if (pi->sclk_ss || pi->mclk_ss)
pi->dynamic_ss = true;
else
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 6a64ccaa0695..a36736dab5e0 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3882,8 +3882,15 @@ static int si_mc_init(struct radeon_device *rdev)
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* size in MB on si */
- rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
- rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ tmp = RREG32(CONFIG_MEMSIZE);
+ /* some boards may have garbage in the upper 16 bits */
+ if (tmp & 0xffff0000) {
+ DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
+ if (tmp & 0xffff)
+ tmp &= 0xffff;
+ }
+ rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
+ rdev->mc.real_vram_size = rdev->mc.mc_vram_size;
rdev->mc.visible_vram_size = rdev->mc.aper_size;
si_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
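
The sanitization keeps the low 16 bits of CONFIG_MEMSIZE when the high half is garbage but the low half still looks plausible; if the low half is zero, the bogus value is only logged and used as-is. A worked example with a hypothetical readback:

	uint32_t tmp = 0x00010200;      /* garbage high half, 0x200 low */

	if (tmp & 0xffff0000) {         /* probable bad VRAM size       */
		if (tmp & 0xffff)
			tmp &= 0xffff;  /* keep 0x200 => 512 MB         */
	}
	/* mc_vram_size = 0x200 * 1024ULL * 1024ULL = 512 MB */
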
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 8e8f46133532..59be2cfcbb47 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -195,13 +195,8 @@ int si_copy_dma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 9364129ba292..d700698a1f22 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1873,9 +1873,9 @@ int trinity_dpm_init(struct radeon_device *rdev)
pi->enable_sclk_ds = true;
pi->enable_gfx_power_gating = true;
pi->enable_gfx_clock_gating = true;
- pi->enable_mg_clock_gating = true;
- pi->enable_gfx_dynamic_mgpg = true; /* ??? */
- pi->override_dynamic_mgpg = true;
+ pi->enable_mg_clock_gating = false;
+ pi->enable_gfx_dynamic_mgpg = false;
+ pi->override_dynamic_mgpg = false;
pi->enable_auto_thermal_throttling = true;
pi->voltage_drop_in_dce = false; /* need to restructure dpm/modeset interaction */
pi->uvd_dpm = true; /* ??? */
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index 7266805d9786..d4a68af1a279 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -357,7 +357,7 @@ int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
*
* Emit a semaphore command (either wait or signal) to the UVD ring.
*/
-void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@@ -372,6 +372,8 @@ void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
radeon_ring_write(ring, emit_wait ? 1 : 0);
+
+ return true;
}
/**
diff --git a/drivers/gpu/drm/radeon/uvd_v3_1.c b/drivers/gpu/drm/radeon/uvd_v3_1.c
index 5b6fa1f62d4e..d722db2cf340 100644
--- a/drivers/gpu/drm/radeon/uvd_v3_1.c
+++ b/drivers/gpu/drm/radeon/uvd_v3_1.c
@@ -37,7 +37,7 @@
*
* Emit a semaphore command (either wait or signal) to the UVD ring.
*/
-void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@@ -52,4 +52,6 @@ void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
+
+ return true;
}
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 28e178137718..07eba596d458 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -135,11 +135,11 @@ int tegra_drm_submit(struct tegra_drm_context *context,
unsigned int num_relocs = args->num_relocs;
unsigned int num_waitchks = args->num_waitchks;
struct drm_tegra_cmdbuf __user *cmdbufs =
- (void * __user)(uintptr_t)args->cmdbufs;
+ (void __user *)(uintptr_t)args->cmdbufs;
struct drm_tegra_reloc __user *relocs =
- (void * __user)(uintptr_t)args->relocs;
+ (void __user *)(uintptr_t)args->relocs;
struct drm_tegra_waitchk __user *waitchks =
- (void * __user)(uintptr_t)args->waitchks;
+ (void __user *)(uintptr_t)args->waitchks;
struct drm_tegra_syncpt syncpt;
struct host1x_job *job;
int err;
@@ -163,9 +163,10 @@ int tegra_drm_submit(struct tegra_drm_context *context,
struct drm_tegra_cmdbuf cmdbuf;
struct host1x_bo *bo;
- err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
- if (err)
+ if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) {
+ err = -EFAULT;
goto fail;
+ }
bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
if (!bo) {
@@ -178,10 +179,11 @@ int tegra_drm_submit(struct tegra_drm_context *context,
cmdbufs++;
}
- err = copy_from_user(job->relocarray, relocs,
- sizeof(*relocs) * num_relocs);
- if (err)
+ if (copy_from_user(job->relocarray, relocs,
+ sizeof(*relocs) * num_relocs)) {
+ err = -EFAULT;
goto fail;
+ }
while (num_relocs--) {
struct host1x_reloc *reloc = &job->relocarray[num_relocs];
@@ -199,15 +201,17 @@ int tegra_drm_submit(struct tegra_drm_context *context,
}
}
- err = copy_from_user(job->waitchk, waitchks,
- sizeof(*waitchks) * num_waitchks);
- if (err)
+ if (copy_from_user(job->waitchk, waitchks,
+ sizeof(*waitchks) * num_waitchks)) {
+ err = -EFAULT;
goto fail;
+ }
- err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts,
- sizeof(syncpt));
- if (err)
+ if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
+ sizeof(syncpt))) {
+ err = -EFAULT;
goto fail;
+ }
job->is_addr_reg = context->client->ops->is_addr_reg;
job->syncpt_incrs = syncpt.incrs;
@@ -573,7 +577,7 @@ static void tegra_debugfs_cleanup(struct drm_minor *minor)
}
#endif
-struct drm_driver tegra_drm_driver = {
+static struct drm_driver tegra_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM,
.load = tegra_drm_load,
.unload = tegra_drm_unload,
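
The bug these hunks fix: copy_from_user() returns the number of bytes it failed to copy, never a negative errno, so storing its result in err and jumping to fail could return a positive byte count to userspace. The correct idiom, isolated as a sketch (hypothetical helper):

	/* copy_from_user() returns bytes NOT copied; translate any
	 * nonzero result to -EFAULT instead of returning it directly.
	 */
	static int example_fetch(struct drm_tegra_cmdbuf *dst,
				 const void __user *src)
	{
		if (copy_from_user(dst, src, sizeof(*dst)))
			return -EFAULT;
		return 0;
	}
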
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index fdfe259ed7f8..7da0b923131f 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -116,7 +116,7 @@ host1x_client_to_dc(struct host1x_client *client)
static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
{
- return container_of(crtc, struct tegra_dc, base);
+ return crtc ? container_of(crtc, struct tegra_dc, base) : NULL;
}
static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value,
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 490f7719e317..a3835e7de184 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -247,7 +247,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
info->var.yoffset * fb->pitches[0];
drm->mode_config.fb_base = (resource_size_t)bo->paddr;
- info->screen_base = bo->vaddr + offset;
+ info->screen_base = (void __iomem *)bo->vaddr + offset;
info->screen_size = size;
info->fix.smem_start = (unsigned long)(bo->paddr + offset);
info->fix.smem_len = size;
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index ba47ca4fb880..3b29018913a5 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -14,6 +14,8 @@
struct tegra_rgb {
struct tegra_output output;
+ struct tegra_dc *dc;
+
struct clk *clk_parent;
struct clk *clk;
};
@@ -84,18 +86,18 @@ static void tegra_dc_write_regs(struct tegra_dc *dc,
static int tegra_output_rgb_enable(struct tegra_output *output)
{
- struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+ struct tegra_rgb *rgb = to_rgb(output);
- tegra_dc_write_regs(dc, rgb_enable, ARRAY_SIZE(rgb_enable));
+ tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable));
return 0;
}
static int tegra_output_rgb_disable(struct tegra_output *output)
{
- struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+ struct tegra_rgb *rgb = to_rgb(output);
- tegra_dc_write_regs(dc, rgb_disable, ARRAY_SIZE(rgb_disable));
+ tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
return 0;
}
@@ -146,6 +148,7 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
rgb->output.dev = dc->dev;
rgb->output.of_node = np;
+ rgb->dc = dc;
err = tegra_output_probe(&rgb->output);
if (err < 0)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 8d5a646ebe6a..07e02c4bf5a8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -151,7 +151,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
atomic_dec(&bo->glob->bo_count);
if (bo->resv == &bo->ttm_resv)
reservation_object_fini(&bo->ttm_resv);
-
+ mutex_destroy(&bo->wu_mutex);
if (bo->destroy)
bo->destroy(bo);
else {
@@ -1123,6 +1123,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
INIT_LIST_HEAD(&bo->io_reserve_lru);
+ mutex_init(&bo->wu_mutex);
bo->bdev = bdev;
bo->glob = bdev->glob;
bo->type = type;
@@ -1704,3 +1705,35 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);
+
+/**
+ * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
+ * unreserved
+ *
+ * @bo: Pointer to buffer
+ */
+int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
+{
+ int ret;
+
+ /*
+ * In the absence of a wait_unlocked API,
+ * use the bo::wu_mutex to avoid triggering livelocks due to
+ * concurrent use of this function. Note that this use of
+ * bo::wu_mutex can go away if we change locking order to
+ * mmap_sem -> bo::reserve.
+ */
+ ret = mutex_lock_interruptible(&bo->wu_mutex);
+ if (unlikely(ret != 0))
+ return -ERESTARTSYS;
+ if (!ww_mutex_is_locked(&bo->resv->lock))
+ goto out_unlock;
+ ret = ttm_bo_reserve_nolru(bo, true, false, false, NULL);
+ if (unlikely(ret != 0))
+ goto out_unlock;
+ ww_mutex_unlock(&bo->resv->lock);
+
+out_unlock:
+ mutex_unlock(&bo->wu_mutex);
+ return ret;
+}
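
The intended calling pattern, condensed from the fault-handler change in ttm_bo_vm.c below: mmap_sem must be dropped before blocking, since the established lock order takes bo::reserve before mmap_sem.

	/* Condensed fault-path usage: drop mmap_sem, block until the
	 * BO is unreserved, then let the fault be retried from scratch.
	 */
	up_read(&vma->vm_mm->mmap_sem);
	(void) ttm_bo_wait_unreserved(bo);
	return VM_FAULT_RETRY;
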
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 4834c463c38b..406152152315 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -350,10 +350,14 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
goto out2;
/*
- * Move nonexistent data. NOP.
+ * Don't move nonexistent data. Clear destination instead.
*/
- if (old_iomap == NULL && ttm == NULL)
+ if (old_iomap == NULL &&
+ (ttm == NULL || (ttm->state == tt_unpopulated &&
+ !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
+ memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
goto out2;
+ }
/*
* TTM might be null for moves within the same region.
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index ac617f3ecd0c..6440eeac22d2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -107,13 +107,28 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/*
* Work around locking order reversal in fault / nopfn
* between mmap_sem and bo_reserve: Perform a trylock operation
- * for reserve, and if it fails, retry the fault after scheduling.
+ * for reserve, and if it fails, retry the fault after waiting
+ * for the buffer to become unreserved.
*/
-
- ret = ttm_bo_reserve(bo, true, true, false, 0);
+ ret = ttm_bo_reserve(bo, true, true, false, NULL);
if (unlikely(ret != 0)) {
- if (ret == -EBUSY)
- set_need_resched();
+ if (ret != -EBUSY)
+ return VM_FAULT_NOPAGE;
+
+ if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ up_read(&vma->vm_mm->mmap_sem);
+ (void) ttm_bo_wait_unreserved(bo);
+ }
+
+ return VM_FAULT_RETRY;
+ }
+
+ /*
+ * If we wanted to change the locking order to
+ * mmap_sem -> bo::reserve, we'd use a blocking reserve here
+ * instead of retrying the fault...
+ */
return VM_FAULT_NOPAGE;
}
@@ -123,7 +138,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
case 0:
break;
case -EBUSY:
- set_need_resched();
case -ERESTARTSYS:
retval = VM_FAULT_NOPAGE;
goto out_unlock;
@@ -155,9 +169,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
- drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
- page_last = vma_pages(vma) +
- drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
+ vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
+ page_last = vma_pages(vma) + vma->vm_pgoff -
+ drm_vma_node_start(&bo->vma_node);
if (unlikely(page_offset >= bo->num_pages)) {
retval = VM_FAULT_SIGBUS;
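
The offset fix in this hunk: the page index inside the BO is the fault's page offset within the VMA plus the mapping's start relative to the BO's VMA node, i.e. vm_pgoff - drm_vma_node_start(), with the subtraction in that direction (the old code had it reversed). A worked example with hypothetical numbers:

	/* The BO's node starts at pgoff 0x100; the mapping was created
	 * at vm_pgoff 0x104 (4 pages into the BO); the fault hits the
	 * second mapped page.
	 */
	unsigned long rel = (address - vma->vm_start) >> PAGE_SHIFT; /* 1 */
	unsigned long page_offset = rel + 0x104 - 0x100;             /* 5 */
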
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 6c911789ae5c..479e9418e3d7 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,8 +32,7 @@
#include <linux/sched.h>
#include <linux/module.h>
-static void ttm_eu_backoff_reservation_locked(struct list_head *list,
- struct ww_acquire_ctx *ticket)
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
struct ttm_validate_buffer *entry;
@@ -93,8 +92,9 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
spin_lock(&glob->lru_lock);
- ttm_eu_backoff_reservation_locked(list, ticket);
- ww_acquire_fini(ticket);
+ ttm_eu_backoff_reservation_locked(list);
+ if (ticket)
+ ww_acquire_fini(ticket);
spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
@@ -130,7 +130,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
- ww_acquire_init(ticket, &reservation_ww_class);
+ if (ticket)
+ ww_acquire_init(ticket, &reservation_ww_class);
retry:
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
@@ -139,16 +140,17 @@ retry:
if (entry->reserved)
continue;
-
- ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket);
+ ret = ttm_bo_reserve_nolru(bo, true, (ticket == NULL), true,
+ ticket);
if (ret == -EDEADLK) {
/* uh oh, we lost out, drop every reservation and try
* to only reserve this buffer, then start over if
* this succeeds.
*/
+ BUG_ON(ticket == NULL);
spin_lock(&glob->lru_lock);
- ttm_eu_backoff_reservation_locked(list, ticket);
+ ttm_eu_backoff_reservation_locked(list);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
@@ -175,7 +177,8 @@ retry:
}
}
- ww_acquire_done(ticket);
+ if (ticket)
+ ww_acquire_done(ticket);
spin_lock(&glob->lru_lock);
ttm_eu_del_from_lru_locked(list);
spin_unlock(&glob->lru_lock);
@@ -184,12 +187,14 @@ retry:
err:
spin_lock(&glob->lru_lock);
- ttm_eu_backoff_reservation_locked(list, ticket);
+ ttm_eu_backoff_reservation_locked(list);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
err_fini:
- ww_acquire_done(ticket);
- ww_acquire_fini(ticket);
+ if (ticket) {
+ ww_acquire_done(ticket);
+ ww_acquire_fini(ticket);
+ }
return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -224,7 +229,8 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
}
spin_unlock(&bdev->fence_lock);
spin_unlock(&glob->lru_lock);
- ww_acquire_fini(ticket);
+ if (ticket)
+ ww_acquire_fini(ticket);
list_for_each_entry(entry, list, head) {
if (entry->old_sync_obj)
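
With the NULL checks in place, a caller that reserves a single buffer (or otherwise cannot hit -EDEADLK) may pass a NULL ticket and skip the ww_acquire bookkeeping entirely; deadlock backoff then becomes a BUG_ON. A hedged sketch (val_list is an assumed, already-populated validation list):

	/* Sketch: ticket-less reservation.  Only safe when -EDEADLK
	 * cannot occur (single buffer, no nested reservation).
	 */
	ret = ttm_eu_reserve_buffers(NULL, &val_list);
	if (ret == 0) {
		/* ... validate buffers ... */
		ttm_eu_backoff_reservation(NULL, &val_list);
	}
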
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index a868176c258a..6fe7b92a82d1 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,6 +26,12 @@
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ *
+ * While no substantial code is shared, the prime code is inspired by
+ * drm_prime.c, with
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Rob Clark <rob.clark@linaro.org>
*/
/** @file ttm_ref_object.c
*
@@ -34,6 +40,7 @@
* and release on file close.
*/
+
/**
* struct ttm_object_file
*
@@ -84,6 +91,9 @@ struct ttm_object_device {
struct drm_open_hash object_hash;
atomic_t object_count;
struct ttm_mem_global *mem_glob;
+ struct dma_buf_ops ops;
+ void (*dmabuf_release)(struct dma_buf *dma_buf);
+ size_t dma_buf_size;
};
/**
@@ -116,6 +126,8 @@ struct ttm_ref_object {
struct ttm_object_file *tfile;
};
+static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);
+
static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
@@ -416,9 +428,10 @@ out_err:
}
EXPORT_SYMBOL(ttm_object_file_init);
-struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
- *mem_glob,
- unsigned int hash_order)
+struct ttm_object_device *
+ttm_object_device_init(struct ttm_mem_global *mem_glob,
+ unsigned int hash_order,
+ const struct dma_buf_ops *ops)
{
struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
int ret;
@@ -430,10 +443,17 @@ struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
spin_lock_init(&tdev->object_lock);
atomic_set(&tdev->object_count, 0);
ret = drm_ht_create(&tdev->object_hash, hash_order);
+ if (ret != 0)
+ goto out_no_object_hash;
- if (likely(ret == 0))
- return tdev;
+ tdev->ops = *ops;
+ tdev->dmabuf_release = tdev->ops.release;
+ tdev->ops.release = ttm_prime_dmabuf_release;
+ tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
+ ttm_round_pot(sizeof(struct file));
+ return tdev;
+out_no_object_hash:
kfree(tdev);
return NULL;
}
@@ -452,3 +472,225 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
kfree(tdev);
}
EXPORT_SYMBOL(ttm_object_device_release);
+
+/**
+ * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
+ *
+ * @dma_buf: Non-refcounted pointer to a struct dma-buf.
+ *
+ * Obtain a file reference from a lookup structure that doesn't refcount
+ * the file, but synchronizes with its release method to make sure it has
+ * not been freed yet. See for example kref_get_unless_zero documentation.
+ * Returns true if refcounting succeeds, false otherwise.
+ *
+ * Nobody really wants this as a public API yet, so let it mature here
+ * for some time...
+ */
+static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
+{
+ return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
+}
+
+/**
+ * ttm_prime_refcount_release - refcount release method for a prime object.
+ *
+ * @p_base: Pointer to ttm_base_object pointer.
+ *
+ * This is a wrapper that calls the refcount_release function of the
+ * underlying object. At the same time it cleans up the prime object.
+ * This function is called when all references to the base object we
+ * derive from are gone.
+ */
+static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct ttm_prime_object *prime;
+
+ *p_base = NULL;
+ prime = container_of(base, struct ttm_prime_object, base);
+ BUG_ON(prime->dma_buf != NULL);
+ mutex_destroy(&prime->mutex);
+ if (prime->refcount_release)
+ prime->refcount_release(&base);
+}
+
+/**
+ * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
+ *
+ * @dma_buf:
+ *
+ * This function first calls the dma_buf release method the driver
+ * provides. Then it cleans up our dma_buf pointer used for lookup,
+ * and finally releases the reference the dma_buf has on our base
+ * object.
+ */
+static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct ttm_prime_object *prime =
+ (struct ttm_prime_object *) dma_buf->priv;
+ struct ttm_base_object *base = &prime->base;
+ struct ttm_object_device *tdev = base->tfile->tdev;
+
+ if (tdev->dmabuf_release)
+ tdev->dmabuf_release(dma_buf);
+ mutex_lock(&prime->mutex);
+ if (prime->dma_buf == dma_buf)
+ prime->dma_buf = NULL;
+ mutex_unlock(&prime->mutex);
+ ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
+ ttm_base_object_unref(&base);
+}
+
+/**
+ * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
+ *
+ * @tfile: A struct ttm_object_file identifying the caller.
+ * @fd: The prime / dmabuf fd.
+ * @handle: The returned handle.
+ *
+ * This function returns a handle to an object that previously exported
+ * a dma-buf. Note that we don't handle imports yet, because we simply
+ * have no consumers of that implementation.
+ */
+int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
+ int fd, u32 *handle)
+{
+ struct ttm_object_device *tdev = tfile->tdev;
+ struct dma_buf *dma_buf;
+ struct ttm_prime_object *prime;
+ struct ttm_base_object *base;
+ int ret;
+
+ dma_buf = dma_buf_get(fd);
+ if (IS_ERR(dma_buf))
+ return PTR_ERR(dma_buf);
+
+ if (dma_buf->ops != &tdev->ops)
+ return -ENOSYS;
+
+ prime = (struct ttm_prime_object *) dma_buf->priv;
+ base = &prime->base;
+ *handle = base->hash.key;
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+
+ dma_buf_put(dma_buf);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ttm_prime_fd_to_handle);
+
+/**
+ * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
+ *
+ * @tfile: Struct ttm_object_file identifying the caller.
+ * @handle: Handle to the object we're exporting from.
+ * @flags: flags for dma-buf creation. We just pass them on.
+ * @prime_fd: The returned file descriptor.
+ *
+ */
+int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
+ uint32_t handle, uint32_t flags,
+ int *prime_fd)
+{
+ struct ttm_object_device *tdev = tfile->tdev;
+ struct ttm_base_object *base;
+ struct dma_buf *dma_buf;
+ struct ttm_prime_object *prime;
+ int ret;
+
+ base = ttm_base_object_lookup(tfile, handle);
+ if (unlikely(base == NULL ||
+ base->object_type != ttm_prime_type)) {
+ ret = -ENOENT;
+ goto out_unref;
+ }
+
+ prime = container_of(base, struct ttm_prime_object, base);
+ if (unlikely(!base->shareable)) {
+ ret = -EPERM;
+ goto out_unref;
+ }
+
+ ret = mutex_lock_interruptible(&prime->mutex);
+ if (unlikely(ret != 0)) {
+ ret = -ERESTARTSYS;
+ goto out_unref;
+ }
+
+ dma_buf = prime->dma_buf;
+ if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
+
+ /*
+ * Need to create a new dma_buf, with memory accounting.
+ */
+ ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
+ false, true);
+ if (unlikely(ret != 0)) {
+ mutex_unlock(&prime->mutex);
+ goto out_unref;
+ }
+
+ dma_buf = dma_buf_export(prime, &tdev->ops,
+ prime->size, flags);
+ if (IS_ERR(dma_buf)) {
+ ret = PTR_ERR(dma_buf);
+ ttm_mem_global_free(tdev->mem_glob,
+ tdev->dma_buf_size);
+ mutex_unlock(&prime->mutex);
+ goto out_unref;
+ }
+
+ /*
+ * dma_buf has taken the base object reference
+ */
+ base = NULL;
+ prime->dma_buf = dma_buf;
+ }
+ mutex_unlock(&prime->mutex);
+
+ ret = dma_buf_fd(dma_buf, flags);
+ if (ret >= 0) {
+ *prime_fd = ret;
+ ret = 0;
+ } else
+ dma_buf_put(dma_buf);
+
+out_unref:
+ if (base)
+ ttm_base_object_unref(&base);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ttm_prime_handle_to_fd);
+
+/**
+ * ttm_prime_object_init - Initialize a ttm_prime_object
+ *
+ * @tfile: struct ttm_object_file identifying the caller
+ * @size: The size of the dma_bufs we export.
+ * @prime: The object to be initialized.
+ * @shareable: See ttm_base_object_init
+ * @type: See ttm_base_object_init
+ * @refcount_release: See ttm_base_object_init
+ * @ref_obj_release: See ttm_base_object_init
+ *
+ * Initializes an object which is compatible with the drm_prime model
+ * for data sharing between processes and devices.
+ */
+int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
+ struct ttm_prime_object *prime, bool shareable,
+ enum ttm_object_type type,
+ void (*refcount_release) (struct ttm_base_object **),
+ void (*ref_obj_release) (struct ttm_base_object *,
+ enum ttm_ref_type ref_type))
+{
+ mutex_init(&prime->mutex);
+ prime->size = PAGE_ALIGN(size);
+ prime->real_type = type;
+ prime->dma_buf = NULL;
+ prime->refcount_release = refcount_release;
+ return ttm_base_object_init(tfile, &prime->base, shareable,
+ ttm_prime_type,
+ ttm_prime_refcount_release,
+ ref_obj_release);
+}
+EXPORT_SYMBOL(ttm_prime_object_init);
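
These helpers back the driver's prime_fd_to_handle/prime_handle_to_fd hooks (wired up for vmwgfx below), which the DRM core exposes through the generic PRIME ioctls. A minimal userspace round-trip using libdrm's wrappers (the helper name is illustrative; two open DRM file descriptions are assumed):

	#include <stdint.h>
	#include <xf86drm.h>

	/* Sketch: export a handle from one DRM fd, import on another.
	 * The caller should close(prime_fd) once the import is done.
	 */
	static int example_share(int fd_a, uint32_t handle_a,
				 int fd_b, uint32_t *handle_b)
	{
		int prime_fd, ret;

		ret = drmPrimeHandleToFD(fd_a, handle_a, DRM_CLOEXEC,
					 &prime_fd);
		if (ret)
			return ret;
		return drmPrimeFDToHandle(fd_b, prime_fd, handle_b);
	}
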
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 24ffbe990736..8d67b943ac05 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -125,6 +125,12 @@ static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
static void udl_gem_put_pages(struct udl_gem_object *obj)
{
+ if (obj->base.import_attach) {
+ drm_free_large(obj->pages);
+ obj->pages = NULL;
+ return;
+ }
+
drm_gem_put_pages(&obj->base, obj->pages, false, false);
obj->pages = NULL;
}
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 2cc6cd91ac11..9f8b690bcf52 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -6,6 +6,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
- vmwgfx_surface.o
+ vmwgfx_surface.o vmwgfx_prime.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 7776e6f0aef6..0489c6152482 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -150,6 +150,8 @@ struct vmw_ttm_tt {
bool mapped;
};
+const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
+
/**
* Helper functions to advance a struct vmw_piter iterator.
*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 20d5485eaf98..c7a549694e59 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -677,7 +677,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
dev_priv->tdev = ttm_object_device_init
- (dev_priv->mem_global_ref.object, 12);
+ (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);
if (unlikely(dev_priv->tdev == NULL)) {
DRM_ERROR("Unable to initialize TTM object management.\n");
@@ -1210,7 +1210,7 @@ static const struct file_operations vmwgfx_driver_fops = {
static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
- DRIVER_MODESET,
+ DRIVER_MODESET | DRIVER_PRIME,
.load = vmw_driver_load,
.unload = vmw_driver_unload,
.lastclose = vmw_lastclose,
@@ -1235,6 +1235,9 @@ static struct drm_driver driver = {
.dumb_map_offset = vmw_dumb_map_offset,
.dumb_destroy = vmw_dumb_destroy,
+ .prime_fd_to_handle = vmw_prime_fd_to_handle,
+ .prime_handle_to_fd = vmw_prime_handle_to_fd,
+
.fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME,
.desc = VMWGFX_DRIVER_DESC,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index e401d5dbcb96..20890ad8408b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -615,6 +615,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
* TTM buffer object driver - vmwgfx_buffer.c
*/
+extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
@@ -819,6 +820,20 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
/**
+ * Prime - vmwgfx_prime.c
+ */
+
+extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
+extern int vmw_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv,
+ int fd, u32 *handle);
+extern int vmw_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv,
+ uint32_t handle, uint32_t flags,
+ int *prime_fd);
+
+
+/**
* Inline helper functions
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index a51f48e3e917..45d5b5ab6ca9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -68,6 +68,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
SVGA_FIFO_3D_HWVERSION));
break;
}
+ case DRM_VMW_PARAM_MAX_SURF_MEMORY:
+ param->value = dev_priv->memory_size;
+ break;
default:
DRM_ERROR("Illegal vmwgfx get param request: %d\n",
param->param);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ecb3d867b426..03f1c2038631 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -75,6 +75,7 @@ void vmw_display_unit_cleanup(struct vmw_display_unit *du)
vmw_surface_unreference(&du->cursor_surface);
if (du->cursor_dmabuf)
vmw_dmabuf_unreference(&du->cursor_dmabuf);
+ drm_sysfs_connector_remove(&du->connector);
drm_crtc_cleanup(&du->crtc);
drm_encoder_cleanup(&du->encoder);
drm_connector_cleanup(&du->connector);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 79f7e8e60529..a055a26819c2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -260,6 +260,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
connector->encoder = NULL;
encoder->crtc = NULL;
crtc->fb = NULL;
+ crtc->enabled = false;
vmw_ldu_del_active(dev_priv, ldu);
@@ -285,6 +286,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
crtc->x = set->x;
crtc->y = set->y;
crtc->mode = *mode;
+ crtc->enabled = true;
vmw_ldu_add_active(dev_priv, ldu, vfb);
@@ -369,6 +371,8 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
+ (void) drm_sysfs_connector_add(connector);
+
drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
drm_mode_crtc_set_gamma_size(crtc, 256);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
new file mode 100644
index 000000000000..31fe32d8d65a
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -0,0 +1,137 @@
+/**************************************************************************
+ *
+ * Copyright © 2013 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors:
+ * Thomas Hellstrom <thellstrom@vmware.com>
+ *
+ */
+
+#include "vmwgfx_drv.h"
+#include <linux/dma-buf.h>
+#include <drm/ttm/ttm_object.h>
+
+/*
+ * DMA-BUF attach- and mapping methods. No need to implement
+ * these until we have other virtual devices use them.
+ */
+
+static int vmw_prime_map_attach(struct dma_buf *dma_buf,
+ struct device *target_dev,
+ struct dma_buf_attachment *attach)
+{
+ return -ENOSYS;
+}
+
+static void vmw_prime_map_detach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach)
+{
+}
+
+static struct sg_table *vmw_prime_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static void vmw_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgb,
+ enum dma_data_direction dir)
+{
+}
+
+static void *vmw_prime_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+ return NULL;
+}
+
+static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+}
+
+static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ return NULL;
+}
+
+static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+
+}
+static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ return NULL;
+}
+
+static void vmw_prime_dmabuf_kunmap(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+
+}
+
+static int vmw_prime_dmabuf_mmap(struct dma_buf *dma_buf,
+ struct vm_area_struct *vma)
+{
+ WARN_ONCE(true, "Attempted use of dmabuf mmap. Bad.\n");
+ return -ENOSYS;
+}
+
+const struct dma_buf_ops vmw_prime_dmabuf_ops = {
+ .attach = vmw_prime_map_attach,
+ .detach = vmw_prime_map_detach,
+ .map_dma_buf = vmw_prime_map_dma_buf,
+ .unmap_dma_buf = vmw_prime_unmap_dma_buf,
+ .release = NULL,
+ .kmap = vmw_prime_dmabuf_kmap,
+ .kmap_atomic = vmw_prime_dmabuf_kmap_atomic,
+ .kunmap = vmw_prime_dmabuf_kunmap,
+ .kunmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
+ .mmap = vmw_prime_dmabuf_mmap,
+ .vmap = vmw_prime_dmabuf_vmap,
+ .vunmap = vmw_prime_dmabuf_vunmap,
+};
+
+int vmw_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv,
+ int fd, u32 *handle)
+{
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+ return ttm_prime_fd_to_handle(tfile, fd, handle);
+}
+
+int vmw_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv,
+ uint32_t handle, uint32_t flags,
+ int *prime_fd)
+{
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+ return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 252501a54def..9b5ea2ac7ddf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -35,7 +35,7 @@
#define VMW_RES_EVICT_ERR_COUNT 10
struct vmw_user_dma_buffer {
- struct ttm_base_object base;
+ struct ttm_prime_object prime;
struct vmw_dma_buffer dma;
};
@@ -297,7 +297,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
if (unlikely(base == NULL))
return -EINVAL;
- if (unlikely(base->object_type != converter->object_type))
+ if (unlikely(ttm_base_object_type(base) != converter->object_type))
goto out_bad_resource;
res = converter->base_obj_to_res(base);
@@ -352,6 +352,38 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
/**
* Buffer management.
*/
+
+/**
+ * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
+ *
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @size: The requested buffer size.
+ * @user: Whether this is an ordinary dma buffer or a user dma buffer.
+ */
+static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
+ bool user)
+{
+ static size_t struct_size, user_struct_size;
+ size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
+
+ if (unlikely(struct_size == 0)) {
+ size_t backend_size = ttm_round_pot(vmw_tt_size);
+
+ struct_size = backend_size +
+ ttm_round_pot(sizeof(struct vmw_dma_buffer));
+ user_struct_size = backend_size +
+ ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
+ }
+
+ if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+ page_array_size +=
+ ttm_round_pot(num_pages * sizeof(dma_addr_t));
+
+ return ((user) ? user_struct_size : struct_size) +
+ page_array_size;
+}
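A rough worked example of the accounting above, assuming a 4 KiB-page, 64-bit system and a simplified round_pot() stand-in for ttm_round_pot() (power of two for small sizes, page multiple for large ones):

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096UL

    /* Simplified stand-in for ttm_round_pot() (assumption, see above). */
    static size_t round_pot(size_t size)
    {
            size_t p = 4;

            if ((size & (size - 1)) == 0)
                    return size;
            if (size > PAGE_SIZE)
                    return (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
            while (p < size)
                    p <<= 1;
            return p;
    }

    int main(void)
    {
            size_t size = 1 << 20;                          /* 1 MiB buffer */
            size_t num_pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
            size_t page_array = round_pot(num_pages * sizeof(void *));
            /* coherent DMA accounts one dma_addr_t (8 bytes) per page too */
            size_t coherent = round_pot(num_pages * 8);

            printf("pages=%zu array=%zu coherent=+%zu\n",
                   num_pages, page_array, coherent);        /* 256 2048 2048 */
            return 0;
    }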
+
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
@@ -359,6 +391,13 @@ void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
kfree(vmw_bo);
}
+static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
+{
+ struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
+
+ ttm_prime_object_kfree(vmw_user_bo, prime);
+}
+
int vmw_dmabuf_init(struct vmw_private *dev_priv,
struct vmw_dma_buffer *vmw_bo,
size_t size, struct ttm_placement *placement,
@@ -368,28 +407,23 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
struct ttm_bo_device *bdev = &dev_priv->bdev;
size_t acc_size;
int ret;
+ bool user = (bo_free == &vmw_user_dmabuf_destroy);
- BUG_ON(!bo_free);
+ BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
- acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
+ acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
memset(vmw_bo, 0, sizeof(*vmw_bo));
INIT_LIST_HEAD(&vmw_bo->res_list);
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
- ttm_bo_type_device, placement,
+ (user) ? ttm_bo_type_device :
+ ttm_bo_type_kernel, placement,
0, interruptible,
NULL, acc_size, NULL, bo_free);
return ret;
}
-static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
-{
- struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
-
- ttm_base_object_kfree(vmw_user_bo, base);
-}
-
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
struct vmw_user_dma_buffer *vmw_user_bo;
@@ -401,7 +435,8 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
if (unlikely(base == NULL))
return;
- vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+ vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+ prime.base);
bo = &vmw_user_bo->dma.base;
ttm_bo_unref(&bo);
}
@@ -442,18 +477,19 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
return ret;
tmp = ttm_bo_reference(&user_bo->dma.base);
- ret = ttm_base_object_init(tfile,
- &user_bo->base,
- shareable,
- ttm_buffer_type,
- &vmw_user_dmabuf_release, NULL);
+ ret = ttm_prime_object_init(tfile,
+ size,
+ &user_bo->prime,
+ shareable,
+ ttm_buffer_type,
+ &vmw_user_dmabuf_release, NULL);
if (unlikely(ret != 0)) {
ttm_bo_unref(&tmp);
goto out_no_base_object;
}
*p_dma_buf = &user_bo->dma;
- *handle = user_bo->base.hash.key;
+ *handle = user_bo->prime.base.hash.key;
out_no_base_object:
return ret;
@@ -475,8 +511,8 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
return -EPERM;
vmw_user_bo = vmw_user_dma_buffer(bo);
- return (vmw_user_bo->base.tfile == tfile ||
- vmw_user_bo->base.shareable) ? 0 : -EPERM;
+ return (vmw_user_bo->prime.base.tfile == tfile ||
+ vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
}
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
@@ -538,14 +574,15 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
return -ESRCH;
}
- if (unlikely(base->object_type != ttm_buffer_type)) {
+ if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
ttm_base_object_unref(&base);
printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
(unsigned long)handle);
return -EINVAL;
}
- vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+ vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+ prime.base);
(void)ttm_bo_reference(&vmw_user_bo->dma.base);
ttm_base_object_unref(&base);
*out = &vmw_user_bo->dma;
@@ -562,7 +599,8 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
return -EINVAL;
user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
- return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
+ return ttm_ref_object_add(tfile, &user_bo->prime.base,
+ TTM_REF_USAGE, NULL);
}
/*
@@ -777,53 +815,55 @@ err_ref:
}
+/**
+ * vmw_dumb_create - Create a dumb kms buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @args: Pointer to a struct drm_mode_create_dumb structure
+ *
+ * This is a driver callback for the core drm create_dumb functionality.
+ * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
+ * that the arguments have a different format.
+ */
int vmw_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_master *vmaster = vmw_master(file_priv->master);
- struct vmw_user_dma_buffer *vmw_user_bo;
- struct ttm_buffer_object *tmp;
+ struct vmw_dma_buffer *dma_buf;
int ret;
args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = args->pitch * args->height;
- vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
- if (vmw_user_bo == NULL)
- return -ENOMEM;
-
ret = ttm_read_lock(&vmaster->lock, true);
- if (ret != 0) {
- kfree(vmw_user_bo);
+ if (unlikely(ret != 0))
return ret;
- }
- ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
- &vmw_vram_sys_placement, true,
- &vmw_user_dmabuf_destroy);
- if (ret != 0)
- goto out_no_dmabuf;
-
- tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
- ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
- &vmw_user_bo->base,
- false,
- ttm_buffer_type,
- &vmw_user_dmabuf_release, NULL);
+ ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+ args->size, false, &args->handle,
+ &dma_buf);
if (unlikely(ret != 0))
- goto out_no_base_object;
-
- args->handle = vmw_user_bo->base.hash.key;
+ goto out_no_dmabuf;
-out_no_base_object:
- ttm_bo_unref(&tmp);
+ vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
ttm_read_unlock(&vmaster->lock);
return ret;
}
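The pitch/size math is plain byte arithmetic; for example, a 1024x768 buffer at 32 bpp gets a 4096-byte pitch and a 3 MiB allocation. A standalone check:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t width = 1024, height = 768, bpp = 32;
            uint32_t pitch = width * ((bpp + 7) / 8);       /* 4096 bytes/row */
            uint64_t size = (uint64_t)pitch * height;       /* 3145728 bytes */

            printf("pitch=%u size=%llu\n", pitch, (unsigned long long)size);
            return 0;
    }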
+/**
+ * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ * @offset: The address space offset returned.
+ *
+ * This is a driver callback for the core drm dumb_map_offset functionality.
+ */
int vmw_dumb_map_offset(struct drm_file *file_priv,
struct drm_device *dev, uint32_t handle,
uint64_t *offset)
@@ -841,6 +881,15 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
return 0;
}
+/**
+ * vmw_dumb_destroy - Destroy a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ *
+ * This is a driver callback for the core drm dumb_destroy functionality.
+ */
int vmw_dumb_destroy(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle)
@@ -994,7 +1043,6 @@ void vmw_resource_unreserve(struct vmw_resource *res,
*/
static int
vmw_resource_check_buffer(struct vmw_resource *res,
- struct ww_acquire_ctx *ticket,
bool interruptible,
struct ttm_validate_buffer *val_buf)
{
@@ -1011,7 +1059,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
INIT_LIST_HEAD(&val_list);
val_buf->bo = ttm_bo_reference(&res->backup->base);
list_add_tail(&val_buf->head, &val_list);
- ret = ttm_eu_reserve_buffers(ticket, &val_list);
+ ret = ttm_eu_reserve_buffers(NULL, &val_list);
if (unlikely(ret != 0))
goto out_no_reserve;
@@ -1029,7 +1077,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
return 0;
out_no_validate:
- ttm_eu_backoff_reservation(ticket, &val_list);
+ ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
ttm_bo_unref(&val_buf->bo);
if (backup_dirty)
@@ -1074,8 +1122,7 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
* @val_buf: Backup buffer information.
*/
static void
-vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
- struct ttm_validate_buffer *val_buf)
+vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
struct list_head val_list;
@@ -1084,7 +1131,7 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
INIT_LIST_HEAD(&val_list);
list_add_tail(&val_buf->head, &val_list);
- ttm_eu_backoff_reservation(ticket, &val_list);
+ ttm_eu_backoff_reservation(NULL, &val_list);
ttm_bo_unref(&val_buf->bo);
}
@@ -1099,14 +1146,12 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
struct ttm_validate_buffer val_buf;
const struct vmw_res_func *func = res->func;
- struct ww_acquire_ctx ticket;
int ret;
BUG_ON(!func->may_evict);
val_buf.bo = NULL;
- ret = vmw_resource_check_buffer(res, &ticket, interruptible,
- &val_buf);
+ ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
if (unlikely(ret != 0))
return ret;
@@ -1121,7 +1166,7 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
res->backup_dirty = true;
res->res_dirty = false;
out_no_unbind:
- vmw_resource_backoff_reservation(&ticket, &val_buf);
+ vmw_resource_backoff_reservation(&val_buf);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 26387c3d5a21..22406c8651ea 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -310,6 +310,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
crtc->fb = NULL;
crtc->x = 0;
crtc->y = 0;
+ crtc->enabled = false;
vmw_sou_del_active(dev_priv, sou);
@@ -370,6 +371,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
crtc->fb = NULL;
crtc->x = 0;
crtc->y = 0;
+ crtc->enabled = false;
return ret;
}
@@ -382,6 +384,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
crtc->fb = fb;
crtc->x = set->x;
crtc->y = set->y;
+ crtc->enabled = true;
return 0;
}
@@ -464,6 +467,8 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
+ (void) drm_sysfs_connector_add(connector);
+
drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);
drm_mode_crtc_set_gamma_size(crtc, 256);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 582814339748..7de2ea8bd553 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -38,7 +38,7 @@
* @size: TTM accounting size for the surface.
*/
struct vmw_user_surface {
- struct ttm_base_object base;
+ struct ttm_prime_object prime;
struct vmw_surface srf;
uint32_t size;
uint32_t backup_handle;
@@ -580,7 +580,8 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
- return &(container_of(base, struct vmw_user_surface, base)->srf.res);
+ return &(container_of(base, struct vmw_user_surface,
+ prime.base)->srf.res);
}
/**
@@ -599,7 +600,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
kfree(srf->offsets);
kfree(srf->sizes);
kfree(srf->snooper.image);
- ttm_base_object_kfree(user_srf, base);
+ ttm_prime_object_kfree(user_srf, prime);
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
@@ -616,7 +617,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
struct vmw_user_surface *user_srf =
- container_of(base, struct vmw_user_surface, base);
+ container_of(base, struct vmw_user_surface, prime.base);
struct vmw_resource *res = &user_srf->srf.res;
*p_base = NULL;
@@ -790,8 +791,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
srf->snooper.crtc = NULL;
- user_srf->base.shareable = false;
- user_srf->base.tfile = NULL;
+ user_srf->prime.base.shareable = false;
+ user_srf->prime.base.tfile = NULL;
/**
* From this point, the generic resource management functions
@@ -803,9 +804,9 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
tmp = vmw_resource_reference(&srf->res);
- ret = ttm_base_object_init(tfile, &user_srf->base,
- req->shareable, VMW_RES_SURFACE,
- &vmw_user_surface_base_release, NULL);
+ ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+ req->shareable, VMW_RES_SURFACE,
+ &vmw_user_surface_base_release, NULL);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
@@ -813,7 +814,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- rep->sid = user_srf->base.hash.key;
+ rep->sid = user_srf->prime.base.hash.key;
vmw_resource_unreference(&res);
ttm_read_unlock(&vmaster->lock);
@@ -823,7 +824,7 @@ out_no_copy:
out_no_offsets:
kfree(srf->sizes);
out_no_sizes:
- ttm_base_object_kfree(user_srf, base);
+ ttm_prime_object_kfree(user_srf, prime);
out_no_user_srf:
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
@@ -859,13 +860,14 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (unlikely(base->object_type != VMW_RES_SURFACE))
+ if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
goto out_bad_resource;
- user_srf = container_of(base, struct vmw_user_surface, base);
+ user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf;
- ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
+ TTM_REF_USAGE, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n");
goto out_no_reference;
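The recurring base -> prime.base conversions in this patch lean on container_of() accepting a nested member designator, so the outer object is still recovered from the embedded ttm_base_object. A standalone sketch with a simplified macro and stub types (not the real TTM definitions):

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified mirror of the kernel macro. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct ttm_base_object { int refcount; };
    struct ttm_prime_object { struct ttm_base_object base; };
    struct vmw_user_surface { struct ttm_prime_object prime; };

    int main(void)
    {
            struct vmw_user_surface srf;
            struct ttm_base_object *b = &srf.prime.base;
            struct vmw_user_surface *back =
                    container_of(b, struct vmw_user_surface, prime.base);

            printf("recovered outer object: %s\n",
                   back == &srf ? "yes" : "no");
            return 0;
    }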
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 509383f8be03..6a929591aa73 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -19,6 +19,7 @@
#include <linux/of.h>
#include <linux/slab.h>
+#include "bus.h"
#include "dev.h"
static DEFINE_MUTEX(clients_lock);
@@ -257,7 +258,7 @@ static int host1x_unregister_client(struct host1x *host1x,
return -ENODEV;
}
-struct bus_type host1x_bus_type = {
+static struct bus_type host1x_bus_type = {
.name = "host1x",
};
@@ -301,7 +302,7 @@ static int host1x_device_add(struct host1x *host1x,
device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
device->dev.dma_mask = &device->dev.coherent_dma_mask;
device->dev.release = host1x_device_release;
- dev_set_name(&device->dev, driver->name);
+ dev_set_name(&device->dev, "%s", driver->name);
device->dev.bus = &host1x_bus_type;
device->dev.parent = host1x->dev;
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 37e2a63241a9..6b09b71940c2 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -54,8 +54,8 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
u32 *p = (u32 *)((u32)pb->mapped + getptr);
*(p++) = HOST1X_OPCODE_NOP;
*(p++) = HOST1X_OPCODE_NOP;
- dev_dbg(host1x->dev, "%s: NOP at 0x%x\n", __func__,
- pb->phys + getptr);
+ dev_dbg(host1x->dev, "%s: NOP at %#llx\n", __func__,
+ (u64)pb->phys + getptr);
getptr = (getptr + 8) & (pb->size_bytes - 1);
}
wmb();
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index 640c75ca5a8b..f72c873eff81 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -163,8 +163,8 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
continue;
}
- host1x_debug_output(o, " GATHER at %08x+%04x, %d words\n",
- g->base, g->offset, g->words);
+ host1x_debug_output(o, " GATHER at %#llx+%04x, %d words\n",
+ (u64)g->base, g->offset, g->words);
show_gather(o, g->base + g->offset, g->words, cdma,
g->base, mapped);
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 329fbb9b5976..34e2d39d4ce8 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -460,6 +460,7 @@ config HID_MULTITOUCH
- Stantum multitouch panels
- Touch International Panels
- Unitec Panels
+ - Wistron optical touch panels
- XAT optical touch panels
- Xiroku optical touch panels
- Zytronic touch panels
diff --git a/drivers/hid/hid-appleir.c b/drivers/hid/hid-appleir.c
index a42e6a394c5e..0e6a42d37eb6 100644
--- a/drivers/hid/hid-appleir.c
+++ b/drivers/hid/hid-appleir.c
@@ -297,6 +297,9 @@ static int appleir_probe(struct hid_device *hid, const struct hid_device_id *id)
appleir->hid = hid;
+ /* force input as some remotes bypass the input registration */
+ hid->quirks |= HID_QUIRK_HIDINPUT_FORCE;
+
spin_lock_init(&appleir->lock);
setup_timer(&appleir->key_up_timer,
key_up_tick, (unsigned long) appleir);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 8c10f2742233..253fe23ef7fe 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1723,6 +1723,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
@@ -1879,7 +1880,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
{ }
};
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 76559629568c..f9304cb37154 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -489,6 +489,7 @@
#define USB_VENDOR_ID_KYE 0x0458
#define USB_DEVICE_ID_KYE_ERGO_525V 0x0087
#define USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE 0x0138
+#define USB_DEVICE_ID_GENIUS_MANTICORE 0x0153
#define USB_DEVICE_ID_GENIUS_GX_IMPERATOR 0x4018
#define USB_DEVICE_ID_KYE_GPEN_560 0x5003
#define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010
@@ -640,7 +641,6 @@
#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003
#define USB_VENDOR_ID_NINTENDO 0x057e
-#define USB_VENDOR_ID_NINTENDO2 0x054c
#define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306
#define USB_DEVICE_ID_NINTENDO_WIIMOTE2 0x0330
@@ -902,6 +902,9 @@
#define USB_DEVICE_ID_SUPER_DUAL_BOX_PRO 0x8802
#define USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO 0x8804
+#define USB_VENDOR_ID_WISTRON 0x0fb8
+#define USB_DEVICE_ID_WISTRON_OPTICAL_TOUCH 0x1109
+
#define USB_VENDOR_ID_X_TENSIONS 0x1ae7
#define USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE 0x9001
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index 73845120295e..e77696367591 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -342,6 +342,10 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83,
"Genius Gx Imperator Keyboard");
break;
+ case USB_DEVICE_ID_GENIUS_MANTICORE:
+ rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
+ "Genius Manticore Keyboard");
+ break;
}
return rdesc;
}
@@ -418,6 +422,14 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto enabling_err;
}
break;
+ case USB_DEVICE_ID_GENIUS_MANTICORE:
+ /*
+ * The Manticore keyboard needs to have all of its interfaces
+ * opened at least once to be fully functional.
+ */
+ if (hid_hw_open(hdev))
+ hid_hw_close(hdev);
+ break;
}
return 0;
@@ -439,6 +451,8 @@ static const struct hid_device_id kye_devices[] = {
USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+ USB_DEVICE_ID_GENIUS_MANTICORE) },
{ }
};
MODULE_DEVICE_TABLE(hid, kye_devices);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index a2cedb8ae1c0..d83b1e8b505b 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1335,6 +1335,12 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_UNITEC,
USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19) },
+
+ /* Wistron panels */
+ { .driver_data = MT_CLS_NSMU,
+ MT_USB_DEVICE(USB_VENDOR_ID_WISTRON,
+ USB_DEVICE_ID_WISTRON_OPTICAL_TOUCH) },
+
/* XAT */
{ .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XAT,
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index a184e1921c11..8fab82829f8b 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -112,13 +112,15 @@ static int sensor_hub_get_physical_device_count(
static void sensor_hub_fill_attr_info(
struct hid_sensor_hub_attribute_info *info,
- s32 index, s32 report_id, s32 units, s32 unit_expo, s32 size)
+ s32 index, s32 report_id, struct hid_field *field)
{
info->index = index;
info->report_id = report_id;
- info->units = units;
- info->unit_expo = unit_expo;
- info->size = size/8;
+ info->units = field->unit;
+ info->unit_expo = field->unit_exponent;
+ info->size = (field->report_size * field->report_count)/8;
+ info->logical_minimum = field->logical_minimum;
+ info->logical_maximum = field->logical_maximum;
}
static struct hid_sensor_hub_callbacks *sensor_hub_get_callback(
@@ -325,9 +327,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
if (field->physical == usage_id &&
field->logical == attr_usage_id) {
sensor_hub_fill_attr_info(info, i, report->id,
- field->unit, field->unit_exponent,
- field->report_size *
- field->report_count);
+ field);
ret = 0;
} else {
for (j = 0; j < field->maxusage; ++j) {
@@ -336,11 +336,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
field->usage[j].collection_index ==
collection_index) {
sensor_hub_fill_attr_info(info,
- i, report->id,
- field->unit,
- field->unit_exponent,
- field->report_size *
- field->report_count);
+ i, report->id, field);
ret = 0;
break;
}
@@ -573,6 +569,8 @@ static int sensor_hub_probe(struct hid_device *hdev,
goto err_free_names;
}
sd->hid_sensor_hub_client_devs[
+ sd->hid_sensor_client_cnt].id = PLATFORM_DEVID_AUTO;
+ sd->hid_sensor_hub_client_devs[
sd->hid_sensor_client_cnt].name = name;
sd->hid_sensor_hub_client_devs[
sd->hid_sensor_client_cnt].platform_data =
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index da551d113762..098af2f84b8c 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -225,6 +225,13 @@ static const unsigned int buzz_keymap[] = {
struct sony_sc {
unsigned long quirks;
+#ifdef CONFIG_SONY_FF
+ struct work_struct rumble_worker;
+ struct hid_device *hdev;
+ __u8 left;
+ __u8 right;
+#endif
+
void *extra;
};
@@ -615,9 +622,9 @@ static void buzz_remove(struct hid_device *hdev)
}
#ifdef CONFIG_SONY_FF
-static int sony_play_effect(struct input_dev *dev, void *data,
- struct ff_effect *effect)
+static void sony_rumble_worker(struct work_struct *work)
{
+ struct sony_sc *sc = container_of(work, struct sony_sc, rumble_worker);
unsigned char buf[] = {
0x01,
0x00, 0xff, 0x00, 0xff, 0x00,
@@ -628,21 +635,28 @@ static int sony_play_effect(struct input_dev *dev, void *data,
0xff, 0x27, 0x10, 0x00, 0x32,
0x00, 0x00, 0x00, 0x00, 0x00
};
- __u8 left;
- __u8 right;
+
+ buf[3] = sc->right;
+ buf[5] = sc->left;
+
+ sc->hdev->hid_output_raw_report(sc->hdev, buf, sizeof(buf),
+ HID_OUTPUT_REPORT);
+}
+
+static int sony_play_effect(struct input_dev *dev, void *data,
+ struct ff_effect *effect)
+{
struct hid_device *hid = input_get_drvdata(dev);
+ struct sony_sc *sc = hid_get_drvdata(hid);
if (effect->type != FF_RUMBLE)
return 0;
- left = effect->u.rumble.strong_magnitude / 256;
- right = effect->u.rumble.weak_magnitude ? 1 : 0;
-
- buf[3] = right;
- buf[5] = left;
+ sc->left = effect->u.rumble.strong_magnitude / 256;
+ sc->right = effect->u.rumble.weak_magnitude ? 1 : 0;
- return hid->hid_output_raw_report(hid, buf, sizeof(buf),
- HID_OUTPUT_REPORT);
+ schedule_work(&sc->rumble_worker);
+ return 0;
}
static int sony_init_ff(struct hid_device *hdev)
@@ -650,16 +664,31 @@ static int sony_init_ff(struct hid_device *hdev)
struct hid_input *hidinput = list_entry(hdev->inputs.next,
struct hid_input, list);
struct input_dev *input_dev = hidinput->input;
+ struct sony_sc *sc = hid_get_drvdata(hdev);
+
+ sc->hdev = hdev;
+ INIT_WORK(&sc->rumble_worker, sony_rumble_worker);
input_set_capability(input_dev, EV_FF, FF_RUMBLE);
return input_ff_create_memless(input_dev, NULL, sony_play_effect);
}
+static void sony_destroy_ff(struct hid_device *hdev)
+{
+ struct sony_sc *sc = hid_get_drvdata(hdev);
+
+ cancel_work_sync(&sc->rumble_worker);
+}
+
#else
static int sony_init_ff(struct hid_device *hdev)
{
return 0;
}
+
+static void sony_destroy_ff(struct hid_device *hdev)
+{
+}
#endif
static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
@@ -728,6 +757,8 @@ static void sony_remove(struct hid_device *hdev)
if (sc->quirks & BUZZ_CONTROLLER)
buzz_remove(hdev);
+ sony_destroy_ff(hdev);
+
hid_hw_stop(hdev);
}
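The rumble rework is the usual defer-to-workqueue pattern: input_ff_create_memless() may invoke the play callback in atomic context, while hid_output_raw_report() can sleep, so the callback only records the magnitudes and schedules work. A generic sketch of the pattern (names are illustrative, not the driver's):

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct rumble_ctx {
            struct work_struct work;
            u8 left, right;
    };

    static void rumble_fn(struct work_struct *work)
    {
            struct rumble_ctx *ctx = container_of(work, struct rumble_ctx, work);

            /* sleepable I/O goes here, e.g. sending the HID output report */
            pr_debug("rumble %u/%u\n", ctx->left, ctx->right);
    }

    /* setup:    INIT_WORK(&ctx->work, rumble_fn);
     * callback: update ctx->left/right, then schedule_work(&ctx->work);
     * teardown: cancel_work_sync(&ctx->work); */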
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index 1446f526ee8b..abb20db2b443 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -834,8 +834,7 @@ static void wiimote_init_set_type(struct wiimote_data *wdata,
goto done;
}
- if (vendor == USB_VENDOR_ID_NINTENDO ||
- vendor == USB_VENDOR_ID_NINTENDO2) {
+ if (vendor == USB_VENDOR_ID_NINTENDO) {
if (product == USB_DEVICE_ID_NINTENDO_WIIMOTE) {
devtype = WIIMOTE_DEV_GEN10;
goto done;
@@ -1856,8 +1855,6 @@ static void wiimote_hid_remove(struct hid_device *hdev)
static const struct hid_device_id wiimote_hid_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
USB_DEVICE_ID_NINTENDO_WIIMOTE) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2,
- USB_DEVICE_ID_NINTENDO_WIIMOTE) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
{ }
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index ae48d18ee315..5f7e55f4b7f0 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -1008,7 +1008,7 @@ static int i2c_hid_probe(struct i2c_client *client,
hid->hid_get_raw_report = i2c_hid_get_raw_report;
hid->hid_output_raw_report = i2c_hid_output_raw_report;
hid->dev.parent = &client->dev;
- ACPI_HANDLE_SET(&hid->dev, ACPI_HANDLE(&client->dev));
+ ACPI_COMPANION_SET(&hid->dev, ACPI_COMPANION(&client->dev));
hid->bus = BUS_I2C;
hid->version = le16_to_cpu(ihid->hdesc.bcdVersion);
hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 93b00d76374c..cedc6da93c19 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -287,7 +287,7 @@ static int uhid_event_from_user(const char __user *buffer, size_t len,
*/
struct uhid_create_req_compat *compat;
- compat = kmalloc(sizeof(*compat), GFP_KERNEL);
+ compat = kzalloc(sizeof(*compat), GFP_KERNEL);
if (!compat)
return -ENOMEM;
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index b3ab9d43bb3e..52d548f1dc1d 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -656,6 +656,7 @@ config SENSORS_LM75
- Analog Devices ADT75
- Dallas Semiconductor DS75, DS1775 and DS7505
+ - Global Mixed-mode Technology (GMT) G751
- Maxim MAX6625 and MAX6626
- Microchip MCP980x
- National Semiconductor LM75, LM75A
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 8d40da314a8e..6a34f7f48eb9 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -602,9 +602,8 @@ static int read_domain_devices(struct acpi_power_meter_resource *resource)
/* Create a symlink to domain objects */
resource->domain_devices[i] = NULL;
- status = acpi_bus_get_device(element->reference.handle,
- &resource->domain_devices[i]);
- if (ACPI_FAILURE(status))
+ if (acpi_bus_get_device(element->reference.handle,
+ &resource->domain_devices[i]))
continue;
obj = resource->domain_devices[i];
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 1d7ff46812c3..dafc63c6932d 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -18,7 +18,6 @@
#include <linux/err.h>
#include <acpi/acpi.h>
-#include <acpi/acpixf.h>
#include <acpi/acpi_drivers.h>
#include <acpi/acpi_bus.h>
diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c
index 2dc37c7c6947..7d68a08baaa8 100644
--- a/drivers/hwmon/hih6130.c
+++ b/drivers/hwmon/hih6130.c
@@ -43,6 +43,7 @@
* @last_update: time of last update (jiffies)
* @temperature: cached temperature measurement value
* @humidity: cached humidity measurement value
+ * @write_length: length for I2C measurement request
*/
struct hih6130 {
struct device *hwmon_dev;
@@ -51,6 +52,7 @@ struct hih6130 {
unsigned long last_update;
int temperature;
int humidity;
+ size_t write_length;
};
/**
@@ -121,8 +123,15 @@ static int hih6130_update_measurements(struct i2c_client *client)
*/
if (time_after(jiffies, hih6130->last_update + HZ) || !hih6130->valid) {
- /* write to slave address, no data, to request a measurement */
- ret = i2c_master_send(client, tmp, 0);
+ /*
+ * Write to the slave address to request a measurement.
+ * According to the datasheet this should carry no data, but
+ * for systems whose I2C bus drivers do not allow zero-length
+ * packets we write one dummy byte so that sensor measurements
+ * still work on them.
+ */
+ tmp[0] = 0;
+ ret = i2c_master_send(client, tmp, hih6130->write_length);
if (ret < 0)
goto out;
@@ -252,6 +261,9 @@ static int hih6130_probe(struct i2c_client *client,
goto fail_remove_sysfs;
}
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_QUICK))
+ hih6130->write_length = 1;
+
return 0;
fail_remove_sysfs:
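Collected in one place, the logic the two hunks add looks like this (fragment of the probe and update paths above, not standalone): I2C_FUNC_SMBUS_QUICK advertises zero-length transfer support, so adapters lacking it get a one-byte dummy write instead.

    /* probe: remember whether a dummy byte is needed */
    if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_QUICK))
            hih6130->write_length = 1;

    /* measurement request: 0 bytes per the datasheet, 1 dummy byte otherwise */
    tmp[0] = 0;
    ret = i2c_master_send(client, tmp, hih6130->write_length);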
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index c03b490bba81..7e3ef134f1d2 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -39,6 +39,7 @@ enum lm75_type { /* keep sorted in alphabetical order */
ds1775,
ds75,
ds7505,
+ g751,
lm75,
lm75a,
max6625,
@@ -208,6 +209,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
data->resolution = 12;
data->sample_time = HZ / 4;
break;
+ case g751:
case lm75:
case lm75a:
data->resolution = 9;
@@ -296,6 +298,7 @@ static const struct i2c_device_id lm75_ids[] = {
{ "ds1775", ds1775, },
{ "ds75", ds75, },
{ "ds7505", ds7505, },
+ { "g751", g751, },
{ "lm75", lm75, },
{ "lm75a", lm75a, },
{ "max6625", max6625, },
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 6cf6bff79003..a2f3b4a365e4 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -94,6 +94,8 @@ static inline u8 FAN_TO_REG(long rpm, int div)
{
if (rpm <= 0)
return 255;
+ if (rpm > 1350000)
+ return 1;
return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
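The shared hwmon conversion maps RPM to an 8-bit tachometer register as 1350000 / (rpm * div), rounded and clamped to 1..254; the new guard returns the fastest setting before rpm * div can overflow on 32-bit longs. A standalone mirror, assuming the same constants:

    #include <stdio.h>

    static unsigned char fan_to_reg(long rpm, int div)
    {
            long reg;

            if (rpm <= 0)
                    return 255;             /* no fan */
            if (rpm > 1350000)              /* avoid overflow in rpm * div */
                    return 1;
            reg = (1350000 + rpm * div / 2) / (rpm * div);
            return reg < 1 ? 1 : reg > 254 ? 254 : reg;
    }

    int main(void)
    {
            printf("%u\n", fan_to_reg(4500, 2));    /* -> 150 */
            return 0;
    }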
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 4c4c1421bf28..8b8f3aa49726 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -1610,12 +1610,14 @@ static int lm90_probe(struct i2c_client *client,
"lm90", client);
if (err < 0) {
dev_err(dev, "cannot request IRQ %d\n", client->irq);
- goto exit_remove_files;
+ goto exit_unregister;
}
}
return 0;
+exit_unregister:
+ hwmon_device_unregister(data->hwmon_dev);
exit_remove_files:
lm90_remove_files(client, data);
exit_restore:
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index d17325db0ea3..cf811c1a1475 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -274,6 +274,8 @@ static const u16 NCT6775_FAN_PULSE_SHIFT[] = { 0, 0, 0, 0, 0, 0 };
static const u16 NCT6775_REG_TEMP[] = {
0x27, 0x150, 0x250, 0x62b, 0x62c, 0x62d };
+static const u16 NCT6775_REG_TEMP_MON[] = { 0x73, 0x75, 0x77 };
+
static const u16 NCT6775_REG_TEMP_CONFIG[ARRAY_SIZE(NCT6775_REG_TEMP)] = {
0, 0x152, 0x252, 0x628, 0x629, 0x62A };
static const u16 NCT6775_REG_TEMP_HYST[ARRAY_SIZE(NCT6775_REG_TEMP)] = {
@@ -454,6 +456,7 @@ static const u16 NCT6779_REG_CRITICAL_PWM[] = {
0x137, 0x237, 0x337, 0x837, 0x937, 0xa37 };
static const u16 NCT6779_REG_TEMP[] = { 0x27, 0x150 };
+static const u16 NCT6779_REG_TEMP_MON[] = { 0x73, 0x75, 0x77, 0x79, 0x7b };
static const u16 NCT6779_REG_TEMP_CONFIG[ARRAY_SIZE(NCT6779_REG_TEMP)] = {
0x18, 0x152 };
static const u16 NCT6779_REG_TEMP_HYST[ARRAY_SIZE(NCT6779_REG_TEMP)] = {
@@ -507,6 +510,13 @@ static const u16 NCT6779_REG_TEMP_CRIT[ARRAY_SIZE(nct6779_temp_label) - 1]
#define NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE 0x28
+static const u16 NCT6791_REG_WEIGHT_TEMP_SEL[6] = { 0, 0x239 };
+static const u16 NCT6791_REG_WEIGHT_TEMP_STEP[6] = { 0, 0x23a };
+static const u16 NCT6791_REG_WEIGHT_TEMP_STEP_TOL[6] = { 0, 0x23b };
+static const u16 NCT6791_REG_WEIGHT_DUTY_STEP[6] = { 0, 0x23c };
+static const u16 NCT6791_REG_WEIGHT_TEMP_BASE[6] = { 0, 0x23d };
+static const u16 NCT6791_REG_WEIGHT_DUTY_BASE[6] = { 0, 0x23e };
+
static const u16 NCT6791_REG_ALARM[NUM_REG_ALARM] = {
0x459, 0x45A, 0x45B, 0x568, 0x45D };
@@ -534,6 +544,7 @@ static const u16 NCT6106_REG_IN[] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x07, 0x08, 0x09 };
static const u16 NCT6106_REG_TEMP[] = { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15 };
+static const u16 NCT6106_REG_TEMP_MON[] = { 0x18, 0x19, 0x1a };
static const u16 NCT6106_REG_TEMP_HYST[] = {
0xc3, 0xc7, 0xcb, 0xcf, 0xd3, 0xd7 };
static const u16 NCT6106_REG_TEMP_OVER[] = {
@@ -1307,6 +1318,9 @@ static void nct6775_update_pwm(struct device *dev)
if (reg & 0x80)
data->pwm[2][i] = 0;
+ if (!data->REG_WEIGHT_TEMP_SEL[i])
+ continue;
+
reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[i]);
data->pwm_weight_temp_sel[i] = reg & 0x1f;
/* If weight is disabled, report weight source as 0 */
@@ -2852,6 +2866,9 @@ static umode_t nct6775_pwm_is_visible(struct kobject *kobj,
if (!(data->has_pwm & (1 << pwm)))
return 0;
+ if ((nr >= 14 && nr <= 18) || nr == 21) /* weight */
+ if (!data->REG_WEIGHT_TEMP_SEL[pwm])
+ return 0;
if (nr == 19 && data->REG_PWM[3] == NULL) /* pwm_max */
return 0;
if (nr == 20 && data->REG_PWM[4] == NULL) /* pwm_step */
@@ -2945,11 +2962,11 @@ static struct sensor_device_template *nct6775_attributes_pwm_template[] = {
&sensor_dev_template_pwm_step_down_time,
&sensor_dev_template_pwm_start,
&sensor_dev_template_pwm_floor,
- &sensor_dev_template_pwm_weight_temp_sel,
+ &sensor_dev_template_pwm_weight_temp_sel, /* 14 */
&sensor_dev_template_pwm_weight_temp_step,
&sensor_dev_template_pwm_weight_temp_step_tol,
&sensor_dev_template_pwm_weight_temp_step_base,
- &sensor_dev_template_pwm_weight_duty_step,
+ &sensor_dev_template_pwm_weight_duty_step, /* 18 */
&sensor_dev_template_pwm_max, /* 19 */
&sensor_dev_template_pwm_step, /* 20 */
&sensor_dev_template_pwm_weight_duty_base, /* 21 */
@@ -3253,9 +3270,9 @@ static int nct6775_probe(struct platform_device *pdev)
int i, s, err = 0;
int src, mask, available;
const u16 *reg_temp, *reg_temp_over, *reg_temp_hyst, *reg_temp_config;
- const u16 *reg_temp_alternate, *reg_temp_crit;
+ const u16 *reg_temp_mon, *reg_temp_alternate, *reg_temp_crit;
const u16 *reg_temp_crit_l = NULL, *reg_temp_crit_h = NULL;
- int num_reg_temp;
+ int num_reg_temp, num_reg_temp_mon;
u8 cr2a;
struct attribute_group *group;
struct device *hwmon_dev;
@@ -3338,7 +3355,9 @@ static int nct6775_probe(struct platform_device *pdev)
data->BEEP_BITS = NCT6106_BEEP_BITS;
reg_temp = NCT6106_REG_TEMP;
+ reg_temp_mon = NCT6106_REG_TEMP_MON;
num_reg_temp = ARRAY_SIZE(NCT6106_REG_TEMP);
+ num_reg_temp_mon = ARRAY_SIZE(NCT6106_REG_TEMP_MON);
reg_temp_over = NCT6106_REG_TEMP_OVER;
reg_temp_hyst = NCT6106_REG_TEMP_HYST;
reg_temp_config = NCT6106_REG_TEMP_CONFIG;
@@ -3410,7 +3429,9 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_BEEP = NCT6775_REG_BEEP;
reg_temp = NCT6775_REG_TEMP;
+ reg_temp_mon = NCT6775_REG_TEMP_MON;
num_reg_temp = ARRAY_SIZE(NCT6775_REG_TEMP);
+ num_reg_temp_mon = ARRAY_SIZE(NCT6775_REG_TEMP_MON);
reg_temp_over = NCT6775_REG_TEMP_OVER;
reg_temp_hyst = NCT6775_REG_TEMP_HYST;
reg_temp_config = NCT6775_REG_TEMP_CONFIG;
@@ -3480,7 +3501,9 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_BEEP = NCT6776_REG_BEEP;
reg_temp = NCT6775_REG_TEMP;
+ reg_temp_mon = NCT6775_REG_TEMP_MON;
num_reg_temp = ARRAY_SIZE(NCT6775_REG_TEMP);
+ num_reg_temp_mon = ARRAY_SIZE(NCT6775_REG_TEMP_MON);
reg_temp_over = NCT6775_REG_TEMP_OVER;
reg_temp_hyst = NCT6775_REG_TEMP_HYST;
reg_temp_config = NCT6776_REG_TEMP_CONFIG;
@@ -3554,7 +3577,9 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_BEEP = NCT6776_REG_BEEP;
reg_temp = NCT6779_REG_TEMP;
+ reg_temp_mon = NCT6779_REG_TEMP_MON;
num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP);
+ num_reg_temp_mon = ARRAY_SIZE(NCT6779_REG_TEMP_MON);
reg_temp_over = NCT6779_REG_TEMP_OVER;
reg_temp_hyst = NCT6779_REG_TEMP_HYST;
reg_temp_config = NCT6779_REG_TEMP_CONFIG;
@@ -3603,8 +3628,8 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_PWM[0] = NCT6775_REG_PWM;
data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
data->REG_PWM[2] = NCT6775_REG_FAN_STOP_OUTPUT;
- data->REG_PWM[5] = NCT6775_REG_WEIGHT_DUTY_STEP;
- data->REG_PWM[6] = NCT6776_REG_WEIGHT_DUTY_BASE;
+ data->REG_PWM[5] = NCT6791_REG_WEIGHT_DUTY_STEP;
+ data->REG_PWM[6] = NCT6791_REG_WEIGHT_DUTY_BASE;
data->REG_PWM_READ = NCT6775_REG_PWM_READ;
data->REG_PWM_MODE = NCT6776_REG_PWM_MODE;
data->PWM_MODE_MASK = NCT6776_PWM_MODE_MASK;
@@ -3620,15 +3645,17 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_TEMP_OFFSET = NCT6779_REG_TEMP_OFFSET;
data->REG_TEMP_SOURCE = NCT6775_REG_TEMP_SOURCE;
data->REG_TEMP_SEL = NCT6775_REG_TEMP_SEL;
- data->REG_WEIGHT_TEMP_SEL = NCT6775_REG_WEIGHT_TEMP_SEL;
- data->REG_WEIGHT_TEMP[0] = NCT6775_REG_WEIGHT_TEMP_STEP;
- data->REG_WEIGHT_TEMP[1] = NCT6775_REG_WEIGHT_TEMP_STEP_TOL;
- data->REG_WEIGHT_TEMP[2] = NCT6775_REG_WEIGHT_TEMP_BASE;
+ data->REG_WEIGHT_TEMP_SEL = NCT6791_REG_WEIGHT_TEMP_SEL;
+ data->REG_WEIGHT_TEMP[0] = NCT6791_REG_WEIGHT_TEMP_STEP;
+ data->REG_WEIGHT_TEMP[1] = NCT6791_REG_WEIGHT_TEMP_STEP_TOL;
+ data->REG_WEIGHT_TEMP[2] = NCT6791_REG_WEIGHT_TEMP_BASE;
data->REG_ALARM = NCT6791_REG_ALARM;
data->REG_BEEP = NCT6776_REG_BEEP;
reg_temp = NCT6779_REG_TEMP;
+ reg_temp_mon = NCT6779_REG_TEMP_MON;
num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP);
+ num_reg_temp_mon = ARRAY_SIZE(NCT6779_REG_TEMP_MON);
reg_temp_over = NCT6779_REG_TEMP_OVER;
reg_temp_hyst = NCT6779_REG_TEMP_HYST;
reg_temp_config = NCT6779_REG_TEMP_CONFIG;
@@ -3729,6 +3756,50 @@ static int nct6775_probe(struct platform_device *pdev)
s++;
}
+ /*
+ * Repeat with temperatures used for fan control.
+ * This set of registers does not support limits.
+ */
+ for (i = 0; i < num_reg_temp_mon; i++) {
+ if (reg_temp_mon[i] == 0)
+ continue;
+
+ src = nct6775_read_value(data, data->REG_TEMP_SEL[i]) & 0x1f;
+ if (!src || (mask & (1 << src)))
+ continue;
+
+ if (src >= data->temp_label_num ||
+ !strlen(data->temp_label[src])) {
+ dev_info(dev,
+ "Invalid temperature source %d at index %d, source register 0x%x, temp register 0x%x\n",
+ src, i, data->REG_TEMP_SEL[i],
+ reg_temp_mon[i]);
+ continue;
+ }
+
+ mask |= 1 << src;
+
+ /* Use fixed index for SYSTIN(1), CPUTIN(2), AUXTIN(3) */
+ if (src <= data->temp_fixed_num) {
+ if (data->have_temp & (1 << (src - 1)))
+ continue;
+ data->have_temp |= 1 << (src - 1);
+ data->have_temp_fixed |= 1 << (src - 1);
+ data->reg_temp[0][src - 1] = reg_temp_mon[i];
+ data->temp_src[src - 1] = src;
+ continue;
+ }
+
+ if (s >= NUM_TEMP)
+ continue;
+
+ /* Use dynamic index for other sources */
+ data->have_temp |= 1 << s;
+ data->reg_temp[0][s] = reg_temp_mon[i];
+ data->temp_src[s] = src;
+ s++;
+ }
+
#ifdef USE_ALTERNATE
/*
* Go through the list of alternate temp registers and enable
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 1404e6319deb..72a889702f0d 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -141,6 +141,8 @@ static inline u8 FAN_TO_REG(long rpm, int div)
{
if (rpm <= 0)
return 255;
+ if (rpm > 1350000)
+ return 1;
return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index 0e7017841f7d..aee14e2192f8 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -145,7 +145,7 @@ static const u8 regtempmin[] = { 0x3a, 0x3e, 0x2c, 0x2e, 0x30, 0x32 };
*/
static inline u8 FAN_TO_REG(long rpm, int div)
{
- if (rpm == 0)
+ if (rpm <= 0 || rpm > 1310720)
return 0;
return clamp_val(1310720 / (rpm * div), 1, 255);
}
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index edb06cda5a68..6ed76ceb9270 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -481,9 +481,11 @@ store_pwm(struct device *dev, struct device_attribute *attr,
if (err)
return err;
val = clamp_val(val, 0, 255);
+ val = DIV_ROUND_CLOSEST(val, 0x11);
mutex_lock(&data->update_lock);
- data->pwm[nr] = val;
+ data->pwm[nr] = val * 0x11;
+ val |= w83l786ng_read_value(client, W83L786NG_REG_PWM[nr]) & 0xf0;
w83l786ng_write_value(client, W83L786NG_REG_PWM[nr], val);
mutex_unlock(&data->update_lock);
return count;
@@ -510,7 +512,7 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
reg = w83l786ng_read_value(client, W83L786NG_REG_FAN_CFG);
data->pwm_enable[nr] = val;
- reg &= ~(0x02 << W83L786NG_PWM_ENABLE_SHIFT[nr]);
+ reg &= ~(0x03 << W83L786NG_PWM_ENABLE_SHIFT[nr]);
reg |= (val - 1) << W83L786NG_PWM_ENABLE_SHIFT[nr];
w83l786ng_write_value(client, W83L786NG_REG_FAN_CFG, reg);
mutex_unlock(&data->update_lock);
@@ -776,9 +778,10 @@ static struct w83l786ng_data *w83l786ng_update_device(struct device *dev)
((pwmcfg >> W83L786NG_PWM_MODE_SHIFT[i]) & 1)
? 0 : 1;
data->pwm_enable[i] =
- ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 2) + 1;
- data->pwm[i] = w83l786ng_read_value(client,
- W83L786NG_REG_PWM[i]);
+ ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 3) + 1;
+ data->pwm[i] =
+ (w83l786ng_read_value(client, W83L786NG_REG_PWM[i])
+ & 0x0f) * 0x11;
}
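The 0x11 factor maps the chip's 4-bit PWM nibble onto the full 0..255 sysfs range and back, since 0xf * 0x11 = 0xff. A standalone round-trip check:

    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
            int user_val = 200;                             /* sysfs input */
            int nibble = DIV_ROUND_CLOSEST(user_val, 0x11); /* register: 12 */
            int reported = nibble * 0x11;                   /* readback: 204 */

            printf("nibble=%d reported=%d\n", nibble, reported);
            return 0;
    }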
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c
index 036cf03aeb61..18a74a6751a9 100644
--- a/drivers/i2c/busses/i2c-bcm-kona.c
+++ b/drivers/i2c/busses/i2c-bcm-kona.c
@@ -20,7 +20,6 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/clk.h>
#include <linux/slab.h>
/* Hardware register offsets and field definitions */
@@ -891,7 +890,7 @@ static const struct of_device_id bcm_kona_i2c_of_match[] = {
{.compatible = "brcm,kona-i2c",},
{},
};
-MODULE_DEVICE_TABLE(of, kona_i2c_of_match);
+MODULE_DEVICE_TABLE(of, bcm_kona_i2c_of_match);
static struct platform_driver bcm_kona_i2c_driver = {
.driver = {
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index d7e8600f31fb..77df97b932af 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -299,6 +299,7 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
strlcpy(adap->name, "bcm2835 I2C adapter", sizeof(adap->name));
adap->algo = &bcm2835_i2c_algo;
adap->dev.parent = &pdev->dev;
+ adap->dev.of_node = pdev->dev.of_node;
bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 0);
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index ff05d9fef4a8..af0b5830303d 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -125,12 +125,12 @@ static struct davinci_i2c_platform_data davinci_i2c_platform_data_default = {
static inline void davinci_i2c_write_reg(struct davinci_i2c_dev *i2c_dev,
int reg, u16 val)
{
- __raw_writew(val, i2c_dev->base + reg);
+ writew_relaxed(val, i2c_dev->base + reg);
}
static inline u16 davinci_i2c_read_reg(struct davinci_i2c_dev *i2c_dev, int reg)
{
- return __raw_readw(i2c_dev->base + reg);
+ return readw_relaxed(i2c_dev->base + reg);
}
/* Generate a pulse on the i2c clock pin. */
diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
index dae3ddfe7619..721f7ebf9a3b 100644
--- a/drivers/i2c/busses/i2c-diolan-u2c.c
+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
@@ -25,8 +25,6 @@
#define USB_VENDOR_ID_DIOLAN 0x0abf
#define USB_DEVICE_ID_DIOLAN_U2C 0x3370
-#define DIOLAN_OUT_EP 0x02
-#define DIOLAN_IN_EP 0x84
/* commands via USB, must match command ids in the firmware */
#define CMD_I2C_READ 0x01
@@ -84,6 +82,7 @@
struct i2c_diolan_u2c {
u8 obuffer[DIOLAN_OUTBUF_LEN]; /* output buffer */
u8 ibuffer[DIOLAN_INBUF_LEN]; /* input buffer */
+ int ep_in, ep_out; /* Endpoints */
struct usb_device *usb_dev; /* the usb device for this device */
struct usb_interface *interface;/* the interface for this device */
struct i2c_adapter adapter; /* i2c related things */
@@ -109,7 +108,7 @@ static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
return -EINVAL;
ret = usb_bulk_msg(dev->usb_dev,
- usb_sndbulkpipe(dev->usb_dev, DIOLAN_OUT_EP),
+ usb_sndbulkpipe(dev->usb_dev, dev->ep_out),
dev->obuffer, dev->olen, &actual,
DIOLAN_USB_TIMEOUT);
if (!ret) {
@@ -118,7 +117,7 @@ static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
tmpret = usb_bulk_msg(dev->usb_dev,
usb_rcvbulkpipe(dev->usb_dev,
- DIOLAN_IN_EP),
+ dev->ep_in),
dev->ibuffer,
sizeof(dev->ibuffer), &actual,
DIOLAN_USB_TIMEOUT);
@@ -210,7 +209,7 @@ static void diolan_flush_input(struct i2c_diolan_u2c *dev)
int ret;
ret = usb_bulk_msg(dev->usb_dev,
- usb_rcvbulkpipe(dev->usb_dev, DIOLAN_IN_EP),
+ usb_rcvbulkpipe(dev->usb_dev, dev->ep_in),
dev->ibuffer, sizeof(dev->ibuffer), &actual,
DIOLAN_USB_TIMEOUT);
if (ret < 0 || actual == 0)
@@ -445,9 +444,14 @@ static void diolan_u2c_free(struct i2c_diolan_u2c *dev)
static int diolan_u2c_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
+ struct usb_host_interface *hostif = interface->cur_altsetting;
struct i2c_diolan_u2c *dev;
int ret;
+ if (hostif->desc.bInterfaceNumber != 0
+ || hostif->desc.bNumEndpoints < 2)
+ return -ENODEV;
+
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
@@ -455,6 +459,8 @@ static int diolan_u2c_probe(struct usb_interface *interface,
ret = -ENOMEM;
goto error;
}
+ dev->ep_out = hostif->endpoint[0].desc.bEndpointAddress;
+ dev->ep_in = hostif->endpoint[1].desc.bEndpointAddress;
dev->usb_dev = usb_get_dev(interface_to_usbdev(interface));
dev->interface = interface;
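A slightly more defensive variant would match endpoint type and direction rather than assuming their order in the descriptor array; usb_endpoint_is_bulk_in()/usb_endpoint_is_bulk_out() are standard USB core helpers (sketch of the probe path above, error handling elided):

    struct usb_endpoint_descriptor *epd;
    int i;

    for (i = 0; i < hostif->desc.bNumEndpoints; i++) {
            epd = &hostif->endpoint[i].desc;
            if (usb_endpoint_is_bulk_out(epd))
                    dev->ep_out = epd->bEndpointAddress;
            else if (usb_endpoint_is_bulk_in(epd))
                    dev->ep_in = epd->bEndpointAddress;
    }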
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 1d7efa3169cd..d0cfbb4cb964 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -312,7 +312,9 @@ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx)
dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
- clk_prepare_enable(i2c_imx->clk);
+ result = clk_prepare_enable(i2c_imx->clk);
+ if (result)
+ return result;
imx_i2c_write_reg(i2c_imx->ifdr, i2c_imx, IMX_I2C_IFDR);
/* Enable I2C controller */
imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR);
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index a6a891d7970d..90dcc2eaac5f 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -266,13 +266,13 @@ static const u8 reg_map_ip_v2[] = {
static inline void omap_i2c_write_reg(struct omap_i2c_dev *i2c_dev,
int reg, u16 val)
{
- __raw_writew(val, i2c_dev->base +
+ writew_relaxed(val, i2c_dev->base +
(i2c_dev->regs[reg] << i2c_dev->reg_shift));
}
static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg)
{
- return __raw_readw(i2c_dev->base +
+ return readw_relaxed(i2c_dev->base +
(i2c_dev->regs[reg] << i2c_dev->reg_shift));
}
@@ -1037,6 +1037,20 @@ static const struct i2c_algorithm omap_i2c_algo = {
};
#ifdef CONFIG_OF
+static struct omap_i2c_bus_platform_data omap2420_pdata = {
+ .rev = OMAP_I2C_IP_VERSION_1,
+ .flags = OMAP_I2C_FLAG_NO_FIFO |
+ OMAP_I2C_FLAG_SIMPLE_CLOCK |
+ OMAP_I2C_FLAG_16BIT_DATA_REG |
+ OMAP_I2C_FLAG_BUS_SHIFT_2,
+};
+
+static struct omap_i2c_bus_platform_data omap2430_pdata = {
+ .rev = OMAP_I2C_IP_VERSION_1,
+ .flags = OMAP_I2C_FLAG_BUS_SHIFT_2 |
+ OMAP_I2C_FLAG_FORCE_19200_INT_CLK,
+};
+
static struct omap_i2c_bus_platform_data omap3_pdata = {
.rev = OMAP_I2C_IP_VERSION_1,
.flags = OMAP_I2C_FLAG_BUS_SHIFT_2,
@@ -1055,6 +1069,14 @@ static const struct of_device_id omap_i2c_of_match[] = {
.compatible = "ti,omap3-i2c",
.data = &omap3_pdata,
},
+ {
+ .compatible = "ti,omap2430-i2c",
+ .data = &omap2430_pdata,
+ },
+ {
+ .compatible = "ti,omap2420-i2c",
+ .data = &omap2420_pdata,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, omap_i2c_of_match);
@@ -1140,9 +1162,9 @@ omap_i2c_probe(struct platform_device *pdev)
* Read the Rev hi bits [15:14], i.e. the scheme; a value of 1 indicates ver2.
* On omap1/3/2, offset 4 is the IE reg and bits [15:14] are 0 at reset.
* Also since the omap_i2c_read_reg uses reg_map_ip_* a
- * raw_readw is done.
+ * readw_relaxed is done.
*/
- rev = __raw_readw(dev->base + 0x04);
+ rev = readw_relaxed(dev->base + 0x04);
dev->scheme = OMAP_I2C_SCHEME(rev);
switch (dev->scheme) {
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 5923cfa390c8..d74c0b34248e 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -615,6 +615,22 @@ void i2c_unlock_adapter(struct i2c_adapter *adapter)
}
EXPORT_SYMBOL_GPL(i2c_unlock_adapter);
+static void i2c_dev_set_name(struct i2c_adapter *adap,
+ struct i2c_client *client)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&client->dev);
+
+ if (adev) {
+ dev_set_name(&client->dev, "i2c-%s", acpi_dev_name(adev));
+ return;
+ }
+
+ /* For 10-bit clients, add an arbitrary offset to avoid collisions */
+ dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
+ client->addr | ((client->flags & I2C_CLIENT_TEN)
+ ? 0xa000 : 0));
+}
+
/**
* i2c_new_device - instantiate an i2c device
* @adap: the adapter managing the device
@@ -671,12 +687,9 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
client->dev.bus = &i2c_bus_type;
client->dev.type = &i2c_client_type;
client->dev.of_node = info->of_node;
- ACPI_HANDLE_SET(&client->dev, info->acpi_node.handle);
+ ACPI_COMPANION_SET(&client->dev, info->acpi_node.companion);
- /* For 10-bit clients, add an arbitrary offset to avoid collisions */
- dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
- client->addr | ((client->flags & I2C_CLIENT_TEN)
- ? 0xa000 : 0));
+ i2c_dev_set_name(adap, client);
status = device_register(&client->dev);
if (status)
goto out_err;
@@ -1100,7 +1113,7 @@ static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
return AE_OK;
memset(&info, 0, sizeof(info));
- info.acpi_node.handle = handle;
+ info.acpi_node.companion = adev;
info.irq = -1;
INIT_LIST_HEAD(&resource_list);
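The resulting device names can be previewed with the same format strings; the ACPI device name below is a hypothetical example:

    #include <stdio.h>

    int main(void)
    {
            int adapter = 7;
            unsigned int addr = 0x50;

            printf("%d-%04x\n", adapter, addr);             /* "7-0050" */
            /* 10-bit client: 0xa000 avoids clashing with a 7-bit 0x50 */
            printf("%d-%04x\n", adapter, addr | 0xa000);    /* "7-a050" */
            /* ACPI-enumerated client (hypothetical ACPI name) */
            printf("i2c-%s\n", "INT33F4:00");
            return 0;
    }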
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 797e3117bef7..2d0847b6be62 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -139,6 +139,8 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
priv->adap.algo = &priv->algo;
priv->adap.algo_data = priv;
priv->adap.dev.parent = &parent->dev;
+ priv->adap.retries = parent->retries;
+ priv->adap.timeout = parent->timeout;
/* Sanity check on class */
if (i2c_mux_parent_classes(parent) & class)
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 140c8ef50529..d9e1f7ccfe6f 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -7,6 +7,7 @@
* Copyright (C) 2006 Hannes Reinecke
*/
+#include <linux/acpi.h>
#include <linux/ata.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -19,8 +20,6 @@
#include <linux/dmi.h>
#include <linux/module.h>
-#include <acpi/acpi_bus.h>
-
#define REGS_PER_GTF 7
struct GTM_buffer {
@@ -128,7 +127,7 @@ static int ide_get_dev_handle(struct device *dev, acpi_handle *handle,
DEBPRINT("ENTER: pci %02x:%02x.%01x\n", bus, devnum, func);
- dev_handle = DEVICE_ACPI_HANDLE(dev);
+ dev_handle = ACPI_HANDLE(dev);
if (!dev_handle) {
DEBPRINT("no acpi handle for device\n");
goto err;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 3226ce98fb18..f80b700f821c 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1,7 +1,7 @@
/*
* intel_idle.c - native hardware idle loop for modern Intel processors
*
- * Copyright (c) 2010, Intel Corporation.
+ * Copyright (c) 2013, Intel Corporation.
* Len Brown <len.brown@intel.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -329,6 +329,22 @@ static struct cpuidle_state atom_cstates[] __initdata = {
{
.enter = NULL }
};
+static struct cpuidle_state avn_cstates[] __initdata = {
+ {
+ .name = "C1-AVN",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 2,
+ .target_residency = 2,
+ .enter = &intel_idle },
+ {
+ .name = "C6-AVN",
+ .desc = "MWAIT 0x51",
+ .flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 15,
+ .target_residency = 45,
+ .enter = &intel_idle },
+};
/**
* intel_idle
@@ -361,6 +377,9 @@ static int intel_idle(struct cpuidle_device *dev,
if (!current_set_polling_and_test()) {
+ if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
+ clflush((void *)&current_thread_info()->flags);
+
__monitor((void *)&current_thread_info()->flags, 0, 0);
smp_mb();
if (!need_resched())
@@ -462,6 +481,11 @@ static const struct idle_cpu idle_cpu_hsw = {
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_avn = {
+ .state_table = avn_cstates,
+ .disable_promotion_to_c1e = true,
+};
+
#define ICPU(model, cpu) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
@@ -483,6 +507,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
ICPU(0x3f, idle_cpu_hsw),
ICPU(0x45, idle_cpu_hsw),
ICPU(0x46, idle_cpu_hsw),
+ ICPU(0x4D, idle_cpu_avn),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index dcda17395c4e..1cae4e920c9b 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -350,7 +350,7 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(indio_dev);
+ hid_sensor_remove_trigger(&accel_state->common_attributes);
error_unreg_buffer_funcs:
iio_triggered_buffer_cleanup(indio_dev);
error_free_dev_mem:
@@ -363,10 +363,11 @@ static int hid_accel_3d_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct accel_3d_state *accel_state = iio_priv(indio_dev);
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_ACCEL_3D);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(indio_dev);
+ hid_sensor_remove_trigger(&accel_state->common_attributes);
iio_triggered_buffer_cleanup(indio_dev);
kfree(indio_dev->channels);
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index d72118d1189c..98ba761cbb9c 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -112,9 +112,10 @@ static int kxsd9_read(struct iio_dev *indio_dev, u8 address)
mutex_lock(&st->buf_lock);
st->tx[0] = KXSD9_READ(address);
ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
- if (ret)
- return ret;
- return (((u16)(st->rx[0])) << 8) | (st->rx[1] & 0xF0);
+ if (!ret)
+ ret = (((u16)(st->rx[0])) << 8) | (st->rx[1] & 0xF0);
+ mutex_unlock(&st->buf_lock);
+ return ret;
}
static IIO_CONST_ATTR(accel_scale_available,
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index acb7f90359a3..749a6cadab8b 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -200,7 +200,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
.address = 1,
.scan_index = 1,
- .scan_type = IIO_ST('u', 12, 16, 0),
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 12,
+ .storagebits = 16,
+ .shift = 0,
+ .endianness = IIO_BE,
+ },
},
.channel[1] = {
.type = IIO_VOLTAGE,
@@ -210,7 +216,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
.address = 0,
.scan_index = 0,
- .scan_type = IIO_ST('u', 12, 16, 0),
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 12,
+ .storagebits = 16,
+ .shift = 0,
+ .endianness = IIO_BE,
+ },
},
.channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2),
.int_vref_mv = 2500,
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 17df74908db1..5b1aa027c034 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -1047,6 +1047,7 @@ static int at91_adc_probe(struct platform_device *pdev)
} else {
if (!st->caps->has_tsmr) {
dev_err(&pdev->dev, "We don't support non-TSMR adc\n");
+ ret = -ENODEV;
goto error_disable_adc_clk;
}
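
Without the added assignment, this error path jumped to error_disable_adc_clk with ret still holding the previous call's 0, so probe could report success for unsupported hardware. The generic shape of the bug (names hypothetical):

    ret = clk_prepare_enable(adc_clk);  /* succeeds, ret == 0 */
    if (!supported) {
            ret = -ENODEV;              /* without this, the goto returns 0 */
            goto err_disable_clk;
    }
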
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index 12948325431c..c8c1baaec6c1 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -88,10 +88,10 @@ static const int mcp3422_sample_rates[4] = {
/* table mapping sample rates to sign-extension bit positions */
static const int mcp3422_sign_extend[4] = {
- [MCP3422_SRATE_240] = 12,
- [MCP3422_SRATE_60] = 14,
- [MCP3422_SRATE_15] = 16,
- [MCP3422_SRATE_3] = 18 };
+ [MCP3422_SRATE_240] = 11,
+ [MCP3422_SRATE_60] = 13,
+ [MCP3422_SRATE_15] = 15,
+ [MCP3422_SRATE_3] = 17 };
/* Client data (each client gets its own) */
struct mcp3422 {
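
The off-by-one matters if, as the table's name and the new values suggest, these entries are passed to sign_extend32(): that helper takes the index of the sign bit, i.e. resolution - 1, not the sample width. Sketch (mcp3422_sample is an illustrative name, not from the driver):

    #include <linux/bitops.h>

    static inline s32 mcp3422_sample(u32 raw, int srate)
    {
            /* e.g. 12-bit data at 240 SPS: the sign bit is bit 11 */
            return sign_extend32(raw, mcp3422_sign_extend[srate]);
    }
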
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 728411ec7642..d4d748214e4b 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -229,12 +229,15 @@ static int tiadc_iio_buffered_hardware_setup(struct iio_dev *indio_dev,
unsigned long flags,
const struct iio_buffer_setup_ops *setup_ops)
{
+ struct iio_buffer *buffer;
int ret;
- indio_dev->buffer = iio_kfifo_allocate(indio_dev);
- if (!indio_dev->buffer)
+ buffer = iio_kfifo_allocate(indio_dev);
+ if (!buffer)
return -ENOMEM;
+ iio_device_attach_buffer(indio_dev, buffer);
+
ret = request_threaded_irq(irq, pollfunc_th, pollfunc_bh,
flags, indio_dev->name, indio_dev);
if (ret)
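
This tracks the IIO core change that routes buffer attachment through a helper instead of assigning indio_dev->buffer directly; the helper takes its own reference on the buffer. The new idiom, in isolation:

    struct iio_buffer *buffer;

    buffer = iio_kfifo_allocate(indio_dev);
    if (!buffer)
            return -ENOMEM;
    iio_device_attach_buffer(indio_dev, buffer);    /* core holds a reference */
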
diff --git a/drivers/iio/common/hid-sensors/Kconfig b/drivers/iio/common/hid-sensors/Kconfig
index 1178121b55b0..39188b72cd3b 100644
--- a/drivers/iio/common/hid-sensors/Kconfig
+++ b/drivers/iio/common/hid-sensors/Kconfig
@@ -25,13 +25,4 @@ config HID_SENSOR_IIO_TRIGGER
If this driver is compiled as a module, it will be named
hid-sensor-trigger.
-config HID_SENSOR_ENUM_BASE_QUIRKS
- bool "ENUM base quirks for HID Sensor IIO drivers"
- depends on HID_SENSOR_IIO_COMMON
- help
- Say yes here to build support for sensor hub FW using
- enumeration, which is using 1 as base instead of 0.
- Since logical minimum is still set 0 instead of 1,
- there is no easy way to differentiate.
-
endmenu
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index b6e77e0fc420..7dcf83998e6f 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -33,33 +33,42 @@ static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig,
{
struct hid_sensor_common *st = iio_trigger_get_drvdata(trig);
int state_val;
+ int report_val;
if (state) {
if (sensor_hub_device_open(st->hsdev))
return -EIO;
- } else
+ state_val =
+ HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM;
+ report_val =
+ HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM;
+
+ } else {
sensor_hub_device_close(st->hsdev);
+ state_val =
+ HID_USAGE_SENSOR_PROP_POWER_STATE_D4_POWER_OFF_ENUM;
+ report_val =
+ HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM;
+ }
- state_val = state ? 1 : 0;
- if (IS_ENABLED(CONFIG_HID_SENSOR_ENUM_BASE_QUIRKS))
- ++state_val;
st->data_ready = state;
+ state_val += st->power_state.logical_minimum;
+ report_val += st->report_state.logical_minimum;
sensor_hub_set_feature(st->hsdev, st->power_state.report_id,
st->power_state.index,
(s32)state_val);
sensor_hub_set_feature(st->hsdev, st->report_state.report_id,
st->report_state.index,
- (s32)state_val);
+ (s32)report_val);
return 0;
}
-void hid_sensor_remove_trigger(struct iio_dev *indio_dev)
+void hid_sensor_remove_trigger(struct hid_sensor_common *attrb)
{
- iio_trigger_unregister(indio_dev->trig);
- iio_trigger_free(indio_dev->trig);
- indio_dev->trig = NULL;
+ iio_trigger_unregister(attrb->trigger);
+ iio_trigger_free(attrb->trigger);
}
EXPORT_SYMBOL(hid_sensor_remove_trigger);
@@ -90,7 +99,7 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
dev_err(&indio_dev->dev, "Trigger Register Failed\n");
goto error_free_trig;
}
- indio_dev->trig = trig;
+ indio_dev->trig = attrb->trigger = trig;
return ret;
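
In place of the removed compile-time quirk, the power/report state values are now rebased at runtime on each field's logical_minimum, so 0-based and 1-based sensor hub firmware both resolve to the right enum. Schematically:

    /* the HID_USAGE_SENSOR_PROP_*_ENUM constants are 0-based */
    state_val = HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM;
    state_val += st->power_state.logical_minimum;   /* +0 or +1 per firmware */
    sensor_hub_set_feature(st->hsdev, st->power_state.report_id,
                           st->power_state.index, (s32)state_val);
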
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
index 9a8731478eda..ca02f7811aa8 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
@@ -21,6 +21,6 @@
int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
struct hid_sensor_common *attrb);
-void hid_sensor_remove_trigger(struct iio_dev *indio_dev);
+void hid_sensor_remove_trigger(struct hid_sensor_common *attrb);
#endif
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index ea01c6bcfb56..e54f0f4959d3 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -348,7 +348,7 @@ static int hid_gyro_3d_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(indio_dev);
+ hid_sensor_remove_trigger(&gyro_state->common_attributes);
error_unreg_buffer_funcs:
iio_triggered_buffer_cleanup(indio_dev);
error_free_dev_mem:
@@ -361,10 +361,11 @@ static int hid_gyro_3d_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct gyro_3d_state *gyro_state = iio_priv(indio_dev);
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_GYRO_3D);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(indio_dev);
+ hid_sensor_remove_trigger(&gyro_state->common_attributes);
iio_triggered_buffer_cleanup(indio_dev);
kfree(indio_dev->channels);
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index 3fb7757a1028..368660dfe135 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -651,7 +651,12 @@ static const struct iio_chan_spec adis16448_channels[] = {
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
.address = ADIS16448_BARO_OUT,
.scan_index = ADIS16400_SCAN_BARO,
- .scan_type = IIO_ST('s', 16, 16, 0),
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
},
ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
IIO_CHAN_SOFT_TIMESTAMP(11)
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index f98c2b509254..a022f27c6690 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -43,6 +43,7 @@ config GP2AP020A00F
depends on I2C
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
+ select IRQ_WORK
help
Say Y here if you have a Sharp GP2AP020A00F proximity/ALS combo-chip
hooked to an I2C bus.
@@ -81,6 +82,8 @@ config SENSORS_LM3533
config TCS3472
tristate "TAOS TCS3472 color light-to-digital converter"
depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
If you say yes here you get support for the TAOS TCS3472
family of color light-to-digital converters with IR filter.
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 21df57130018..0922e39b0ea9 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -387,7 +387,7 @@ static int cm36651_read_int_time(struct cm36651_data *cm36651,
return -EINVAL;
}
- return IIO_VAL_INT_PLUS_MICRO;
+ return IIO_VAL_INT;
}
static int cm36651_write_int_time(struct cm36651_data *cm36651,
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index fa6ae8cf89ea..8e8b9d722853 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -314,7 +314,7 @@ static int hid_als_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(indio_dev);
+ hid_sensor_remove_trigger(&als_state->common_attributes);
error_unreg_buffer_funcs:
iio_triggered_buffer_cleanup(indio_dev);
error_free_dev_mem:
@@ -327,10 +327,11 @@ static int hid_als_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct als_state *als_state = iio_priv(indio_dev);
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_ALS);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(indio_dev);
+ hid_sensor_remove_trigger(&als_state->common_attributes);
iio_triggered_buffer_cleanup(indio_dev);
kfree(indio_dev->channels);
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 0cf09637b35b..d86d226dcd67 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -19,6 +19,8 @@ config AK8975
config MAG3110
tristate "Freescale MAG3110 3-Axis Magnetometer"
depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for the Freescale MAG3110 3-Axis
magnetometer.
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index 2634920562fb..b26e1028a0a0 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -351,7 +351,7 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(indio_dev);
+ hid_sensor_remove_trigger(&magn_state->common_attributes);
error_unreg_buffer_funcs:
iio_triggered_buffer_cleanup(indio_dev);
error_free_dev_mem:
@@ -364,10 +364,11 @@ static int hid_magn_3d_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct magn_3d_state *magn_state = iio_priv(indio_dev);
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_COMPASS_3D);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(indio_dev);
+ hid_sensor_remove_trigger(&magn_state->common_attributes);
iio_triggered_buffer_cleanup(indio_dev);
kfree(indio_dev->channels);
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index 783c5b417356..becf54496967 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -250,7 +250,12 @@ done:
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
BIT(IIO_CHAN_INFO_SCALE), \
.scan_index = idx, \
- .scan_type = IIO_ST('s', 16, 16, IIO_BE), \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
}
static const struct iio_chan_spec mag3110_channels[] = {
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index c47c2034ca71..0717940ec3b5 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -181,9 +181,16 @@ static void add_ref(struct iw_cm_id *cm_id)
static void rem_ref(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
+ int cb_destroy;
+
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
- if (iwcm_deref_id(cm_id_priv) &&
- test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) {
+
+ /*
+ * Test bit before deref in case the cm_id gets freed on another
+ * thread.
+ */
+ cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
+ if (iwcm_deref_id(cm_id_priv) && cb_destroy) {
BUG_ON(!list_empty(&cm_id_priv->work_list));
free_cm_id(cm_id_priv);
}
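
The reordering is the whole fix: iwcm_deref_id() may drop the last reference, after which another thread can free cm_id_priv, making the old code's subsequent test_bit() a use-after-free. The safe shape:

    int cb_destroy;

    /* sample the flag while the reference still pins the object... */
    cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
    /* ...because this deref may be the one that lets it be freed */
    if (iwcm_deref_id(cm_id_priv) && cb_destroy)
            free_cm_id(cm_id_priv);
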
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index bdc842e9faef..a283274a5a09 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -49,12 +49,20 @@
#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
do { \
- (udata)->inbuf = (void __user *) (ibuf); \
+ (udata)->inbuf = (const void __user *) (ibuf); \
(udata)->outbuf = (void __user *) (obuf); \
(udata)->inlen = (ilen); \
(udata)->outlen = (olen); \
} while (0)
+#define INIT_UDATA_BUF_OR_NULL(udata, ibuf, obuf, ilen, olen) \
+ do { \
+ (udata)->inbuf = (ilen) ? (const void __user *) (ibuf) : NULL; \
+ (udata)->outbuf = (olen) ? (void __user *) (obuf) : NULL; \
+ (udata)->inlen = (ilen); \
+ (udata)->outlen = (olen); \
+ } while (0)
+
/*
* Our lifetime rules for these structs are the following:
*
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 65f6e7dc380c..f1cc83855af6 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2593,6 +2593,9 @@ out_put:
static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
union ib_flow_spec *ib_spec)
{
+ if (kern_spec->reserved)
+ return -EINVAL;
+
ib_spec->type = kern_spec->type;
switch (ib_spec->type) {
@@ -2646,6 +2649,9 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
void *ib_spec;
int i;
+ if (ucore->inlen < sizeof(cmd))
+ return -EINVAL;
+
if (ucore->outlen < sizeof(resp))
return -ENOSPC;
@@ -2671,6 +2677,10 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
(cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
return -EINVAL;
+ if (cmd.flow_attr.reserved[0] ||
+ cmd.flow_attr.reserved[1])
+ return -EINVAL;
+
if (cmd.flow_attr.num_of_specs) {
kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
GFP_KERNEL);
@@ -2731,6 +2741,7 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
i, cmd.flow_attr.size);
+ err = -EINVAL;
goto err_free;
}
flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
@@ -2791,10 +2802,16 @@ int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
struct ib_uobject *uobj;
int ret;
+ if (ucore->inlen < sizeof(cmd))
+ return -EINVAL;
+
ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
if (ret)
return ret;
+ if (cmd.comp_mask)
+ return -EINVAL;
+
uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
file->ucontext);
if (!uobj)
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 34386943ebcf..08219fb3338b 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -668,25 +668,30 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count)
return -EINVAL;
+ if (ex_hdr.cmd_hdr_reserved)
+ return -EINVAL;
+
if (ex_hdr.response) {
if (!hdr.out_words && !ex_hdr.provider_out_words)
return -EINVAL;
+
+ if (!access_ok(VERIFY_WRITE,
+ (void __user *) (unsigned long) ex_hdr.response,
+ (hdr.out_words + ex_hdr.provider_out_words) * 8))
+ return -EFAULT;
} else {
if (hdr.out_words || ex_hdr.provider_out_words)
return -EINVAL;
}
- INIT_UDATA(&ucore,
- (hdr.in_words) ? buf : 0,
- (unsigned long)ex_hdr.response,
- hdr.in_words * 8,
- hdr.out_words * 8);
-
- INIT_UDATA(&uhw,
- (ex_hdr.provider_in_words) ? buf + ucore.inlen : 0,
- (ex_hdr.provider_out_words) ? (unsigned long)ex_hdr.response + ucore.outlen : 0,
- ex_hdr.provider_in_words * 8,
- ex_hdr.provider_out_words * 8);
+ INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
+ hdr.in_words * 8, hdr.out_words * 8);
+
+ INIT_UDATA_BUF_OR_NULL(&uhw,
+ buf + ucore.inlen,
+ (unsigned long) ex_hdr.response + ucore.outlen,
+ ex_hdr.provider_in_words * 8,
+ ex_hdr.provider_out_words * 8);
err = uverbs_ex_cmd_table[command](file,
&ucore,
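
The point of the new macro: a direction with zero length now yields a NULL user pointer rather than a garbage non-NULL one, with the access_ok() check above validating the response buffer up front. Illustratively:

    struct ib_udata ucore;

    INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long)ex_hdr.response,
                           0 /* inlen */, 64 /* outlen */);
    /* -> ucore.inbuf == NULL, ucore.outbuf == (void __user *)response */
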
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 12fef76c791c..45126879ad28 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -524,50 +524,6 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
-#define VLAN_NONE 0xfff
-#define FILTER_SEL_VLAN_NONE 0xffff
-#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
-#define FILTER_SEL_WIDTH_VIN_P_FC \
- (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
-#define FILTER_SEL_WIDTH_TAG_P_FC \
- (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
-#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
-
-static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
- struct l2t_entry *l2t)
-{
- unsigned int ntuple = 0;
- u32 viid;
-
- switch (dev->rdev.lldi.filt_mode) {
-
- /* default filter mode */
- case HW_TPL_FR_MT_PR_IV_P_FC:
- if (l2t->vlan == VLAN_NONE)
- ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
- else {
- ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
- ntuple |= 1 << FILTER_SEL_WIDTH_TAG_P_FC;
- }
- ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
- FILTER_SEL_WIDTH_VLD_TAG_P_FC;
- break;
- case HW_TPL_FR_MT_PR_OV_P_FC: {
- viid = cxgb4_port_viid(l2t->neigh->dev);
-
- ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
- ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
- ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
- ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
- FILTER_SEL_WIDTH_VLD_TAG_P_FC;
- break;
- }
- default:
- break;
- }
- return ntuple;
-}
-
static int send_connect(struct c4iw_ep *ep)
{
struct cpl_act_open_req *req;
@@ -641,8 +597,9 @@ static int send_connect(struct c4iw_ep *ep)
req->local_ip = la->sin_addr.s_addr;
req->peer_ip = ra->sin_addr.s_addr;
req->opt0 = cpu_to_be64(opt0);
- req->params = cpu_to_be32(select_ntuple(ep->com.dev,
- ep->dst, ep->l2t));
+ req->params = cpu_to_be32(cxgb4_select_ntuple(
+ ep->com.dev->rdev.lldi.ports[0],
+ ep->l2t));
req->opt2 = cpu_to_be32(opt2);
} else {
req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
@@ -662,9 +619,9 @@ static int send_connect(struct c4iw_ep *ep)
req6->peer_ip_lo = *((__be64 *)
(ra6->sin6_addr.s6_addr + 8));
req6->opt0 = cpu_to_be64(opt0);
- req6->params = cpu_to_be32(
- select_ntuple(ep->com.dev, ep->dst,
- ep->l2t));
+ req6->params = cpu_to_be32(cxgb4_select_ntuple(
+ ep->com.dev->rdev.lldi.ports[0],
+ ep->l2t));
req6->opt2 = cpu_to_be32(opt2);
}
} else {
@@ -681,8 +638,9 @@ static int send_connect(struct c4iw_ep *ep)
t5_req->peer_ip = ra->sin_addr.s_addr;
t5_req->opt0 = cpu_to_be64(opt0);
t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
- select_ntuple(ep->com.dev,
- ep->dst, ep->l2t)));
+ cxgb4_select_ntuple(
+ ep->com.dev->rdev.lldi.ports[0],
+ ep->l2t)));
t5_req->opt2 = cpu_to_be32(opt2);
} else {
t5_req6 = (struct cpl_t5_act_open_req6 *)
@@ -703,7 +661,9 @@ static int send_connect(struct c4iw_ep *ep)
(ra6->sin6_addr.s6_addr + 8));
t5_req6->opt0 = cpu_to_be64(opt0);
t5_req6->params = (__force __be64)cpu_to_be32(
- select_ntuple(ep->com.dev, ep->dst, ep->l2t));
+ cxgb4_select_ntuple(
+ ep->com.dev->rdev.lldi.ports[0],
+ ep->l2t));
t5_req6->opt2 = cpu_to_be32(opt2);
}
}
@@ -1630,7 +1590,8 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
memset(req, 0, sizeof(*req));
req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
- req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst,
+ req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
+ ep->com.dev->rdev.lldi.ports[0],
ep->l2t));
sin = (struct sockaddr_in *)&ep->com.local_addr;
req->le.lport = sin->sin_port;
@@ -2938,7 +2899,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
/*
* Allocate a server TID.
*/
- if (dev->rdev.lldi.enable_fw_ofld_conn)
+ if (dev->rdev.lldi.enable_fw_ofld_conn &&
+ ep->com.local_addr.ss_family == AF_INET)
ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
cm_id->local_addr.ss_family, ep);
else
@@ -3323,9 +3285,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
/*
* Calculate the server TID from the filter hit index in cpl_rx_pkt.
*/
- stid = (__force int) cpu_to_be32((__force u32) rss->hash_val)
- - dev->rdev.lldi.tids->sftid_base
- + dev->rdev.lldi.tids->nstids;
+ stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
if (!lep) {
@@ -3397,7 +3357,9 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
window = (__force u16) htons((__force u16)tcph->window);
/* Calculate filter portion for LE region. */
- filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e));
+ filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
+ dev->rdev.lldi.ports[0],
+ e));
/*
* Synthesize the cpl_pass_accept_req. We have everything except the
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 4cb8eb24497c..84e45006451c 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -173,7 +173,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
return ret;
}
-int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
+static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
u32 remain = len;
u32 dmalen;
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 6b29249aa85a..9c9f2f57e960 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1354,8 +1354,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
neigh->ha, ntohl(rt->rt_gateway));
if (arpindex >= 0) {
- if (!memcmp(nesadapter->arp_table[arpindex].mac_addr,
- neigh->ha, ETH_ALEN)) {
+ if (ether_addr_equal(nesadapter->arp_table[arpindex].mac_addr, neigh->ha)) {
/* Mac address same as in nes_arp_table */
goto out;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index c29b5c838833..cdc7df4fdb8a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -31,6 +31,7 @@
*/
#include <linux/netdevice.h>
+#include <linux/if_arp.h> /* For ARPHRD_xxx */
#include <linux/module.h>
#include <net/rtnetlink.h>
#include "ipoib.h"
@@ -103,7 +104,7 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
return -EINVAL;
pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
- if (!pdev)
+ if (!pdev || pdev->type != ARPHRD_INFINIBAND)
return -ENODEV;
ppriv = netdev_priv(pdev);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 6df23502059a..9804fca6bf06 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -22,6 +22,7 @@
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
+#include <linux/llist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
@@ -206,7 +207,9 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
isert_conn->conn_rx_descs = NULL;
}
+static void isert_cq_tx_work(struct work_struct *);
static void isert_cq_tx_callback(struct ib_cq *, void *);
+static void isert_cq_rx_work(struct work_struct *);
static void isert_cq_rx_callback(struct ib_cq *, void *);
static int
@@ -258,26 +261,36 @@ isert_create_device_ib_res(struct isert_device *device)
cq_desc[i].device = device;
cq_desc[i].cq_index = i;
+ INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
isert_cq_rx_callback,
isert_cq_event_callback,
(void *)&cq_desc[i],
ISER_MAX_RX_CQ_LEN, i);
- if (IS_ERR(device->dev_rx_cq[i]))
+ if (IS_ERR(device->dev_rx_cq[i])) {
+ ret = PTR_ERR(device->dev_rx_cq[i]);
+ device->dev_rx_cq[i] = NULL;
goto out_cq;
+ }
+ INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
isert_cq_tx_callback,
isert_cq_event_callback,
(void *)&cq_desc[i],
ISER_MAX_TX_CQ_LEN, i);
- if (IS_ERR(device->dev_tx_cq[i]))
+ if (IS_ERR(device->dev_tx_cq[i])) {
+ ret = PTR_ERR(device->dev_tx_cq[i]);
+ device->dev_tx_cq[i] = NULL;
goto out_cq;
+ }
- if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP))
+ ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
+ if (ret)
goto out_cq;
- if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP))
+ ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
+ if (ret)
goto out_cq;
}
@@ -489,6 +502,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
kref_init(&isert_conn->conn_kref);
kref_get(&isert_conn->conn_kref);
mutex_init(&isert_conn->conn_mutex);
+ mutex_init(&isert_conn->conn_comp_mutex);
spin_lock_init(&isert_conn->conn_lock);
cma_id->context = isert_conn;
@@ -843,14 +857,32 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
}
static void
-isert_init_send_wr(struct isert_cmd *isert_cmd, struct ib_send_wr *send_wr)
+isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
+ struct ib_send_wr *send_wr, bool coalesce)
{
+ struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
+
isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
send_wr->opcode = IB_WR_SEND;
- send_wr->send_flags = IB_SEND_SIGNALED;
- send_wr->sg_list = &isert_cmd->tx_desc.tx_sg[0];
+ send_wr->sg_list = &tx_desc->tx_sg[0];
send_wr->num_sge = isert_cmd->tx_desc.num_sge;
+ /*
+	 * Coalesce send completion interrupts by setting the IB_SEND_SIGNALED
+	 * bit on only every ISERT_COMP_BATCH_COUNT-th ib_post_send() call.
+ */
+ mutex_lock(&isert_conn->conn_comp_mutex);
+ if (coalesce &&
+ ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
+ llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
+ mutex_unlock(&isert_conn->conn_comp_mutex);
+ return;
+ }
+ isert_conn->conn_comp_batch = 0;
+ tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
+ mutex_unlock(&isert_conn->conn_comp_mutex);
+
+ send_wr->send_flags = IB_SEND_SIGNALED;
}
static int
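
The coalescing scheme in the hunk above: only every ISERT_COMP_BATCH_COUNT-th send WR is posted with IB_SEND_SIGNALED; the unsignaled descriptors are parked on a lock-free llist whose batch head is handed to the signaled descriptor, and that descriptor's completion drains the list (see isert_send_completion further down). Compressed, with the conn_comp_mutex locking elided:

    if (coalesce && ++conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
            llist_add(&tx_desc->comp_llnode, &conn->conn_comp_llist);
            return;                         /* post unsignaled */
    }
    conn->conn_comp_batch = 0;
    tx_desc->comp_llnode_batch = llist_del_all(&conn->conn_comp_llist);
    send_wr->send_flags = IB_SEND_SIGNALED; /* one IRQ covers the batch */
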
@@ -1582,8 +1614,8 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
}
static void
-isert_send_completion(struct iser_tx_desc *tx_desc,
- struct isert_conn *isert_conn)
+__isert_send_completion(struct iser_tx_desc *tx_desc,
+ struct isert_conn *isert_conn)
{
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
@@ -1624,6 +1656,24 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
}
static void
+isert_send_completion(struct iser_tx_desc *tx_desc,
+ struct isert_conn *isert_conn)
+{
+ struct llist_node *llnode = tx_desc->comp_llnode_batch;
+ struct iser_tx_desc *t;
+ /*
+	 * Drain the coalesced completion llist, starting from comp_llnode_batch
+	 * set up in isert_init_send_wr(), then complete the trailing tx_desc.
+ */
+ while (llnode) {
+ t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
+ llnode = llist_next(llnode);
+ __isert_send_completion(t, isert_conn);
+ }
+ __isert_send_completion(tx_desc, isert_conn);
+}
+
+static void
isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
{
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
@@ -1686,7 +1736,6 @@ isert_cq_tx_callback(struct ib_cq *cq, void *context)
{
struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
- INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
}
@@ -1730,7 +1779,6 @@ isert_cq_rx_callback(struct ib_cq *cq, void *context)
{
struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
- INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
}
@@ -1793,7 +1841,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
isert_cmd->tx_desc.num_sge = 2;
}
- isert_init_send_wr(isert_cmd, send_wr);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr, true);
pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
@@ -1813,7 +1861,7 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
&isert_cmd->tx_desc.iscsi_header,
nopout_response);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
- isert_init_send_wr(isert_cmd, send_wr);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
@@ -1831,7 +1879,7 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
&isert_cmd->tx_desc.iscsi_header);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
- isert_init_send_wr(isert_cmd, send_wr);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
@@ -1849,7 +1897,7 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
&isert_cmd->tx_desc.iscsi_header);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
- isert_init_send_wr(isert_cmd, send_wr);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
@@ -1881,7 +1929,7 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
tx_dsg->lkey = isert_conn->conn_mr->lkey;
isert_cmd->tx_desc.num_sge = 2;
- isert_init_send_wr(isert_cmd, send_wr);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
@@ -1921,7 +1969,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
tx_dsg->lkey = isert_conn->conn_mr->lkey;
isert_cmd->tx_desc.num_sge = 2;
}
- isert_init_send_wr(isert_cmd, send_wr);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
@@ -1991,8 +2039,6 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
data_left = se_cmd->data_length;
- iscsit_increment_maxcmdsn(cmd, conn->sess);
- cmd->stat_sn = conn->stat_sn++;
} else {
sg_off = cmd->write_data_done / PAGE_SIZE;
data_left = se_cmd->data_length - cmd->write_data_done;
@@ -2204,8 +2250,6 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
data_left = se_cmd->data_length;
- iscsit_increment_maxcmdsn(cmd, conn->sess);
- cmd->stat_sn = conn->stat_sn++;
} else {
sg_off = cmd->write_data_done / PAGE_SIZE;
data_left = se_cmd->data_length - cmd->write_data_done;
@@ -2259,18 +2303,26 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
data_len = min(data_left, rdma_write_max);
wr->cur_rdma_length = data_len;
- spin_lock_irqsave(&isert_conn->conn_lock, flags);
- fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
- struct fast_reg_descriptor, list);
- list_del(&fr_desc->list);
- spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
- wr->fr_desc = fr_desc;
+	/* If there is a single DMA entry, the DMA MR is sufficient */
+ if (count == 1) {
+ ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
+ ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
+ ib_sge->lkey = isert_conn->conn_mr->lkey;
+ wr->fr_desc = NULL;
+ } else {
+ spin_lock_irqsave(&isert_conn->conn_lock, flags);
+ fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
+ struct fast_reg_descriptor, list);
+ list_del(&fr_desc->list);
+ spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+ wr->fr_desc = fr_desc;
- ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
- ib_sge, offset, data_len);
- if (ret) {
- list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
- goto unmap_sg;
+ ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
+ ib_sge, offset, data_len);
+ if (ret) {
+ list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
+ goto unmap_sg;
+ }
}
return 0;
@@ -2306,10 +2358,11 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
* Build isert_conn->tx_desc for iSCSI response PDU and attach
*/
isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
- iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
+ iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
&isert_cmd->tx_desc.iscsi_header);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
- isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);
+ isert_init_send_wr(isert_conn, isert_cmd,
+ &isert_cmd->tx_desc.send_wr, true);
atomic_inc(&isert_conn->post_send_buf_count);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 631f2090f0b8..691f90ff2d83 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -43,6 +43,8 @@ struct iser_tx_desc {
struct ib_sge tx_sg[2];
int num_sge;
struct isert_cmd *isert_cmd;
+ struct llist_node *comp_llnode_batch;
+ struct llist_node comp_llnode;
struct ib_send_wr send_wr;
} __packed;
@@ -121,6 +123,10 @@ struct isert_conn {
int conn_frwr_pool_size;
/* lock to protect frwr_pool */
spinlock_t conn_lock;
+#define ISERT_COMP_BATCH_COUNT 8
+ int conn_comp_batch;
+ struct llist_head conn_comp_llist;
+ struct mutex conn_comp_mutex;
};
#define ISERT_MAX_CQ 64
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 6c923c7039a1..520a7e5a490b 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1352,11 +1352,8 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
/* XXX(hch): this is a horrible layering violation.. */
spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
- ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
ioctx->cmd.transport_state &= ~CMD_T_ACTIVE;
spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
-
- complete(&ioctx->cmd.transport_lun_stop_comp);
break;
case SRPT_STATE_CMD_RSP_SENT:
/*
@@ -1364,9 +1361,6 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
* not been received in time.
*/
srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
- spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
- ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
- spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
break;
case SRPT_STATE_MGMT_RSP_SENT:
@@ -1476,7 +1470,6 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
{
struct se_cmd *cmd;
enum srpt_command_state state;
- unsigned long flags;
cmd = &ioctx->cmd;
state = srpt_get_cmd_state(ioctx);
@@ -1496,9 +1489,6 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
__func__, __LINE__, state);
break;
case SRPT_RDMA_WRITE_LAST:
- spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
- ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
- spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
break;
default:
printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__,
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index dbd2047f1641..3ed23513d881 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -536,7 +536,8 @@ static int adp5588_probe(struct i2c_client *client,
__set_bit(EV_REP, input->evbit);
for (i = 0; i < input->keycodemax; i++)
- __set_bit(kpad->keycode[i] & KEY_MAX, input->keybit);
+ if (kpad->keycode[i] <= KEY_MAX)
+ __set_bit(kpad->keycode[i], input->keybit);
__clear_bit(KEY_RESERVED, input->keybit);
if (kpad->gpimapsize)
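
The shared "& KEY_MAX" idiom these keyboard drivers used was subtly wrong: KEY_MAX (0x2ff) is not a 2^n - 1 mask, so out-of-range keycodes were silently aliased onto unrelated valid bits instead of being rejected. The corrected loop shape, common to all three drivers touched here:

    for (i = 0; i < input->keycodemax; i++)
            if (kpad->keycode[i] <= KEY_MAX)    /* skip, don't alias */
                    __set_bit(kpad->keycode[i], input->keybit);
    __clear_bit(KEY_RESERVED, input->keybit);
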
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 67d12b3427c9..60dafd4fa692 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -992,7 +992,8 @@ static int adp5589_probe(struct i2c_client *client,
__set_bit(EV_REP, input->evbit);
for (i = 0; i < input->keycodemax; i++)
- __set_bit(kpad->keycode[i] & KEY_MAX, input->keybit);
+ if (kpad->keycode[i] <= KEY_MAX)
+ __set_bit(kpad->keycode[i], input->keybit);
__clear_bit(KEY_RESERVED, input->keybit);
if (kpad->gpimapsize)
diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c
index fc88fb48d70d..09b91d093087 100644
--- a/drivers/input/keyboard/bf54x-keys.c
+++ b/drivers/input/keyboard/bf54x-keys.c
@@ -289,7 +289,8 @@ static int bfin_kpad_probe(struct platform_device *pdev)
__set_bit(EV_REP, input->evbit);
for (i = 0; i < input->keycodemax; i++)
- __set_bit(bf54x_kpad->keycode[i] & KEY_MAX, input->keybit);
+ if (bf54x_kpad->keycode[i] <= KEY_MAX)
+ __set_bit(bf54x_kpad->keycode[i], input->keybit);
__clear_bit(KEY_RESERVED, input->keybit);
error = input_register_device(input);
diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
index 0735de3a6468..1cb1da294419 100644
--- a/drivers/input/misc/adxl34x.c
+++ b/drivers/input/misc/adxl34x.c
@@ -158,7 +158,7 @@
/* ORIENT ADXL346 only */
#define ADXL346_2D_VALID (1 << 6)
-#define ADXL346_2D_ORIENT(x) (((x) & 0x3) >> 4)
+#define ADXL346_2D_ORIENT(x) (((x) & 0x30) >> 4)
#define ADXL346_3D_VALID (1 << 3)
#define ADXL346_3D_ORIENT(x) ((x) & 0x7)
#define ADXL346_2D_PORTRAIT_POS 0 /* +X */
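
The one-character adxl34x fix deserves a note: with the old mask, (x & 0x3) keeps bits 1:0, which the >> 4 then shifts away, so ADXL346_2D_ORIENT() always evaluated to 0. The two-bit 2D orientation field actually sits in bits 5:4, hence 0x30:

    /* old: (((x) & 0x03) >> 4) -> always 0         */
    /* new: (((x) & 0x30) >> 4) -> 0..3 (bits 5:4)  */
    unsigned int orient = ADXL346_2D_ORIENT(reg_val);
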
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index 86b822806e95..45e0e3e55de2 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -180,7 +180,10 @@ static int64_t hp_sdc_rtc_read_i8042timer (uint8_t loadcmd, int numreg)
if (WARN_ON(down_interruptible(&i8042tregs)))
return -1;
- if (hp_sdc_enqueue_transaction(&t)) return -1;
+ if (hp_sdc_enqueue_transaction(&t)) {
+ up(&i8042tregs);
+ return -1;
+ }
/* Sleep until results come back. */
if (WARN_ON(down_interruptible(&i8042tregs)))
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
index e37392976fdd..0deca5a3c87f 100644
--- a/drivers/input/misc/pcf8574_keypad.c
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -113,9 +113,12 @@ static int pcf8574_kp_probe(struct i2c_client *client, const struct i2c_device_i
idev->keycodemax = ARRAY_SIZE(lp->btncode);
for (i = 0; i < ARRAY_SIZE(pcf8574_kp_btncode); i++) {
- lp->btncode[i] = pcf8574_kp_btncode[i];
- __set_bit(lp->btncode[i] & KEY_MAX, idev->keybit);
+ if (lp->btncode[i] <= KEY_MAX) {
+ lp->btncode[i] = pcf8574_kp_btncode[i];
+ __set_bit(lp->btncode[i], idev->keybit);
+ }
}
+ __clear_bit(KEY_RESERVED, idev->keybit);
sprintf(lp->name, DRV_NAME);
sprintf(lp->phys, "kp_data/input0");
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index ca7a26f1dce8..5cf62e315218 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -70,6 +70,25 @@ static const struct alps_nibble_commands alps_v4_nibble_commands[] = {
{ PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */
};
+static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
+ { PSMOUSE_CMD_ENABLE, 0x00 }, /* 0 */
+ { PSMOUSE_CMD_SETRATE, 0x0a }, /* 1 */
+ { PSMOUSE_CMD_SETRATE, 0x14 }, /* 2 */
+ { PSMOUSE_CMD_SETRATE, 0x28 }, /* 3 */
+ { PSMOUSE_CMD_SETRATE, 0x3c }, /* 4 */
+ { PSMOUSE_CMD_SETRATE, 0x50 }, /* 5 */
+ { PSMOUSE_CMD_SETRATE, 0x64 }, /* 6 */
+ { PSMOUSE_CMD_SETRATE, 0xc8 }, /* 7 */
+ { PSMOUSE_CMD_GETID, 0x00 }, /* 8 */
+ { PSMOUSE_CMD_GETINFO, 0x00 }, /* 9 */
+ { PSMOUSE_CMD_SETRES, 0x00 }, /* a */
+ { PSMOUSE_CMD_SETRES, 0x01 }, /* b */
+ { PSMOUSE_CMD_SETRES, 0x02 }, /* c */
+ { PSMOUSE_CMD_SETRES, 0x03 }, /* d */
+ { PSMOUSE_CMD_SETSCALE21, 0x00 }, /* e */
+ { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */
+};
+
#define ALPS_DUALPOINT 0x02 /* touchpad has trackstick */
#define ALPS_PASS 0x04 /* device has a pass-through port */
@@ -103,6 +122,7 @@ static const struct alps_model_info alps_model_data[] = {
/* Dell Latitude E5500, E6400, E6500, Precision M4400 */
{ { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf,
ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
+ { { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V6, 0xff, 0xff, ALPS_DUALPOINT }, /* Dell XT2 */
{ { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
{ { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
@@ -645,6 +665,76 @@ static void alps_process_packet_v3(struct psmouse *psmouse)
alps_process_touchpad_packet_v3(psmouse);
}
+static void alps_process_packet_v6(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ unsigned char *packet = psmouse->packet;
+ struct input_dev *dev = psmouse->dev;
+ struct input_dev *dev2 = priv->dev2;
+ int x, y, z, left, right, middle;
+
+ /*
+	 * Byte 5 lets us distinguish whether the packet comes from the
+	 * touchpad or the trackpoint.
+ * Touchpad: 0 - 0x7E
+ * Trackpoint: 0x7F
+ */
+ if (packet[5] == 0x7F) {
+		/* Only a DualPoint should ever send trackpoint packets */
+ if (!(priv->flags & ALPS_DUALPOINT))
+ return;
+
+ /* Trackpoint packet */
+ x = packet[1] | ((packet[3] & 0x20) << 2);
+ y = packet[2] | ((packet[3] & 0x40) << 1);
+ z = packet[4];
+ left = packet[3] & 0x01;
+ right = packet[3] & 0x02;
+ middle = packet[3] & 0x04;
+
+		/* Prevent the cursor from jumping when the finger is lifted */
+ if (x == 0x7F && y == 0x7F && z == 0x7F)
+ x = y = z = 0;
+
+		/* Divide by 4 since the trackpoint's speed is too fast */
+ input_report_rel(dev2, REL_X, (char)x / 4);
+ input_report_rel(dev2, REL_Y, -((char)y / 4));
+
+ input_report_key(dev2, BTN_LEFT, left);
+ input_report_key(dev2, BTN_RIGHT, right);
+ input_report_key(dev2, BTN_MIDDLE, middle);
+
+ input_sync(dev2);
+ return;
+ }
+
+ /* Touchpad packet */
+ x = packet[1] | ((packet[3] & 0x78) << 4);
+ y = packet[2] | ((packet[4] & 0x78) << 4);
+ z = packet[5];
+ left = packet[3] & 0x01;
+ right = packet[3] & 0x02;
+
+ if (z > 30)
+ input_report_key(dev, BTN_TOUCH, 1);
+ if (z < 25)
+ input_report_key(dev, BTN_TOUCH, 0);
+
+ if (z > 0) {
+ input_report_abs(dev, ABS_X, x);
+ input_report_abs(dev, ABS_Y, y);
+ }
+
+ input_report_abs(dev, ABS_PRESSURE, z);
+ input_report_key(dev, BTN_TOOL_FINGER, z > 0);
+
+	/* The v6 touchpad does not have a middle button */
+ input_report_key(dev, BTN_LEFT, left);
+ input_report_key(dev, BTN_RIGHT, right);
+
+ input_sync(dev);
+}
+
static void alps_process_packet_v4(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
@@ -897,7 +987,7 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
}
/* Bytes 2 - pktsize should have 0 in the highest bit */
- if (priv->proto_version != ALPS_PROTO_V5 &&
+ if ((priv->proto_version < ALPS_PROTO_V5) &&
psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize &&
(psmouse->packet[psmouse->pktcnt - 1] & 0x80)) {
psmouse_dbg(psmouse, "refusing packet[%i] = %x\n",
@@ -1085,6 +1175,80 @@ static int alps_absolute_mode_v1_v2(struct psmouse *psmouse)
return ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETPOLL);
}
+static int alps_monitor_mode_send_word(struct psmouse *psmouse, u16 word)
+{
+ int i, nibble;
+
+ /*
+	 * Bits b0-b11 are valid; nibbles are sent lowest-first, i.e. in
+	 * inverse order. E.g. for word = 0x0123 the nibble send sequence
+	 * is 3, 2, 1.
+ */
+ for (i = 0; i <= 8; i += 4) {
+ nibble = (word >> i) & 0xf;
+ if (alps_command_mode_send_nibble(psmouse, nibble))
+ return -1;
+ }
+
+ return 0;
+}
+
+static int alps_monitor_mode_write_reg(struct psmouse *psmouse,
+ u16 addr, u16 value)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+
+ /* 0x0A0 is the command to write the word */
+ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE) ||
+ alps_monitor_mode_send_word(psmouse, 0x0A0) ||
+ alps_monitor_mode_send_word(psmouse, addr) ||
+ alps_monitor_mode_send_word(psmouse, value) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE))
+ return -1;
+
+ return 0;
+}
+
+static int alps_monitor_mode(struct psmouse *psmouse, bool enable)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+
+ if (enable) {
+ /* EC E9 F5 F5 E7 E6 E7 E9 to enter monitor mode */
+ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_GETINFO) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_GETINFO))
+ return -1;
+ } else {
+ /* EC to exit monitor mode */
+ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP))
+ return -1;
+ }
+
+ return 0;
+}
+
+static int alps_absolute_mode_v6(struct psmouse *psmouse)
+{
+ u16 reg_val = 0x181;
+ int ret = -1;
+
+	/* Enter monitor mode in order to write the register */
+ if (alps_monitor_mode(psmouse, true))
+ return -1;
+
+ ret = alps_monitor_mode_write_reg(psmouse, 0x000, reg_val);
+
+ if (alps_monitor_mode(psmouse, false))
+ ret = -1;
+
+ return ret;
+}
+
static int alps_get_status(struct psmouse *psmouse, char *param)
{
/* Get status: 0xF5 0xF5 0xF5 0xE9 */
@@ -1189,6 +1353,32 @@ static int alps_hw_init_v1_v2(struct psmouse *psmouse)
return 0;
}
+static int alps_hw_init_v6(struct psmouse *psmouse)
+{
+ unsigned char param[2] = {0xC8, 0x14};
+
+	/* Enter passthrough mode to let the trackpoint enter 6-byte raw mode */
+ if (alps_passthrough_mode_v2(psmouse, true))
+ return -1;
+
+ if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+ ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+ ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+ ps2_command(&psmouse->ps2dev, &param[0], PSMOUSE_CMD_SETRATE) ||
+ ps2_command(&psmouse->ps2dev, &param[1], PSMOUSE_CMD_SETRATE))
+ return -1;
+
+ if (alps_passthrough_mode_v2(psmouse, false))
+ return -1;
+
+ if (alps_absolute_mode_v6(psmouse)) {
+ psmouse_err(psmouse, "Failed to enable absolute mode\n");
+ return -1;
+ }
+
+ return 0;
+}
+
/*
* Enable or disable passthrough mode to the trackstick.
*/
@@ -1553,6 +1743,8 @@ static void alps_set_defaults(struct alps_data *priv)
priv->hw_init = alps_hw_init_v1_v2;
priv->process_packet = alps_process_packet_v1_v2;
priv->set_abs_params = alps_set_abs_params_st;
+ priv->x_max = 1023;
+ priv->y_max = 767;
break;
case ALPS_PROTO_V3:
priv->hw_init = alps_hw_init_v3;
@@ -1584,6 +1776,14 @@ static void alps_set_defaults(struct alps_data *priv)
priv->x_bits = 23;
priv->y_bits = 12;
break;
+ case ALPS_PROTO_V6:
+ priv->hw_init = alps_hw_init_v6;
+ priv->process_packet = alps_process_packet_v6;
+ priv->set_abs_params = alps_set_abs_params_st;
+ priv->nibble_commands = alps_v6_nibble_commands;
+ priv->x_max = 2047;
+ priv->y_max = 1535;
+ break;
}
}
@@ -1705,8 +1905,8 @@ static void alps_disconnect(struct psmouse *psmouse)
static void alps_set_abs_params_st(struct alps_data *priv,
struct input_dev *dev1)
{
- input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0);
- input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0);
+ input_set_abs_params(dev1, ABS_X, 0, priv->x_max, 0, 0);
+ input_set_abs_params(dev1, ABS_Y, 0, priv->y_max, 0, 0);
}
static void alps_set_abs_params_mt(struct alps_data *priv,
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index eee59853b9ce..704f0f924307 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -17,6 +17,7 @@
#define ALPS_PROTO_V3 3
#define ALPS_PROTO_V4 4
#define ALPS_PROTO_V5 5
+#define ALPS_PROTO_V6 6
/**
* struct alps_model_info - touchpad ID table
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 8551dcaf24db..597e9b8fc18d 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1313,6 +1313,7 @@ static int elantech_set_properties(struct elantech_data *etd)
break;
case 6:
case 7:
+ case 8:
etd->hw_version = 4;
break;
default:
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 98707fb2cb5d..8f4c4ab04bc2 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -455,16 +455,26 @@ static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(proto);
static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(extra);
-static DEVICE_ATTR_RO(modalias);
-static DEVICE_ATTR_WO(drvctl);
-static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL);
-static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode);
static struct attribute *serio_device_id_attrs[] = {
&dev_attr_type.attr,
&dev_attr_proto.attr,
&dev_attr_id.attr,
&dev_attr_extra.attr,
+ NULL
+};
+
+static struct attribute_group serio_id_attr_group = {
+ .name = "id",
+ .attrs = serio_device_id_attrs,
+};
+
+static DEVICE_ATTR_RO(modalias);
+static DEVICE_ATTR_WO(drvctl);
+static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL);
+static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode);
+
+static struct attribute *serio_device_attrs[] = {
&dev_attr_modalias.attr,
&dev_attr_description.attr,
&dev_attr_drvctl.attr,
@@ -472,13 +482,13 @@ static struct attribute *serio_device_id_attrs[] = {
NULL
};
-static struct attribute_group serio_id_attr_group = {
- .name = "id",
- .attrs = serio_device_id_attrs,
+static struct attribute_group serio_device_attr_group = {
+ .attrs = serio_device_attrs,
};
static const struct attribute_group *serio_device_attr_groups[] = {
&serio_id_attr_group,
+ &serio_device_attr_group,
NULL
};
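
Before this split, every attribute sat in the single group named "id", so modalias, description, drvctl and bind_mode appeared under .../serioN/id/ instead of on the device node itself. With two groups the layout becomes (a sketch of the usual serio sysfs paths):

    /sys/bus/serio/devices/serio0/id/type       <- named group "id"
    /sys/bus/serio/devices/serio0/id/proto
    /sys/bus/serio/devices/serio0/modalias      <- unnamed default group
    /sys/bus/serio/devices/serio0/bind_mode
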
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 00d1e547b211..961d58d32647 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -906,6 +906,17 @@ config TOUCHSCREEN_STMPE
To compile this driver as a module, choose M here: the
module will be called stmpe-ts.
+config TOUCHSCREEN_SUR40
+ tristate "Samsung SUR40 (Surface 2.0/PixelSense) touchscreen"
+ depends on USB
+ select INPUT_POLLDEV
+ help
+ Say Y here if you want support for the Samsung SUR40 touchscreen
+ (also known as Microsoft Surface 2.0 or Microsoft PixelSense).
+
+ To compile this driver as a module, choose M here: the
+ module will be called sur40.
+
config TOUCHSCREEN_TPS6507X
tristate "TPS6507x based touchscreens"
depends on I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 7587883b8d38..62801f213346 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -54,6 +54,7 @@ obj-$(CONFIG_TOUCHSCREEN_PIXCIR) += pixcir_i2c_ts.o
obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o
obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o
+obj-$(CONFIG_TOUCHSCREEN_SUR40) += sur40.o
obj-$(CONFIG_TOUCHSCREEN_TI_AM335X_TSC) += ti_am335x_tsc.o
obj-$(CONFIG_TOUCHSCREEN_TNETV107X) += tnetv107x-ts.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
diff --git a/drivers/input/touchscreen/atmel-wm97xx.c b/drivers/input/touchscreen/atmel-wm97xx.c
index 268a35e55d7f..279c0e42b8a7 100644
--- a/drivers/input/touchscreen/atmel-wm97xx.c
+++ b/drivers/input/touchscreen/atmel-wm97xx.c
@@ -391,7 +391,7 @@ static int __exit atmel_wm97xx_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int atmel_wm97xx_suspend(struct *dev)
+static int atmel_wm97xx_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct atmel_wm97xx *atmel_wm97xx = platform_get_drvdata(pdev);
diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c
index 42d830efa316..a035a390f8e2 100644
--- a/drivers/input/touchscreen/cyttsp4_core.c
+++ b/drivers/input/touchscreen/cyttsp4_core.c
@@ -1246,8 +1246,7 @@ static void cyttsp4_watchdog_timer(unsigned long handle)
dev_vdbg(cd->dev, "%s: Watchdog timer triggered\n", __func__);
- if (!work_pending(&cd->watchdog_work))
- schedule_work(&cd->watchdog_work);
+ schedule_work(&cd->watchdog_work);
return;
}
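
Dropping the guard is safe because schedule_work() already refuses to queue a work item that is still pending; the open-coded work_pending() test only added a test-then-queue race window:

    /* schedule_work() returns false if @work was already queued */
    schedule_work(&cd->watchdog_work);
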
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
new file mode 100644
index 000000000000..f1cb05148b46
--- /dev/null
+++ b/drivers/input/touchscreen/sur40.c
@@ -0,0 +1,466 @@
+/*
+ * Surface2.0/SUR40/PixelSense input driver
+ *
+ * Copyright (c) 2013 by Florian 'floe' Echtler <floe@butterbrot.org>
+ *
+ * Derived from the USB Skeleton driver 1.1,
+ * Copyright (c) 2003 Greg Kroah-Hartman (greg@kroah.com)
+ *
+ * and from the Apple USB BCM5974 multitouch driver,
+ * Copyright (c) 2008 Henrik Rydberg (rydberg@euromail.se)
+ *
+ * and from the generic hid-multitouch driver,
+ * Copyright (c) 2010-2012 Stephane Chatty <chatty@enac.fr>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/uaccess.h>
+#include <linux/usb.h>
+#include <linux/printk.h>
+#include <linux/input-polldev.h>
+#include <linux/input/mt.h>
+#include <linux/usb/input.h>
+
+/* read 512 bytes from endpoint 0x86 -> get header + blobs */
+struct sur40_header {
+
+ __le16 type; /* always 0x0001 */
+ __le16 count; /* count of blobs (if 0: continue prev. packet) */
+
+ __le32 packet_id; /* unique ID for all packets in one frame */
+
+ __le32 timestamp; /* milliseconds (inc. by 16 or 17 each frame) */
+ __le32 unknown; /* "epoch?" always 02/03 00 00 00 */
+
+} __packed;
+
+struct sur40_blob {
+
+ __le16 blob_id;
+
+ u8 action; /* 0x02 = enter/exit, 0x03 = update (?) */
+ u8 unknown; /* always 0x01 or 0x02 (no idea what this is?) */
+
+ __le16 bb_pos_x; /* upper left corner of bounding box */
+ __le16 bb_pos_y;
+
+ __le16 bb_size_x; /* size of bounding box */
+ __le16 bb_size_y;
+
+ __le16 pos_x; /* finger tip position */
+ __le16 pos_y;
+
+ __le16 ctr_x; /* centroid position */
+ __le16 ctr_y;
+
+ __le16 axis_x; /* somehow related to major/minor axis, mostly: */
+ __le16 axis_y; /* axis_x == bb_size_y && axis_y == bb_size_x */
+
+ __le32 angle; /* orientation in radians relative to x axis -
+ actually an IEEE754 float, don't use in kernel */
+
+ __le32 area; /* size in pixels/pressure (?) */
+
+ u8 padding[32];
+
+} __packed;
+
+/* combined header/blob data */
+struct sur40_data {
+ struct sur40_header header;
+ struct sur40_blob blobs[];
+} __packed;
+
+
+/* version information */
+#define DRIVER_SHORT "sur40"
+#define DRIVER_AUTHOR "Florian 'floe' Echtler <floe@butterbrot.org>"
+#define DRIVER_DESC "Surface2.0/SUR40/PixelSense input driver"
+
+/* vendor and device IDs */
+#define ID_MICROSOFT 0x045e
+#define ID_SUR40 0x0775
+
+/* sensor resolution */
+#define SENSOR_RES_X 1920
+#define SENSOR_RES_Y 1080
+
+/* touch data endpoint */
+#define TOUCH_ENDPOINT 0x86
+
+/* polling interval (ms) */
+#define POLL_INTERVAL 10
+
+/* maximum number of contacts FIXME: this is a guess? */
+#define MAX_CONTACTS 64
+
+/* control commands */
+#define SUR40_GET_VERSION 0xb0 /* 12 bytes string */
+#define SUR40_UNKNOWN1 0xb3 /* 5 bytes */
+#define SUR40_UNKNOWN2 0xc1 /* 24 bytes */
+
+#define SUR40_GET_STATE 0xc5 /* 4 bytes state (?) */
+#define SUR40_GET_SENSORS 0xb1 /* 8 bytes sensors */
+
+/*
+ * Note: an earlier, non-public version of this driver used USB_RECIP_ENDPOINT
+ * here by mistake which is very likely to have corrupted the firmware EEPROM
+ * on two separate SUR40 devices. Thanks to Alan Stern who spotted this bug.
+ * Should you ever run into a similar problem, the background story to this
+ * incident and instructions on how to fix the corrupted EEPROM are available
+ * at https://floe.butterbrot.org/matrix/hacking/surface/brick.html
+*/
+
+struct sur40_state {
+
+ struct usb_device *usbdev;
+ struct device *dev;
+ struct input_polled_dev *input;
+
+ struct sur40_data *bulk_in_buffer;
+ size_t bulk_in_size;
+ u8 bulk_in_epaddr;
+
+ char phys[64];
+};
+
+static int sur40_command(struct sur40_state *dev,
+ u8 command, u16 index, void *buffer, u16 size)
+{
+ return usb_control_msg(dev->usbdev, usb_rcvctrlpipe(dev->usbdev, 0),
+ command,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+ 0x00, index, buffer, size, 1000);
+}
+
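
Every SUR40 setup step funnels through this single vendor control read on endpoint 0; for example, each version-string read in sur40_init() below amounts to:

    u8 buffer[12];
    int ret = sur40_command(dev, SUR40_GET_VERSION, 0x00, buffer, 12);
    if (ret < 0)
            return ret;     /* propagated usb_control_msg() error */
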
+/* Initialization routine, called from sur40_open */
+static int sur40_init(struct sur40_state *dev)
+{
+ int result;
+ u8 buffer[24];
+
+ /* stupidly replay the original MS driver init sequence */
+ result = sur40_command(dev, SUR40_GET_VERSION, 0x00, buffer, 12);
+ if (result < 0)
+ return result;
+
+ result = sur40_command(dev, SUR40_GET_VERSION, 0x01, buffer, 12);
+ if (result < 0)
+ return result;
+
+ result = sur40_command(dev, SUR40_GET_VERSION, 0x02, buffer, 12);
+ if (result < 0)
+ return result;
+
+ result = sur40_command(dev, SUR40_UNKNOWN2, 0x00, buffer, 24);
+ if (result < 0)
+ return result;
+
+ result = sur40_command(dev, SUR40_UNKNOWN1, 0x00, buffer, 5);
+ if (result < 0)
+ return result;
+
+ result = sur40_command(dev, SUR40_GET_VERSION, 0x03, buffer, 12);
+
+ /*
+ * Discard the result buffer - no known data inside except
+ * some version strings, maybe extract these sometime...
+ */
+
+ return result;
+}
+
+/*
+ * Callback routines from input_polled_dev
+ */
+
+/* Enable the device, polling will now start. */
+static void sur40_open(struct input_polled_dev *polldev)
+{
+ struct sur40_state *sur40 = polldev->private;
+
+ dev_dbg(sur40->dev, "open\n");
+ sur40_init(sur40);
+}
+
+/* Disable device, polling has stopped. */
+static void sur40_close(struct input_polled_dev *polldev)
+{
+ struct sur40_state *sur40 = polldev->private;
+
+ dev_dbg(sur40->dev, "close\n");
+ /*
+ * There is no known way to stop the device, so we simply
+ * stop polling.
+ */
+}
+
+/*
+ * This function is called when a whole contact has been processed,
+ * so that it can assign it to a slot and store the data there.
+ */
+static void sur40_report_blob(struct sur40_blob *blob, struct input_dev *input)
+{
+ int wide, major, minor;
+
+ int bb_size_x = le16_to_cpu(blob->bb_size_x);
+ int bb_size_y = le16_to_cpu(blob->bb_size_y);
+
+ int pos_x = le16_to_cpu(blob->pos_x);
+ int pos_y = le16_to_cpu(blob->pos_y);
+
+ int ctr_x = le16_to_cpu(blob->ctr_x);
+ int ctr_y = le16_to_cpu(blob->ctr_y);
+
+ int slotnum = input_mt_get_slot_by_key(input, blob->blob_id);
+ if (slotnum < 0 || slotnum >= MAX_CONTACTS)
+ return;
+
+ input_mt_slot(input, slotnum);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, 1);
+ wide = (bb_size_x > bb_size_y);
+ major = max(bb_size_x, bb_size_y);
+ minor = min(bb_size_x, bb_size_y);
+
+ input_report_abs(input, ABS_MT_POSITION_X, pos_x);
+ input_report_abs(input, ABS_MT_POSITION_Y, pos_y);
+ input_report_abs(input, ABS_MT_TOOL_X, ctr_x);
+ input_report_abs(input, ABS_MT_TOOL_Y, ctr_y);
+
+ /* TODO: use a better orientation measure */
+ input_report_abs(input, ABS_MT_ORIENTATION, wide);
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR, major);
+ input_report_abs(input, ABS_MT_TOUCH_MINOR, minor);
+}
+
+/* core function: poll for new input data */
+static void sur40_poll(struct input_polled_dev *polldev)
+{
+
+ struct sur40_state *sur40 = polldev->private;
+ struct input_dev *input = polldev->input;
+ int result, bulk_read, need_blobs, packet_blobs, i;
+ u32 uninitialized_var(packet_id);
+
+ struct sur40_header *header = &sur40->bulk_in_buffer->header;
+ struct sur40_blob *inblob = &sur40->bulk_in_buffer->blobs[0];
+
+ dev_dbg(sur40->dev, "poll\n");
+
+ need_blobs = -1;
+
+ do {
+
+ /* perform a blocking bulk read to get data from the device */
+ result = usb_bulk_msg(sur40->usbdev,
+ usb_rcvbulkpipe(sur40->usbdev, sur40->bulk_in_epaddr),
+ sur40->bulk_in_buffer, sur40->bulk_in_size,
+ &bulk_read, 1000);
+
+ dev_dbg(sur40->dev, "received %d bytes\n", bulk_read);
+
+ if (result < 0) {
+ dev_err(sur40->dev, "error in usb_bulk_read\n");
+ return;
+ }
+
+ result = bulk_read - sizeof(struct sur40_header);
+
+ if (result % sizeof(struct sur40_blob) != 0) {
+ dev_err(sur40->dev, "transfer size mismatch\n");
+ return;
+ }
+
+ /* first packet? */
+ if (need_blobs == -1) {
+ need_blobs = le16_to_cpu(header->count);
+ dev_dbg(sur40->dev, "need %d blobs\n", need_blobs);
+ packet_id = le32_to_cpu(header->packet_id);
+ }
+
+ /*
+	 * Sanity check: when video data is also being retrieved, the
+ * packet ID will usually increase in the middle of a series
+ * instead of at the end.
+ */
+ if (packet_id != header->packet_id)
+ dev_warn(sur40->dev, "packet ID mismatch\n");
+
+ packet_blobs = result / sizeof(struct sur40_blob);
+ dev_dbg(sur40->dev, "received %d blobs\n", packet_blobs);
+
+ /* packets always contain at least 4 blobs, even if empty */
+ if (packet_blobs > need_blobs)
+ packet_blobs = need_blobs;
+
+ for (i = 0; i < packet_blobs; i++) {
+ need_blobs--;
+ dev_dbg(sur40->dev, "processing blob\n");
+ sur40_report_blob(&(inblob[i]), input);
+ }
+
+ } while (need_blobs > 0);
+
+ input_mt_sync_frame(input);
+ input_sync(input);
+}
+
+/* Initialize input device parameters. */
+static void sur40_input_setup(struct input_dev *input_dev)
+{
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(EV_ABS, input_dev->evbit);
+
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X,
+ 0, SENSOR_RES_X, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
+ 0, SENSOR_RES_Y, 0, 0);
+
+ input_set_abs_params(input_dev, ABS_MT_TOOL_X,
+ 0, SENSOR_RES_X, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_TOOL_Y,
+ 0, SENSOR_RES_Y, 0, 0);
+
+ /* max value unknown, but major/minor axis
+ * can never be larger than screen */
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
+ 0, SENSOR_RES_X, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR,
+ 0, SENSOR_RES_Y, 0, 0);
+
+ input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0);
+
+ input_mt_init_slots(input_dev, MAX_CONTACTS,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+}
+
+/* Check candidate USB interface. */
+static int sur40_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *usbdev = interface_to_usbdev(interface);
+ struct sur40_state *sur40;
+ struct usb_host_interface *iface_desc;
+ struct usb_endpoint_descriptor *endpoint;
+ struct input_polled_dev *poll_dev;
+ int error;
+
+ /* Check if we really have the right interface. */
+ iface_desc = &interface->altsetting[0];
+ if (iface_desc->desc.bInterfaceClass != 0xFF)
+ return -ENODEV;
+
+ /* Use endpoint #4 (0x86). */
+ endpoint = &iface_desc->endpoint[4].desc;
+ if (endpoint->bEndpointAddress != TOUCH_ENDPOINT)
+ return -ENODEV;
+
+ /* Allocate memory for our device state and initialize it. */
+ sur40 = kzalloc(sizeof(struct sur40_state), GFP_KERNEL);
+ if (!sur40)
+ return -ENOMEM;
+
+ poll_dev = input_allocate_polled_device();
+ if (!poll_dev) {
+ error = -ENOMEM;
+ goto err_free_dev;
+ }
+
+ /* Set up polled input device control structure */
+ poll_dev->private = sur40;
+ poll_dev->poll_interval = POLL_INTERVAL;
+ poll_dev->open = sur40_open;
+ poll_dev->poll = sur40_poll;
+ poll_dev->close = sur40_close;
+
+ /* Set up regular input device structure */
+ sur40_input_setup(poll_dev->input);
+
+ poll_dev->input->name = "Samsung SUR40";
+ usb_to_input_id(usbdev, &poll_dev->input->id);
+ usb_make_path(usbdev, sur40->phys, sizeof(sur40->phys));
+ strlcat(sur40->phys, "/input0", sizeof(sur40->phys));
+ poll_dev->input->phys = sur40->phys;
+ poll_dev->input->dev.parent = &interface->dev;
+
+ sur40->usbdev = usbdev;
+ sur40->dev = &interface->dev;
+ sur40->input = poll_dev;
+
+ /* use the bulk-in endpoint tested above */
+ sur40->bulk_in_size = usb_endpoint_maxp(endpoint);
+ sur40->bulk_in_epaddr = endpoint->bEndpointAddress;
+ sur40->bulk_in_buffer = kmalloc(sur40->bulk_in_size, GFP_KERNEL);
+ if (!sur40->bulk_in_buffer) {
+ dev_err(&interface->dev, "Unable to allocate input buffer.");
+ error = -ENOMEM;
+ goto err_free_polldev;
+ }
+
+ error = input_register_polled_device(poll_dev);
+ if (error) {
+ dev_err(&interface->dev,
+ "Unable to register polled input device.");
+ goto err_free_buffer;
+ }
+
+ /* we can register the device now, as it is ready */
+ usb_set_intfdata(interface, sur40);
+ dev_dbg(&interface->dev, "%s is now attached\n", DRIVER_DESC);
+
+ return 0;
+
+err_free_buffer:
+ kfree(sur40->bulk_in_buffer);
+err_free_polldev:
+ input_free_polled_device(sur40->input);
+err_free_dev:
+ kfree(sur40);
+
+ return error;
+}
+
+/* Unregister device & clean up. */
+static void sur40_disconnect(struct usb_interface *interface)
+{
+ struct sur40_state *sur40 = usb_get_intfdata(interface);
+
+ input_unregister_polled_device(sur40->input);
+ input_free_polled_device(sur40->input);
+ kfree(sur40->bulk_in_buffer);
+ kfree(sur40);
+
+ usb_set_intfdata(interface, NULL);
+ dev_dbg(&interface->dev, "%s is now disconnected\n", DRIVER_DESC);
+}
+
+static const struct usb_device_id sur40_table[] = {
+ { USB_DEVICE(ID_MICROSOFT, ID_SUR40) }, /* Samsung SUR40 */
+ { } /* terminating null entry */
+};
+MODULE_DEVICE_TABLE(usb, sur40_table);
+
+/* USB-specific object needed to register this driver with the USB subsystem. */
+static struct usb_driver sur40_driver = {
+ .name = DRIVER_SHORT,
+ .probe = sur40_probe,
+ .disconnect = sur40_disconnect,
+ .id_table = sur40_table,
+};
+
+module_usb_driver(sur40_driver);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
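
Each bulk transfer from the touch endpoint is one sur40_header followed by
zero or more sur40_blob records, and sur40_poll() keeps issuing reads until
header->count blobs have arrived. A minimal standalone sketch (plain C,
hypothetical helper name) of the size validation the poll loop performs:

	#include <stddef.h>

	/* Returns the number of whole blobs in one transfer, or -1 if the
	 * payload is not an exact multiple of the blob size (the driver
	 * drops such packets with "transfer size mismatch"). */
	static int split_transfer(size_t bulk_read, size_t header_size,
				  size_t blob_size)
	{
		size_t payload;

		if (bulk_read < header_size)
			return -1;
		payload = bulk_read - header_size;
		if (payload % blob_size != 0)
			return -1;
		return (int)(payload / blob_size);
	}
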
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index ae4b6b903629..5f87bed05467 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -106,6 +106,7 @@ struct usbtouch_device_info {
struct usbtouch_usb {
unsigned char *data;
dma_addr_t data_dma;
+ int data_size;
unsigned char *buffer;
int buf_len;
struct urb *irq;
@@ -1521,7 +1522,7 @@ static int usbtouch_reset_resume(struct usb_interface *intf)
static void usbtouch_free_buffers(struct usb_device *udev,
struct usbtouch_usb *usbtouch)
{
- usb_free_coherent(udev, usbtouch->type->rept_size,
+ usb_free_coherent(udev, usbtouch->data_size,
usbtouch->data, usbtouch->data_dma);
kfree(usbtouch->buffer);
}
@@ -1566,7 +1567,20 @@ static int usbtouch_probe(struct usb_interface *intf,
if (!type->process_pkt)
type->process_pkt = usbtouch_process_pkt;
- usbtouch->data = usb_alloc_coherent(udev, type->rept_size,
+ usbtouch->data_size = type->rept_size;
+ if (type->get_pkt_len) {
+ /*
+ * When dealing with variable-length packets we should
+ * not request more than wMaxPacketSize bytes at once
+ * as we do not know if there is more data coming or
+ * we filled exactly wMaxPacketSize bytes and there is
+ * nothing else.
+ */
+ usbtouch->data_size = min(usbtouch->data_size,
+ usb_endpoint_maxp(endpoint));
+ }
+
+ usbtouch->data = usb_alloc_coherent(udev, usbtouch->data_size,
GFP_KERNEL, &usbtouch->data_dma);
if (!usbtouch->data)
goto out_free;
@@ -1626,12 +1640,12 @@ static int usbtouch_probe(struct usb_interface *intf,
if (usb_endpoint_type(endpoint) == USB_ENDPOINT_XFER_INT)
usb_fill_int_urb(usbtouch->irq, udev,
usb_rcvintpipe(udev, endpoint->bEndpointAddress),
- usbtouch->data, type->rept_size,
+ usbtouch->data, usbtouch->data_size,
usbtouch_irq, usbtouch, endpoint->bInterval);
else
usb_fill_bulk_urb(usbtouch->irq, udev,
usb_rcvbulkpipe(udev, endpoint->bEndpointAddress),
- usbtouch->data, type->rept_size,
+ usbtouch->data, usbtouch->data_size,
usbtouch_irq, usbtouch);
usbtouch->irq->dev = udev;
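
The usbtouchscreen change above matters for devices whose report size exceeds
the endpoint's wMaxPacketSize: requesting rept_size bytes in one URB could
otherwise read past a short-packet boundary. A sketch of the clamp, using the
same variables as usbtouch_probe():

	/* Only variable-length protocols (type->get_pkt_len set) need the
	 * clamp; fixed-size reports always arrive as exactly rept_size. */
	usbtouch->data_size = type->rept_size;
	if (type->get_pkt_len)
		usbtouch->data_size = min(usbtouch->data_size,
					  usb_endpoint_maxp(endpoint));

With, say, a rept_size of 16 against a wMaxPacketSize of 8, each URB now asks
for 8 bytes and the existing get_pkt_len()/buffer path stitches longer
reports back together.
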
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 1abfb5684ab7..e46a88700b68 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -392,7 +392,7 @@ struct arm_smmu_domain {
struct arm_smmu_cfg root_cfg;
phys_addr_t output_mask;
- spinlock_t lock;
+ struct mutex lock;
};
static DEFINE_SPINLOCK(arm_smmu_devices_lock);
@@ -900,7 +900,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
goto out_free_domain;
smmu_domain->root_cfg.pgd = pgd;
- spin_lock_init(&smmu_domain->lock);
+ mutex_init(&smmu_domain->lock);
domain->priv = smmu_domain;
return 0;
@@ -1137,7 +1137,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
* Sanity check the domain. We don't currently support domains
* that cross between different SMMU chains.
*/
- spin_lock(&smmu_domain->lock);
+ mutex_lock(&smmu_domain->lock);
if (!smmu_domain->leaf_smmu) {
/* Now that we have a master, we can finalise the domain */
ret = arm_smmu_init_domain_context(domain, dev);
@@ -1152,7 +1152,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
dev_name(device_smmu->dev));
goto err_unlock;
}
- spin_unlock(&smmu_domain->lock);
+ mutex_unlock(&smmu_domain->lock);
/* Looks ok, so add the device to the domain */
master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
@@ -1162,7 +1162,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return arm_smmu_domain_add_master(smmu_domain, master);
err_unlock:
- spin_unlock(&smmu_domain->lock);
+ mutex_unlock(&smmu_domain->lock);
return ret;
}
@@ -1394,7 +1394,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
if (paddr & ~output_mask)
return -ERANGE;
- spin_lock(&smmu_domain->lock);
+ mutex_lock(&smmu_domain->lock);
pgd += pgd_index(iova);
end = iova + size;
do {
@@ -1410,7 +1410,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
} while (pgd++, iova != end);
out_unlock:
- spin_unlock(&smmu_domain->lock);
+ mutex_unlock(&smmu_domain->lock);
/* Ensure new page tables are visible to the hardware walker */
if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
@@ -1423,9 +1423,8 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int flags)
{
struct arm_smmu_domain *smmu_domain = domain->priv;
- struct arm_smmu_device *smmu = smmu_domain->leaf_smmu;
- if (!smmu_domain || !smmu)
+ if (!smmu_domain)
return -ENODEV;
/* Check for silent address truncation up the SMMU chain. */
@@ -1449,44 +1448,34 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
+ pgd_t *pgdp, pgd;
+ pud_t pud;
+ pmd_t pmd;
+ pte_t pte;
struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
- struct arm_smmu_device *smmu = root_cfg->smmu;
- spin_lock(&smmu_domain->lock);
- pgd = root_cfg->pgd;
- if (!pgd)
- goto err_unlock;
+ pgdp = root_cfg->pgd;
+ if (!pgdp)
+ return 0;
- pgd += pgd_index(iova);
- if (pgd_none_or_clear_bad(pgd))
- goto err_unlock;
+ pgd = *(pgdp + pgd_index(iova));
+ if (pgd_none(pgd))
+ return 0;
- pud = pud_offset(pgd, iova);
- if (pud_none_or_clear_bad(pud))
- goto err_unlock;
+ pud = *pud_offset(&pgd, iova);
+ if (pud_none(pud))
+ return 0;
- pmd = pmd_offset(pud, iova);
- if (pmd_none_or_clear_bad(pmd))
- goto err_unlock;
+ pmd = *pmd_offset(&pud, iova);
+ if (pmd_none(pmd))
+ return 0;
- pte = pmd_page_vaddr(*pmd) + pte_index(iova);
+ pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
if (pte_none(pte))
- goto err_unlock;
-
- spin_unlock(&smmu_domain->lock);
- return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK);
+ return 0;
-err_unlock:
- spin_unlock(&smmu_domain->lock);
- dev_warn(smmu->dev,
- "invalid (corrupt?) page tables detected for iova 0x%llx\n",
- (unsigned long long)iova);
- return -EINVAL;
+ return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
}
static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
@@ -1863,6 +1852,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
dev_err(dev,
"found only %d context interrupt(s) but %d required\n",
smmu->num_context_irqs, smmu->num_context_banks);
+ err = -ENODEV;
goto out_put_parent;
}
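
Two independent fixes ride in the arm-smmu diff: the domain lock becomes a
mutex because the map path can allocate page-table memory (which may sleep),
and iova_to_phys() now copies each table entry by value so it can walk the
tables without taking that lock at all. A compressed sketch of the
sleeping-context pattern, with demo_* names standing in for the driver's own:

	struct demo_domain {
		struct mutex lock;              /* was: spinlock_t */
	};

	static int demo_map(struct demo_domain *d)
	{
		void *tbl;

		mutex_lock(&d->lock);           /* sleeping is legal here */
		tbl = kzalloc(PAGE_SIZE, GFP_KERNEL);   /* may sleep */
		if (!tbl) {
			mutex_unlock(&d->lock);
			return -ENOMEM;
		}
		/* ... install tbl into the page tables ... */
		mutex_unlock(&d->lock);
		return 0;
	}
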
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 9031171c141b..341c6016812d 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -957,12 +957,13 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
if (WARN_ON(!gic->domain))
return;
+ if (gic_nr == 0) {
#ifdef CONFIG_SMP
- set_smp_cross_call(gic_raise_softirq);
- register_cpu_notifier(&gic_cpu_notifier);
+ set_smp_cross_call(gic_raise_softirq);
+ register_cpu_notifier(&gic_cpu_notifier);
#endif
-
- set_handle_irq(gic_handle_irq);
+ set_handle_irq(gic_handle_irq);
+ }
gic_chip.flags |= gic_arch_extn.flags;
gic_dist_init(gic);
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 82cec63a9011..3ee78f02e5d7 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -149,8 +149,9 @@ static void intc_irqpin_read_modify_write(struct intc_irqpin_priv *p,
static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
int irq, int do_mask)
{
- int bitfield_width = 4; /* PRIO assumed to have fixed bitfield width */
- int shift = (7 - irq) * bitfield_width; /* PRIO assumed to be 32-bit */
+ /* The PRIO register is assumed to be 32-bit with fixed 4-bit fields. */
+ int bitfield_width = 4;
+ int shift = 32 - (irq + 1) * bitfield_width;
intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO,
shift, bitfield_width,
@@ -159,8 +160,9 @@ static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value)
{
+ /* The SENSE register is assumed to be 32-bit. */
int bitfield_width = p->config.sense_bitfield_width;
- int shift = (7 - irq) * bitfield_width; /* SENSE assumed to be 32-bit */
+ int shift = 32 - (irq + 1) * bitfield_width;
dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value);
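
The two shift formulas only coincide for the fixed 4-bit PRIO case, where
(7 - irq) * 4 == 32 - (irq + 1) * 4 for irq 0..7; for any other
sense_bitfield_width the old expression indexed the wrong bits. A standalone
check (the width-2 layout is hypothetical):

	#include <assert.h>

	int main(void)
	{
		/* width 4 (PRIO): both formulas agree for irq 0..7 */
		for (int irq = 0; irq < 8; irq++)
			assert((7 - irq) * 4 == 32 - (irq + 1) * 4);

		/* width 2: they diverge for the first field */
		int irq = 0, width = 2;
		assert((7 - irq) * width == 14);        /* old: bits 14..15 */
		assert(32 - (irq + 1) * width == 30);   /* new: bits 30..31 */
		return 0;
	}
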
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 497bd026c237..4a4825528188 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -1643,10 +1643,6 @@ setup_hfcpci(struct IsdnCard *card)
int i;
struct pci_dev *tmp_hfcpci = NULL;
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
-
strcpy(tmp, hfcpci_revision);
printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));
diff --git a/drivers/isdn/hisax/telespci.c b/drivers/isdn/hisax/telespci.c
index f6ab63aa6995..33eeb4602c7e 100644
--- a/drivers/isdn/hisax/telespci.c
+++ b/drivers/isdn/hisax/telespci.c
@@ -290,10 +290,6 @@ int setup_telespci(struct IsdnCard *card)
struct IsdnCardState *cs = card->cs;
char tmp[64];
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
-
strcpy(tmp, telespci_revision);
printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp));
if (cs->typ != ISDN_CTYPE_TELESPCI)
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 8b98d53d9976..d9aebbc510cc 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1371,7 +1371,7 @@ isdn_net_type_trans(struct sk_buff *skb, struct net_device *dev)
eth = eth_hdr(skb);
if (*eth->h_dest & 1) {
- if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
+ if (ether_addr_equal(eth->h_dest, dev->broadcast))
skb->pkt_type = PACKET_BROADCAST;
else
skb->pkt_type = PACKET_MULTICAST;
@@ -1382,7 +1382,7 @@ isdn_net_type_trans(struct sk_buff *skb, struct net_device *dev)
*/
else if (dev->flags & (IFF_PROMISC /*| IFF_ALLMULTI*/)) {
- if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
+ if (!ether_addr_equal(eth->h_dest, dev->dev_addr))
skb->pkt_type = PACKET_OTHERHOST;
}
if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
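
Note the inverted sense in the conversion above: memcmp() returns 0 on a
match while ether_addr_equal() returns true, so the promiscuous-mode test
gains a '!'. Equivalent predicates, assuming the usual
<linux/etherdevice.h> helper:

	/* ether_addr_equal() can compare a MAC as one 32-bit plus one
	 * 16-bit load on architectures with efficient unaligned access,
	 * instead of a byte-wise memcmp(). */
	bool same   = ether_addr_equal(a, b);  /* memcmp(a, b, ETH_ALEN) == 0 */
	bool differ = !ether_addr_equal(a, b); /* memcmp(a, b, ETH_ALEN) != 0 */
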
diff --git a/drivers/isdn/sc/event.c b/drivers/isdn/sc/event.c
index 717003a3bdf4..833d96c2cf92 100644
--- a/drivers/isdn/sc/event.c
+++ b/drivers/isdn/sc/event.c
@@ -57,7 +57,7 @@ int indicate_status(int card, int event, ulong Channel, char *Data)
memcpy(&cmd.parm.setup, Data, sizeof(cmd.parm.setup));
break;
default:
- strcpy(cmd.parm.num, Data);
+ strlcpy(cmd.parm.num, Data, sizeof(cmd.parm.num));
}
}
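
cmd.parm.num is a fixed-size array and Data arrives from the card, so the
unbounded strcpy() could overflow it; strlcpy() bounds the copy and always
NUL-terminates, truncating oversized input. Standalone illustration of the
same semantics with a hypothetical 8-byte field (snprintf stands in for
strlcpy, which glibc does not provide):

	#include <stdio.h>

	int main(void)
	{
		char num[8];
		const char *data = "0123456789";   /* longer than num */

		/* copy at most sizeof(num) - 1 bytes, always terminate */
		snprintf(num, sizeof(num), "%s", data);
		printf("%s\n", num);               /* prints "0123456" */
		return 0;
	}
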
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 2848171b8576..b31d8e99c419 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -82,22 +82,12 @@ static inline size_t sizeof_pwm_leds_priv(int num_leds)
(sizeof(struct led_pwm_data) * num_leds);
}
-static struct led_pwm_priv *led_pwm_create_of(struct platform_device *pdev)
+static int led_pwm_create_of(struct platform_device *pdev,
+ struct led_pwm_priv *priv)
{
struct device_node *node = pdev->dev.of_node;
struct device_node *child;
- struct led_pwm_priv *priv;
- int count, ret;
-
- /* count LEDs in this device, so we know how much to allocate */
- count = of_get_child_count(node);
- if (!count)
- return NULL;
-
- priv = devm_kzalloc(&pdev->dev, sizeof_pwm_leds_priv(count),
- GFP_KERNEL);
- if (!priv)
- return NULL;
+ int ret;
for_each_child_of_node(node, child) {
struct led_pwm_data *led_dat = &priv->leds[priv->num_leds];
@@ -109,6 +99,7 @@ static struct led_pwm_priv *led_pwm_create_of(struct platform_device *pdev)
if (IS_ERR(led_dat->pwm)) {
dev_err(&pdev->dev, "unable to request PWM for %s\n",
led_dat->cdev.name);
+ ret = PTR_ERR(led_dat->pwm);
goto err;
}
		/* Get the period from the PWM core */
@@ -137,28 +128,36 @@ static struct led_pwm_priv *led_pwm_create_of(struct platform_device *pdev)
priv->num_leds++;
}
- return priv;
+ return 0;
err:
while (priv->num_leds--)
led_classdev_unregister(&priv->leds[priv->num_leds].cdev);
- return NULL;
+ return ret;
}
static int led_pwm_probe(struct platform_device *pdev)
{
struct led_pwm_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct led_pwm_priv *priv;
- int i, ret = 0;
+ int count, i;
+ int ret = 0;
+
+ if (pdata)
+ count = pdata->num_leds;
+ else
+ count = of_get_child_count(pdev->dev.of_node);
+
+ if (!count)
+ return -EINVAL;
- if (pdata && pdata->num_leds) {
- priv = devm_kzalloc(&pdev->dev,
- sizeof_pwm_leds_priv(pdata->num_leds),
- GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_kzalloc(&pdev->dev, sizeof_pwm_leds_priv(count),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
- for (i = 0; i < pdata->num_leds; i++) {
+ if (pdata) {
+ for (i = 0; i < count; i++) {
struct led_pwm *cur_led = &pdata->leds[i];
struct led_pwm_data *led_dat = &priv->leds[i];
@@ -188,11 +187,11 @@ static int led_pwm_probe(struct platform_device *pdev)
if (ret < 0)
goto err;
}
- priv->num_leds = pdata->num_leds;
+ priv->num_leds = count;
} else {
- priv = led_pwm_create_of(pdev);
- if (!priv)
- return -ENODEV;
+ ret = led_pwm_create_of(pdev, priv);
+ if (ret)
+ return ret;
}
platform_set_drvdata(pdev, priv);
diff --git a/drivers/macintosh/Makefile b/drivers/macintosh/Makefile
index 6753b65f8ede..d2f0120bc878 100644
--- a/drivers/macintosh/Makefile
+++ b/drivers/macintosh/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_WINDFARM_RM31) += windfarm_fcu_controls.o \
windfarm_ad7417_sensor.o \
windfarm_lm75_sensor.o \
windfarm_lm87_sensor.o \
+ windfarm_max6690_sensor.o \
windfarm_pid.o \
windfarm_cpufreq_clamp.o \
windfarm_rm31.o
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 2b46bf1d7e40..4c9852d92b0a 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -421,9 +421,11 @@ out:
if (watermark <= WATERMARK_METADATA) {
SET_GC_MARK(b, GC_MARK_METADATA);
+ SET_GC_MOVE(b, 0);
b->prio = BTREE_PRIO;
} else {
SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+ SET_GC_MOVE(b, 0);
b->prio = INITIAL_PRIO;
}
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 4beb55a0ff30..754f43177483 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -197,7 +197,7 @@ struct bucket {
uint8_t disk_gen;
uint8_t last_gc; /* Most out of date gen in the btree */
uint8_t gc_gen;
- uint16_t gc_mark;
+ uint16_t gc_mark; /* Bitfield used by GC. See below for field */
};
/*
@@ -209,7 +209,8 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE 0
#define GC_MARK_DIRTY 1
#define GC_MARK_METADATA 2
-BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14);
+BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13);
+BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
#include "journal.h"
#include "stats.h"
@@ -372,14 +373,14 @@ struct cached_dev {
unsigned char writeback_percent;
unsigned writeback_delay;
- int writeback_rate_change;
- int64_t writeback_rate_derivative;
uint64_t writeback_rate_target;
+ int64_t writeback_rate_proportional;
+ int64_t writeback_rate_derivative;
+ int64_t writeback_rate_change;
unsigned writeback_rate_update_seconds;
unsigned writeback_rate_d_term;
unsigned writeback_rate_p_term_inverse;
- unsigned writeback_rate_d_smooth;
};
enum alloc_watermarks {
@@ -445,7 +446,6 @@ struct cache {
* call prio_write() to keep gens from wrapping.
*/
uint8_t need_save_prio;
- unsigned gc_move_threshold;
/*
* If nonzero, we know we aren't going to find any buckets to invalidate
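
After the bcache.h change the 16-bit gc_mark packs three fields:
GC_SECTORS_USED shrinks from 14 bits to 13 to make room for the new GC_MOVE
flag. The resulting layout, with a standalone bit-packing check:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		/* bits 0..1  GC_MARK (reclaimable/dirty/metadata)
		 * bits 2..14 GC_SECTORS_USED (13 bits, values 0..8191)
		 * bit  15    GC_MOVE */
		uint16_t m = 0;
		m |= 2u;                 /* GC_MARK_METADATA */
		m |= 100u << 2;          /* GC_SECTORS_USED = 100 */
		m |= 1u << 15;           /* GC_MOVE set */

		assert((m & 0x3) == 2);
		assert(((m >> 2) & 0x1fff) == 100);
		assert((m >> 15) == 1);
		return 0;
	}
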
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 5e2765aadce1..31bb53fcc67a 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1561,6 +1561,28 @@ size_t bch_btree_gc_finish(struct cache_set *c)
SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
GC_MARK_METADATA);
+ /* don't reclaim buckets to which writeback keys point */
+ rcu_read_lock();
+ for (i = 0; i < c->nr_uuids; i++) {
+ struct bcache_device *d = c->devices[i];
+ struct cached_dev *dc;
+ struct keybuf_key *w, *n;
+ unsigned j;
+
+ if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
+ continue;
+ dc = container_of(d, struct cached_dev, disk);
+
+ spin_lock(&dc->writeback_keys.lock);
+ rbtree_postorder_for_each_entry_safe(w, n,
+ &dc->writeback_keys.keys, node)
+ for (j = 0; j < KEY_PTRS(&w->key); j++)
+ SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
+ GC_MARK_DIRTY);
+ spin_unlock(&dc->writeback_keys.lock);
+ }
+ rcu_read_unlock();
+
for_each_cache(ca, c, i) {
uint64_t *i;
@@ -1817,7 +1839,8 @@ static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
if (KEY_START(k) > KEY_START(insert) + sectors_found)
goto check_failed;
- if (KEY_PTRS(replace_key) != KEY_PTRS(k))
+ if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
+ KEY_DIRTY(k) != KEY_DIRTY(replace_key))
goto check_failed;
/* skip past gen */
@@ -2217,7 +2240,7 @@ struct btree_insert_op {
struct bkey *replace_key;
};
-int btree_insert_fn(struct btree_op *b_op, struct btree *b)
+static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
{
struct btree_insert_op *op = container_of(b_op,
struct btree_insert_op, op);
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 7c1275e66025..f2f0998c4a91 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -25,10 +25,9 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
unsigned i;
for (i = 0; i < KEY_PTRS(k); i++) {
- struct cache *ca = PTR_CACHE(c, k, i);
struct bucket *g = PTR_BUCKET(c, k, i);
- if (GC_SECTORS_USED(g) < ca->gc_move_threshold)
+ if (GC_MOVE(g))
return true;
}
@@ -65,11 +64,16 @@ static void write_moving_finish(struct closure *cl)
static void read_moving_endio(struct bio *bio, int error)
{
+ struct bbio *b = container_of(bio, struct bbio, bio);
struct moving_io *io = container_of(bio->bi_private,
struct moving_io, cl);
if (error)
io->op.error = error;
+ else if (!KEY_DIRTY(&b->key) &&
+ ptr_stale(io->op.c, &b->key, 0)) {
+ io->op.error = -EINTR;
+ }
bch_bbio_endio(io->op.c, bio, error, "reading data to move");
}
@@ -141,6 +145,11 @@ static void read_moving(struct cache_set *c)
if (!w)
break;
+ if (ptr_stale(c, &w->key, 0)) {
+ bch_keybuf_del(&c->moving_gc_keys, w);
+ continue;
+ }
+
io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
* DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
GFP_KERNEL);
@@ -184,7 +193,8 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r)
static unsigned bucket_heap_top(struct cache *ca)
{
- return GC_SECTORS_USED(heap_peek(&ca->heap));
+ struct bucket *b;
+ return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
}
void bch_moving_gc(struct cache_set *c)
@@ -226,9 +236,8 @@ void bch_moving_gc(struct cache_set *c)
sectors_to_move -= GC_SECTORS_USED(b);
}
- ca->gc_move_threshold = bucket_heap_top(ca);
-
- pr_debug("threshold %u", ca->gc_move_threshold);
+ while (heap_pop(&ca->heap, b, bucket_cmp))
+ SET_GC_MOVE(b, 1);
}
mutex_unlock(&c->bucket_lock);
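
heap_peek() yields NULL on an empty heap (and, per the util.h fix further
down, must test h->used rather than h->size, since a sized-but-empty heap
would otherwise hand back stale data[0]). bucket_heap_top() therefore grows a
guard; the same one-liner, spelled out:

	static unsigned bucket_heap_top(struct cache *ca)
	{
		struct bucket *b = heap_peek(&ca->heap);

		return b ? GC_SECTORS_USED(b) : 0;   /* empty heap -> 0 */
	}
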
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index dec15cd2d797..c57bfa071a57 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1676,7 +1676,7 @@ err:
static bool can_attach_cache(struct cache *ca, struct cache_set *c)
{
return ca->sb.block_size == c->sb.block_size &&
- ca->sb.bucket_size == c->sb.block_size &&
+ ca->sb.bucket_size == c->sb.bucket_size &&
ca->sb.nr_in_set == c->sb.nr_in_set;
}
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 80d4c2bee18a..a1f85612f0b3 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -83,7 +83,6 @@ rw_attribute(writeback_rate);
rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_d_term);
rw_attribute(writeback_rate_p_term_inverse);
-rw_attribute(writeback_rate_d_smooth);
read_attribute(writeback_rate_debug);
read_attribute(stripe_size);
@@ -129,31 +128,41 @@ SHOW(__bch_cached_dev)
var_printf(writeback_running, "%i");
var_print(writeback_delay);
var_print(writeback_percent);
- sysfs_print(writeback_rate, dc->writeback_rate.rate);
+ sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9);
var_print(writeback_rate_update_seconds);
var_print(writeback_rate_d_term);
var_print(writeback_rate_p_term_inverse);
- var_print(writeback_rate_d_smooth);
if (attr == &sysfs_writeback_rate_debug) {
+ char rate[20];
char dirty[20];
- char derivative[20];
char target[20];
- bch_hprint(dirty,
- bcache_dev_sectors_dirty(&dc->disk) << 9);
- bch_hprint(derivative, dc->writeback_rate_derivative << 9);
+ char proportional[20];
+ char derivative[20];
+ char change[20];
+ s64 next_io;
+
+ bch_hprint(rate, dc->writeback_rate.rate << 9);
+ bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
bch_hprint(target, dc->writeback_rate_target << 9);
+ bch_hprint(proportional,dc->writeback_rate_proportional << 9);
+ bch_hprint(derivative, dc->writeback_rate_derivative << 9);
+ bch_hprint(change, dc->writeback_rate_change << 9);
+
+ next_io = div64_s64(dc->writeback_rate.next - local_clock(),
+ NSEC_PER_MSEC);
return sprintf(buf,
- "rate:\t\t%u\n"
- "change:\t\t%i\n"
+ "rate:\t\t%s/sec\n"
"dirty:\t\t%s\n"
+ "target:\t\t%s\n"
+ "proportional:\t%s\n"
"derivative:\t%s\n"
- "target:\t\t%s\n",
- dc->writeback_rate.rate,
- dc->writeback_rate_change,
- dirty, derivative, target);
+ "change:\t\t%s/sec\n"
+ "next io:\t%llims\n",
+ rate, dirty, target, proportional,
+ derivative, change, next_io);
}
sysfs_hprint(dirty_data,
@@ -189,6 +198,7 @@ STORE(__cached_dev)
struct kobj_uevent_env *env;
#define d_strtoul(var) sysfs_strtoul(var, dc->var)
+#define d_strtoul_nonzero(var) sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var) sysfs_hatoi(var, dc->var)
sysfs_strtoul(data_csum, dc->disk.data_csum);
@@ -197,16 +207,15 @@ STORE(__cached_dev)
d_strtoul(writeback_metadata);
d_strtoul(writeback_running);
d_strtoul(writeback_delay);
- sysfs_strtoul_clamp(writeback_rate,
- dc->writeback_rate.rate, 1, 1000000);
+
sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);
- d_strtoul(writeback_rate_update_seconds);
+ sysfs_strtoul_clamp(writeback_rate,
+ dc->writeback_rate.rate, 1, INT_MAX);
+
+ d_strtoul_nonzero(writeback_rate_update_seconds);
d_strtoul(writeback_rate_d_term);
- d_strtoul(writeback_rate_p_term_inverse);
- sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
- dc->writeback_rate_p_term_inverse, 1, INT_MAX);
- d_strtoul(writeback_rate_d_smooth);
+ d_strtoul_nonzero(writeback_rate_p_term_inverse);
d_strtoi_h(sequential_cutoff);
d_strtoi_h(readahead);
@@ -313,7 +322,6 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_writeback_rate_update_seconds,
&sysfs_writeback_rate_d_term,
&sysfs_writeback_rate_p_term_inverse,
- &sysfs_writeback_rate_d_smooth,
&sysfs_writeback_rate_debug,
&sysfs_dirty_data,
&sysfs_stripe_size,
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 462214eeacbe..bb37618e7664 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -209,7 +209,13 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
{
uint64_t now = local_clock();
- d->next += div_u64(done, d->rate);
+ d->next += div_u64(done * NSEC_PER_SEC, d->rate);
+
+ if (time_before64(now + NSEC_PER_SEC, d->next))
+ d->next = now + NSEC_PER_SEC;
+
+ if (time_after64(now - NSEC_PER_SEC * 2, d->next))
+ d->next = now - NSEC_PER_SEC * 2;
return time_after64(d->next, now)
? div_u64(d->next - now, NSEC_PER_SEC / HZ)
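
bch_next_delay() now treats d->rate as units per second: finishing 'done'
units pushes d->next forward by done/rate seconds, in nanoseconds, and the
two clamps keep d->next within one second ahead of and two seconds behind
'now', so an idle period cannot bank unlimited credit. Worked numbers,
standalone:

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000ULL

	int main(void)
	{
		uint64_t rate = 1024;   /* e.g. sectors per second */
		uint64_t done = 512;    /* sectors just written */

		/* next += done/rate seconds = 0.5 s = 500,000,000 ns */
		uint64_t advance = done * NSEC_PER_SEC / rate;
		printf("%llu ns\n", (unsigned long long)advance);
		return 0;
	}
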
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 362c4b3f8b4a..1030c6020e98 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -110,7 +110,7 @@ do { \
_r; \
})
-#define heap_peek(h) ((h)->size ? (h)->data[0] : NULL)
+#define heap_peek(h) ((h)->used ? (h)->data[0] : NULL)
#define heap_full(h) ((h)->used == (h)->size)
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 99053b1251be..6c44fe059c27 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -30,38 +30,40 @@ static void __update_writeback_rate(struct cached_dev *dc)
/* PD controller */
- int change = 0;
- int64_t error;
int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
int64_t derivative = dirty - dc->disk.sectors_dirty_last;
+ int64_t proportional = dirty - target;
+ int64_t change;
dc->disk.sectors_dirty_last = dirty;
- derivative *= dc->writeback_rate_d_term;
- derivative = clamp(derivative, -dirty, dirty);
+ /* Scale to sectors per second */
- derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
- dc->writeback_rate_d_smooth, 0);
+ proportional *= dc->writeback_rate_update_seconds;
+ proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);
- /* Avoid divide by zero */
- if (!target)
- goto out;
+ derivative = div_s64(derivative, dc->writeback_rate_update_seconds);
- error = div64_s64((dirty + derivative - target) << 8, target);
+ derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
+ (dc->writeback_rate_d_term /
+ dc->writeback_rate_update_seconds) ?: 1, 0);
+
+ derivative *= dc->writeback_rate_d_term;
+ derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);
- change = div_s64((dc->writeback_rate.rate * error) >> 8,
- dc->writeback_rate_p_term_inverse);
+ change = proportional + derivative;
/* Don't increase writeback rate if the device isn't keeping up */
if (change > 0 &&
time_after64(local_clock(),
- dc->writeback_rate.next + 10 * NSEC_PER_MSEC))
+ dc->writeback_rate.next + NSEC_PER_MSEC))
change = 0;
dc->writeback_rate.rate =
- clamp_t(int64_t, dc->writeback_rate.rate + change,
+ clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
1, NSEC_PER_MSEC);
-out:
+
+ dc->writeback_rate_proportional = proportional;
dc->writeback_rate_derivative = derivative;
dc->writeback_rate_change = change;
dc->writeback_rate_target = target;
@@ -87,15 +89,11 @@ static void update_writeback_rate(struct work_struct *work)
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
- uint64_t ret;
-
if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
!dc->writeback_percent)
return 0;
- ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
-
- return min_t(uint64_t, ret, HZ);
+ return bch_next_delay(&dc->writeback_rate, sectors);
}
struct dirty_io {
@@ -241,7 +239,7 @@ static void read_dirty(struct cached_dev *dc)
if (KEY_START(&w->key) != dc->last_read ||
jiffies_to_msecs(delay) > 50)
while (!kthread_should_stop() && delay)
- delay = schedule_timeout_interruptible(delay);
+ delay = schedule_timeout_uninterruptible(delay);
dc->last_read = KEY_OFFSET(&w->key);
@@ -438,7 +436,7 @@ static int bch_writeback_thread(void *arg)
while (delay &&
!kthread_should_stop() &&
!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
- delay = schedule_timeout_interruptible(delay);
+ delay = schedule_timeout_uninterruptible(delay);
}
}
@@ -476,6 +474,8 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
sectors_dirty_init_fn, 0);
+
+ dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
}
int bch_cached_dev_writeback_init(struct cached_dev *dc)
@@ -490,18 +490,15 @@ int bch_cached_dev_writeback_init(struct cached_dev *dc)
dc->writeback_delay = 30;
dc->writeback_rate.rate = 1024;
- dc->writeback_rate_update_seconds = 30;
- dc->writeback_rate_d_term = 16;
- dc->writeback_rate_p_term_inverse = 64;
- dc->writeback_rate_d_smooth = 8;
+ dc->writeback_rate_update_seconds = 5;
+ dc->writeback_rate_d_term = 30;
+ dc->writeback_rate_p_term_inverse = 6000;
dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
"bcache_writeback");
if (IS_ERR(dc->writeback_thread))
return PTR_ERR(dc->writeback_thread);
- set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE);
-
INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
schedule_delayed_work(&dc->writeback_rate_update,
dc->writeback_rate_update_seconds * HZ);
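
The writeback controller moves from the old error/EWMA scheme to a plain PD
form: change = P + D, where P = (dirty - target) * update_seconds /
p_term_inverse and D is a smoothed derivative of the dirty count. With the
new defaults (update every 5 s, p_term_inverse 6000), each update nudges the
rate by 1/1200 of the excess. Worked example, ignoring the derivative:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int64_t dirty  = 2400000;   /* sectors currently dirty */
		int64_t target = 1200000;   /* sectors allowed dirty   */

		int64_t update_seconds = 5, p_term_inverse = 6000;
		int64_t proportional =
			(dirty - target) * update_seconds / p_term_inverse;

		/* 1,200,000 excess sectors -> rate nudged by 1000
		 * sectors/s (~512 KiB/s) at each 5-second update */
		printf("%lld\n", (long long)proportional);
		return 0;
	}
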
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 173cbb20d104..54bdd923316f 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1717,6 +1717,11 @@ static int __init dm_bufio_init(void)
{
__u64 mem;
+ dm_bufio_allocated_kmem_cache = 0;
+ dm_bufio_allocated_get_free_pages = 0;
+ dm_bufio_allocated_vmalloc = 0;
+ dm_bufio_current_allocated = 0;
+
memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 416b7b752a6e..64780ad73bb0 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -730,15 +730,18 @@ static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
int r = 0;
bool updated = updated_this_tick(mq, e);
- requeue_and_update_tick(mq, e);
-
if ((!discarded_oblock && updated) ||
- !should_promote(mq, e, discarded_oblock, data_dir))
+ !should_promote(mq, e, discarded_oblock, data_dir)) {
+ requeue_and_update_tick(mq, e);
result->op = POLICY_MISS;
- else if (!can_migrate)
+
+ } else if (!can_migrate)
r = -EWOULDBLOCK;
- else
+
+ else {
+ requeue_and_update_tick(mq, e);
r = pre_cache_to_cache(mq, e, result);
+ }
return r;
}
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 9efcf1059b99..1b1469ebe5cb 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2755,7 +2755,7 @@ static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
{
int r;
- r = dm_cache_resize(cache->cmd, cache->cache_size);
+ r = dm_cache_resize(cache->cmd, new_size);
if (r) {
DMERR("could not resize cache metadata");
return r;
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 496d5f3646a5..2f91d6d4a2cc 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -20,6 +20,7 @@
struct delay_c {
struct timer_list delay_timer;
struct mutex timer_lock;
+ struct workqueue_struct *kdelayd_wq;
struct work_struct flush_expired_bios;
struct list_head delayed_bios;
atomic_t may_delay;
@@ -45,14 +46,13 @@ struct dm_delay_info {
static DEFINE_MUTEX(delayed_bios_lock);
-static struct workqueue_struct *kdelayd_wq;
static struct kmem_cache *delayed_cache;
static void handle_delayed_timer(unsigned long data)
{
struct delay_c *dc = (struct delay_c *)data;
- queue_work(kdelayd_wq, &dc->flush_expired_bios);
+ queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
}
static void queue_timeout(struct delay_c *dc, unsigned long expires)
@@ -191,6 +191,12 @@ out:
goto bad_dev_write;
}
+ dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
+ if (!dc->kdelayd_wq) {
+ DMERR("Couldn't start kdelayd");
+ goto bad_queue;
+ }
+
setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);
INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
@@ -203,6 +209,8 @@ out:
ti->private = dc;
return 0;
+bad_queue:
+ mempool_destroy(dc->delayed_pool);
bad_dev_write:
if (dc->dev_write)
dm_put_device(ti, dc->dev_write);
@@ -217,7 +225,7 @@ static void delay_dtr(struct dm_target *ti)
{
struct delay_c *dc = ti->private;
- flush_workqueue(kdelayd_wq);
+ destroy_workqueue(dc->kdelayd_wq);
dm_put_device(ti, dc->dev_read);
@@ -350,12 +358,6 @@ static int __init dm_delay_init(void)
{
int r = -ENOMEM;
- kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
- if (!kdelayd_wq) {
- DMERR("Couldn't start kdelayd");
- goto bad_queue;
- }
-
delayed_cache = KMEM_CACHE(dm_delay_info, 0);
if (!delayed_cache) {
DMERR("Couldn't create delayed bio cache.");
@@ -373,8 +375,6 @@ static int __init dm_delay_init(void)
bad_register:
kmem_cache_destroy(delayed_cache);
bad_memcache:
- destroy_workqueue(kdelayd_wq);
-bad_queue:
return r;
}
@@ -382,7 +382,6 @@ static void __exit dm_delay_exit(void)
{
dm_unregister_target(&delay_target);
kmem_cache_destroy(delayed_cache);
- destroy_workqueue(kdelayd_wq);
}
/* Module hooks */
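
Moving the workqueue from a module-global into struct delay_c gives every
target its own queue, created in the ctr and torn down in the dtr;
destroy_workqueue() flushes outstanding work before freeing, which is why the
old flush_workqueue() call disappears. The skeleton of the pattern, with
demo_* placeholders:

	struct demo_c {
		struct workqueue_struct *wq;
	};

	static int demo_ctr(struct demo_c *dc)
	{
		dc->wq = alloc_workqueue("demo_wq", WQ_MEM_RECLAIM, 0);
		if (!dc->wq)
			return -ENOMEM;
		return 0;
	}

	static void demo_dtr(struct demo_c *dc)
	{
		destroy_workqueue(dc->wq);  /* drains queued work, then frees */
	}
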
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index aec57d76db5d..944690bafd93 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -66,6 +66,18 @@ struct dm_snapshot {
atomic_t pending_exceptions_count;
+ /* Protected by "lock" */
+ sector_t exception_start_sequence;
+
+ /* Protected by kcopyd single-threaded callback */
+ sector_t exception_complete_sequence;
+
+ /*
+ * A list of pending exceptions that completed out of order.
+ * Protected by kcopyd single-threaded callback.
+ */
+ struct list_head out_of_order_list;
+
mempool_t *pending_pool;
struct dm_exception_table pending;
@@ -173,6 +185,14 @@ struct dm_snap_pending_exception {
*/
int started;
+ /* There was copying error. */
+ int copy_error;
+
+ /* A sequence number, it is used for in-order completion. */
+ sector_t exception_sequence;
+
+ struct list_head out_of_order_entry;
+
/*
* For writing a complete chunk, bypassing the copy.
*/
@@ -1094,6 +1114,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->valid = 1;
s->active = 0;
atomic_set(&s->pending_exceptions_count, 0);
+ s->exception_start_sequence = 0;
+ s->exception_complete_sequence = 0;
+ INIT_LIST_HEAD(&s->out_of_order_list);
init_rwsem(&s->lock);
INIT_LIST_HEAD(&s->list);
spin_lock_init(&s->pe_lock);
@@ -1443,6 +1466,19 @@ static void commit_callback(void *context, int success)
pending_complete(pe, success);
}
+static void complete_exception(struct dm_snap_pending_exception *pe)
+{
+ struct dm_snapshot *s = pe->snap;
+
+ if (unlikely(pe->copy_error))
+ pending_complete(pe, 0);
+
+ else
+ /* Update the metadata if we are persistent */
+ s->store->type->commit_exception(s->store, &pe->e,
+ commit_callback, pe);
+}
+
/*
* Called when the copy I/O has finished. kcopyd actually runs
* this code so don't block.
@@ -1452,13 +1488,32 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
struct dm_snap_pending_exception *pe = context;
struct dm_snapshot *s = pe->snap;
- if (read_err || write_err)
- pending_complete(pe, 0);
+ pe->copy_error = read_err || write_err;
- else
- /* Update the metadata if we are persistent */
- s->store->type->commit_exception(s->store, &pe->e,
- commit_callback, pe);
+ if (pe->exception_sequence == s->exception_complete_sequence) {
+ s->exception_complete_sequence++;
+ complete_exception(pe);
+
+ while (!list_empty(&s->out_of_order_list)) {
+ pe = list_entry(s->out_of_order_list.next,
+ struct dm_snap_pending_exception, out_of_order_entry);
+ if (pe->exception_sequence != s->exception_complete_sequence)
+ break;
+ s->exception_complete_sequence++;
+ list_del(&pe->out_of_order_entry);
+ complete_exception(pe);
+ }
+ } else {
+ struct list_head *lh;
+ struct dm_snap_pending_exception *pe2;
+
+ list_for_each_prev(lh, &s->out_of_order_list) {
+ pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry);
+ if (pe2->exception_sequence < pe->exception_sequence)
+ break;
+ }
+ list_add(&pe->out_of_order_entry, lh);
+ }
}
/*
@@ -1553,6 +1608,8 @@ __find_pending_exception(struct dm_snapshot *s,
return NULL;
}
+ pe->exception_sequence = s->exception_start_sequence++;
+
dm_insert_exception(&s->pending, &pe->e);
return pe;
@@ -2192,7 +2249,7 @@ static struct target_type origin_target = {
static struct target_type snapshot_target = {
.name = "snapshot",
- .version = {1, 11, 1},
+ .version = {1, 12, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
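
The snapshot change serialises exception completion: every pending exception
takes a ticket from exception_start_sequence when created, and copy_callback()
only completes the exception whose ticket equals
exception_complete_sequence, parking later arrivals on out_of_order_list
(kept sorted) and draining that list as the counter catches up. A reduced
kernel-context sketch of the same algorithm, with hypothetical item/state
types and a hypothetical complete_item():

	#include <linux/list.h>

	struct item {
		sector_t seq;            /* ticket taken at submit time */
		struct list_head link;
	};

	/* 'done' and 'parked' play the roles of exception_complete_sequence
	 * and out_of_order_list; both are only touched from the
	 * single-threaded kcopyd callback, so no extra locking is needed. */
	static void on_copy_done(struct item *it, sector_t *done,
				 struct list_head *parked)
	{
		struct item *n;

		if (it->seq != *done) {
			/* walk backwards to keep 'parked' sorted by seq */
			struct list_head *lh;

			list_for_each_prev(lh, parked)
				if (list_entry(lh, struct item,
					       link)->seq < it->seq)
					break;
			list_add(&it->link, lh);
			return;
		}

		(*done)++;
		complete_item(it);
		while (!list_empty(parked)) {
			n = list_first_entry(parked, struct item, link);
			if (n->seq != *done)
				break;
			(*done)++;
			list_del(&n->link);
			complete_item(n);
		}
	}
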
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 3d404c1371ed..28a90122a5a8 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -964,6 +964,7 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
int __init dm_statistics_init(void)
{
+ shared_memory_amount = 0;
dm_stat_need_rcu_barrier = 0;
return 0;
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 465f08ca62b1..3ba6a3859ce3 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -200,6 +200,11 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
+ if (!num_targets) {
+ kfree(t);
+ return -ENOMEM;
+ }
+
if (alloc_targets(t, num_targets)) {
kfree(t);
return -ENOMEM;
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 60bce435f4fa..8a30ad54bd46 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1697,6 +1697,14 @@ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
up_write(&pmd->root_lock);
}
+void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd)
+{
+ down_write(&pmd->root_lock);
+ pmd->read_only = false;
+ dm_bm_set_read_write(pmd->bm);
+ up_write(&pmd->root_lock);
+}
+
int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
dm_block_t threshold,
dm_sm_threshold_fn fn,
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 845ebbe589a9..7bcc0e1d6238 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -193,6 +193,7 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_siz
* that nothing is changing.
*/
void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd);
+void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd);
int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
dm_block_t threshold,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 2c0cf511ec23..ee29037ffc2e 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -640,7 +640,9 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
*/
r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
if (r) {
- DMERR_LIMIT("dm_thin_insert_block() failed");
+ DMERR_LIMIT("%s: dm_thin_insert_block() failed: error = %d",
+ dm_device_name(pool->pool_md), r);
+ set_pool_mode(pool, PM_READ_ONLY);
cell_error(pool, m->cell);
goto out;
}
@@ -881,32 +883,23 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
}
}
-static int commit(struct pool *pool)
-{
- int r;
-
- r = dm_pool_commit_metadata(pool->pmd);
- if (r)
- DMERR_LIMIT("%s: commit failed: error = %d",
- dm_device_name(pool->pool_md), r);
-
- return r;
-}
-
/*
* A non-zero return indicates read_only or fail_io mode.
* Many callers don't care about the return value.
*/
-static int commit_or_fallback(struct pool *pool)
+static int commit(struct pool *pool)
{
int r;
if (get_pool_mode(pool) != PM_WRITE)
return -EINVAL;
- r = commit(pool);
- if (r)
+ r = dm_pool_commit_metadata(pool->pmd);
+ if (r) {
+ DMERR_LIMIT("%s: dm_pool_commit_metadata failed: error = %d",
+ dm_device_name(pool->pool_md), r);
set_pool_mode(pool, PM_READ_ONLY);
+ }
return r;
}
@@ -943,7 +936,9 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
* Try to commit to see if that will free up some
* more space.
*/
- (void) commit_or_fallback(pool);
+ r = commit(pool);
+ if (r)
+ return r;
r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
if (r)
@@ -957,7 +952,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
* table reload).
*/
if (!free_blocks) {
- DMWARN("%s: no free space available.",
+ DMWARN("%s: no free data space available.",
dm_device_name(pool->pool_md));
spin_lock_irqsave(&pool->lock, flags);
pool->no_free_space = 1;
@@ -967,8 +962,16 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
}
r = dm_pool_alloc_data_block(pool->pmd, result);
- if (r)
+ if (r) {
+ if (r == -ENOSPC &&
+ !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) &&
+ !free_blocks) {
+ DMWARN("%s: no free metadata space available.",
+ dm_device_name(pool->pool_md));
+ set_pool_mode(pool, PM_READ_ONLY);
+ }
return r;
+ }
return 0;
}
@@ -1349,7 +1352,7 @@ static void process_deferred_bios(struct pool *pool)
if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
return;
- if (commit_or_fallback(pool)) {
+ if (commit(pool)) {
while ((bio = bio_list_pop(&bios)))
bio_io_error(bio);
return;
@@ -1397,6 +1400,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
case PM_FAIL:
DMERR("%s: switching pool to failure mode",
dm_device_name(pool->pool_md));
+ dm_pool_metadata_read_only(pool->pmd);
pool->process_bio = process_bio_fail;
pool->process_discard = process_bio_fail;
pool->process_prepared_mapping = process_prepared_mapping_fail;
@@ -1421,6 +1425,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
break;
case PM_WRITE:
+ dm_pool_metadata_read_write(pool->pmd);
pool->process_bio = process_bio;
pool->process_discard = process_discard;
pool->process_prepared_mapping = process_prepared_mapping;
@@ -1637,12 +1642,19 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
struct pool_c *pt = ti->private;
/*
- * We want to make sure that degraded pools are never upgraded.
+ * We want to make sure that a pool in PM_FAIL mode is never upgraded.
*/
enum pool_mode old_mode = pool->pf.mode;
enum pool_mode new_mode = pt->adjusted_pf.mode;
- if (old_mode > new_mode)
+ /*
+ * If we were in PM_FAIL mode, rollback of metadata failed. We're
+ * not going to recover without a thin_repair. So we never let the
+ * pool move out of the old mode. On the other hand a PM_READ_ONLY
+ * may have been due to a lack of metadata or data space, and may
+	 * now work (i.e. if the underlying devices have been resized).
+ */
+ if (old_mode == PM_FAIL)
new_mode = old_mode;
pool->ti = ti;
@@ -2266,7 +2278,7 @@ static int pool_preresume(struct dm_target *ti)
return r;
if (need_commit1 || need_commit2)
- (void) commit_or_fallback(pool);
+ (void) commit(pool);
return 0;
}
@@ -2293,7 +2305,7 @@ static void pool_postsuspend(struct dm_target *ti)
cancel_delayed_work(&pool->waker);
flush_workqueue(pool->wq);
- (void) commit_or_fallback(pool);
+ (void) commit(pool);
}
static int check_arg_count(unsigned argc, unsigned args_required)
@@ -2427,7 +2439,7 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct
if (r)
return r;
- (void) commit_or_fallback(pool);
+ (void) commit(pool);
r = dm_pool_reserve_metadata_snap(pool->pmd);
if (r)
@@ -2489,7 +2501,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
if (!r)
- (void) commit_or_fallback(pool);
+ (void) commit(pool);
return r;
}
@@ -2544,7 +2556,7 @@ static void pool_status(struct dm_target *ti, status_type_t type,
/* Commit to ensure statistics aren't out-of-date */
if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
- (void) commit_or_fallback(pool);
+ (void) commit(pool);
r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
if (r) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8766eabb0014..21f4d7ff0da2 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -112,7 +112,7 @@ static inline int speed_max(struct mddev *mddev)
static struct ctl_table_header *raid_table_header;
-static ctl_table raid_table[] = {
+static struct ctl_table raid_table[] = {
{
.procname = "speed_limit_min",
.data = &sysctl_speed_limit_min,
@@ -130,7 +130,7 @@ static ctl_table raid_table[] = {
{ }
};
-static ctl_table raid_dir_table[] = {
+static struct ctl_table raid_dir_table[] = {
{
.procname = "raid",
.maxlen = 0,
@@ -140,7 +140,7 @@ static ctl_table raid_dir_table[] = {
{ }
};
-static ctl_table raid_root_table[] = {
+static struct ctl_table raid_root_table[] = {
{
.procname = "dev",
.maxlen = 0,
@@ -562,11 +562,19 @@ static struct mddev * mddev_find(dev_t unit)
goto retry;
}
-static inline int mddev_lock(struct mddev * mddev)
+static inline int __must_check mddev_lock(struct mddev * mddev)
{
return mutex_lock_interruptible(&mddev->reconfig_mutex);
}
+/* Sometimes we need to take the lock in a situation where
+ * failure due to interrupts is not acceptable.
+ */
+static inline void mddev_lock_nointr(struct mddev * mddev)
+{
+ mutex_lock(&mddev->reconfig_mutex);
+}
+
static inline int mddev_is_locked(struct mddev *mddev)
{
return mutex_is_locked(&mddev->reconfig_mutex);
@@ -768,16 +776,10 @@ void md_super_wait(struct mddev *mddev)
finish_wait(&mddev->sb_wait, &wq);
}
-static void bi_complete(struct bio *bio, int error)
-{
- complete((struct completion*)bio->bi_private);
-}
-
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int rw, bool metadata_op)
{
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
- struct completion event;
int ret;
rw |= REQ_SYNC;
@@ -793,11 +795,7 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
else
bio->bi_sector = sector + rdev->data_offset;
bio_add_page(bio, page, size, 0);
- init_completion(&event);
- bio->bi_private = &event;
- bio->bi_end_io = bi_complete;
- submit_bio(rw, bio);
- wait_for_completion(&event);
+ submit_bio_wait(rw, bio);
ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
bio_put(bio);
@@ -2978,7 +2976,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
for_each_mddev(mddev, tmp) {
struct md_rdev *rdev2;
- mddev_lock(mddev);
+ mddev_lock_nointr(mddev);
rdev_for_each(rdev2, mddev)
if (rdev->bdev == rdev2->bdev &&
rdev != rdev2 &&
@@ -2994,7 +2992,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
break;
}
}
- mddev_lock(my_mddev);
+ mddev_lock_nointr(my_mddev);
if (overlap) {
/* Someone else could have slipped in a size
* change here, but doing so is just silly.
@@ -3580,6 +3578,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
mddev->in_sync = 1;
del_timer_sync(&mddev->safemode_timer);
}
+ blk_set_stacking_limits(&mddev->queue->limits);
pers->run(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
mddev_resume(mddev);
@@ -5258,7 +5257,7 @@ static void __md_stop_writes(struct mddev *mddev)
void md_stop_writes(struct mddev *mddev)
{
- mddev_lock(mddev);
+ mddev_lock_nointr(mddev);
__md_stop_writes(mddev);
mddev_unlock(mddev);
}
@@ -5291,20 +5290,35 @@ EXPORT_SYMBOL_GPL(md_stop);
static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
{
int err = 0;
+ int did_freeze = 0;
+
+ if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
+ did_freeze = 1;
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ }
+ if (mddev->sync_thread) {
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ /* Thread might be blocked waiting for metadata update
+ * which will now never happen */
+ wake_up_process(mddev->sync_thread->tsk);
+ }
+ mddev_unlock(mddev);
+ wait_event(resync_wait, mddev->sync_thread == NULL);
+ mddev_lock_nointr(mddev);
+
mutex_lock(&mddev->open_mutex);
- if (atomic_read(&mddev->openers) > !!bdev) {
+ if (atomic_read(&mddev->openers) > !!bdev ||
+ mddev->sync_thread ||
+ (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
printk("md: %s still in use.\n",mdname(mddev));
+ if (did_freeze) {
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ }
err = -EBUSY;
goto out;
}
- if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) {
- /* Someone opened the device since we flushed it
- * so page cache could be dirty and it is too late
- * to flush. So abort
- */
- mutex_unlock(&mddev->open_mutex);
- return -EBUSY;
- }
if (mddev->pers) {
__md_stop_writes(mddev);
@@ -5315,7 +5329,7 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
set_disk_ro(mddev->gendisk, 1);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
sysfs_notify_dirent_safe(mddev->sysfs_state);
- err = 0;
+ err = 0;
}
out:
mutex_unlock(&mddev->open_mutex);
@@ -5331,20 +5345,34 @@ static int do_md_stop(struct mddev * mddev, int mode,
{
struct gendisk *disk = mddev->gendisk;
struct md_rdev *rdev;
+ int did_freeze = 0;
+
+ if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
+ did_freeze = 1;
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ }
+ if (mddev->sync_thread) {
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ /* Thread might be blocked waiting for metadata update
+ * which will now never happen */
+ wake_up_process(mddev->sync_thread->tsk);
+ }
+ mddev_unlock(mddev);
+ wait_event(resync_wait, mddev->sync_thread == NULL);
+ mddev_lock_nointr(mddev);
mutex_lock(&mddev->open_mutex);
if (atomic_read(&mddev->openers) > !!bdev ||
- mddev->sysfs_active) {
+ mddev->sysfs_active ||
+ mddev->sync_thread ||
+ (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
printk("md: %s still in use.\n",mdname(mddev));
mutex_unlock(&mddev->open_mutex);
- return -EBUSY;
- }
- if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) {
- /* Someone opened the device since we flushed it
- * so page cache could be dirty and it is too late
- * to flush. So abort
- */
- mutex_unlock(&mddev->open_mutex);
+ if (did_freeze) {
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ }
return -EBUSY;
}
if (mddev->pers) {
@@ -6551,7 +6579,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
wait_event(mddev->sb_wait,
!test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
!test_bit(MD_CHANGE_PENDING, &mddev->flags));
- mddev_lock(mddev);
+ mddev_lock_nointr(mddev);
}
} else {
err = -EROFS;
@@ -7361,9 +7389,6 @@ void md_do_sync(struct md_thread *thread)
mddev->curr_resync = 2;
try_again:
- if (kthread_should_stop())
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
goto skip;
for_each_mddev(mddev2, tmp) {
@@ -7388,7 +7413,7 @@ void md_do_sync(struct md_thread *thread)
* be caught by 'softlockup'
*/
prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
- if (!kthread_should_stop() &&
+ if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
mddev2->curr_resync >= mddev->curr_resync) {
printk(KERN_INFO "md: delaying %s of %s"
" until %s has finished (they"
@@ -7464,7 +7489,7 @@ void md_do_sync(struct md_thread *thread)
last_check = 0;
if (j>2) {
- printk(KERN_INFO
+ printk(KERN_INFO
"md: resuming %s of %s from checkpoint.\n",
desc, mdname(mddev));
mddev->curr_resync = j;
@@ -7501,7 +7526,8 @@ void md_do_sync(struct md_thread *thread)
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
- while (j >= mddev->resync_max && !kthread_should_stop()) {
+ while (j >= mddev->resync_max &&
+ !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
/* As this condition is controlled by user-space,
* we can block indefinitely, so use '_interruptible'
* to avoid triggering warnings.
@@ -7509,17 +7535,18 @@ void md_do_sync(struct md_thread *thread)
flush_signals(current); /* just in case */
wait_event_interruptible(mddev->recovery_wait,
mddev->resync_max > j
- || kthread_should_stop());
+ || test_bit(MD_RECOVERY_INTR,
+ &mddev->recovery));
}
- if (kthread_should_stop())
- goto interrupted;
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+ break;
sectors = mddev->pers->sync_request(mddev, j, &skipped,
currspeed < speed_min(mddev));
if (sectors == 0) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- goto out;
+ break;
}
if (!skipped) { /* actual IO requested */
@@ -7556,10 +7583,8 @@ void md_do_sync(struct md_thread *thread)
last_mark = next;
}
-
- if (kthread_should_stop())
- goto interrupted;
-
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+ break;
/*
* this loop exits only if either when we are slower than
@@ -7582,11 +7607,12 @@ void md_do_sync(struct md_thread *thread)
}
}
}
- printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
+ printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc,
+ test_bit(MD_RECOVERY_INTR, &mddev->recovery)
+ ? "interrupted" : "done");
/*
* this also signals 'finished resyncing' to md_stop
*/
- out:
blk_finish_plug(&plug);
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
@@ -7640,16 +7666,6 @@ void md_do_sync(struct md_thread *thread)
set_bit(MD_RECOVERY_DONE, &mddev->recovery);
md_wakeup_thread(mddev->thread);
return;
-
- interrupted:
- /*
- * got a signal, exit.
- */
- printk(KERN_INFO
- "md: md_do_sync() got signal ... exiting\n");
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- goto out;
-
}
EXPORT_SYMBOL_GPL(md_do_sync);
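
Throughout md_do_sync() the exit condition changes from kthread_should_stop() to testing MD_RECOVERY_INTR, and the old interrupted: label disappears: interruption becomes just another way for the main loop to break, with the final printk reporting "interrupted" or "done" and the same cleanup running in both cases. The resulting loop shape, reduced to a userspace sketch with an atomic flag standing in for the recovery bitmap:

#include <stdatomic.h>
#include <stdio.h>

static atomic_bool recovery_intr;  /* stands in for MD_RECOVERY_INTR */

static void do_sync(long max_sectors)
{
	long j;

	for (j = 0; j < max_sectors; j++) {
		if (atomic_load(&recovery_intr))
			break;          /* no "goto interrupted" any more */
		/* ... issue one chunk of resync I/O ... */
	}
	printf("md: resync %s.\n",
	       atomic_load(&recovery_intr) ? "interrupted" : "done");
	/* common cleanup runs here in both cases */
}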
@@ -7751,7 +7767,7 @@ void md_check_recovery(struct mddev *mddev)
if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return;
if ( ! (
- (mddev->flags & ~ (1<<MD_CHANGE_PENDING)) ||
+ (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
(mddev->external == 0 && mddev->safemode == 1) ||
@@ -7894,6 +7910,7 @@ void md_reap_sync_thread(struct mddev *mddev)
/* resync has finished, collect result */
md_unregister_thread(&mddev->sync_thread);
+ wake_up(&resync_wait);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* success...*/
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
index af96e24ec328..1d75b1dc1e2e 100644
--- a/drivers/md/persistent-data/dm-array.c
+++ b/drivers/md/persistent-data/dm-array.c
@@ -317,8 +317,16 @@ static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
* The shadow op will often be a noop. Only insert if it really
* copied data.
*/
- if (dm_block_location(*block) != b)
+ if (dm_block_location(*block) != b) {
+ /*
+ * dm_tm_shadow_block will have already decremented the old
+ * block, but it is still referenced by the btree. We
+ * increment to stop the insert decrementing it below zero
+ * when overwriting the old value.
+ */
+ dm_tm_inc(info->btree_info.tm, b);
r = insert_ablock(info, index, *block, root);
+ }
return r;
}
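
This fix keeps the block's reference count from underflowing: dm_tm_shadow_block() has already dropped one reference to the old block, but the btree still points at it, so insert_ablock() overwriting that entry would decrement it a second time. Taking an extra reference first keeps the count balanced. A toy model of the invariant:

#include <assert.h>

static int refcount = 1;             /* old block, referenced by the btree */

static void shadow(void)    { refcount--; } /* dm_tm_shadow_block drops one */
static void inc(void)       { refcount++; } /* the added dm_tm_inc          */
static void overwrite(void) { refcount--; } /* insert decrements old value  */

int main(void)
{
	shadow();
	inc();                       /* without this, the count would hit -1 */
	overwrite();
	assert(refcount == 0);       /* balanced: block can now be freed */
	return 0;
}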
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index a7e8bf296388..064a3c271baa 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -626,6 +626,12 @@ void dm_bm_set_read_only(struct dm_block_manager *bm)
}
EXPORT_SYMBOL_GPL(dm_bm_set_read_only);
+void dm_bm_set_read_write(struct dm_block_manager *bm)
+{
+ bm->read_only = false;
+}
+EXPORT_SYMBOL_GPL(dm_bm_set_read_write);
+
u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor)
{
return crc32c(~(u32) 0, data, len) ^ init_xor;
diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
index 9a82083a66b6..13cd58e1fe69 100644
--- a/drivers/md/persistent-data/dm-block-manager.h
+++ b/drivers/md/persistent-data/dm-block-manager.h
@@ -108,9 +108,9 @@ int dm_bm_unlock(struct dm_block *b);
int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
struct dm_block *superblock);
- /*
- * Request data be prefetched into the cache.
- */
+/*
+ * Request data is prefetched into the cache.
+ */
void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);
/*
@@ -125,6 +125,7 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);
* be returned if you do.
*/
void dm_bm_set_read_only(struct dm_block_manager *bm);
+void dm_bm_set_read_write(struct dm_block_manager *bm);
u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor);
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 6058569fe86c..466a60bbd716 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -381,7 +381,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
}
static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
- uint32_t (*mutator)(void *context, uint32_t old),
+ int (*mutator)(void *context, uint32_t old, uint32_t *new),
void *context, enum allocation_event *ev)
{
int r;
@@ -410,11 +410,17 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
if (old > 2) {
r = sm_ll_lookup_big_ref_count(ll, b, &old);
- if (r < 0)
+ if (r < 0) {
+ dm_tm_unlock(ll->tm, nb);
return r;
+ }
}
- ref_count = mutator(context, old);
+ r = mutator(context, old, &ref_count);
+ if (r) {
+ dm_tm_unlock(ll->tm, nb);
+ return r;
+ }
if (ref_count <= 2) {
sm_set_bitmap(bm_le, bit, ref_count);
@@ -465,9 +471,10 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
return ll->save_ie(ll, index, &ie_disk);
}
-static uint32_t set_ref_count(void *context, uint32_t old)
+static int set_ref_count(void *context, uint32_t old, uint32_t *new)
{
- return *((uint32_t *) context);
+ *new = *((uint32_t *) context);
+ return 0;
}
int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
@@ -476,9 +483,10 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev);
}
-static uint32_t inc_ref_count(void *context, uint32_t old)
+static int inc_ref_count(void *context, uint32_t old, uint32_t *new)
{
- return old + 1;
+ *new = old + 1;
+ return 0;
}
int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
@@ -486,9 +494,15 @@ int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev);
}
-static uint32_t dec_ref_count(void *context, uint32_t old)
+static int dec_ref_count(void *context, uint32_t old, uint32_t *new)
{
- return old - 1;
+ if (!old) {
+ DMERR_LIMIT("unable to decrement a reference count below 0");
+ return -EINVAL;
+ }
+
+ *new = old - 1;
+ return 0;
}
int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
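
Changing the mutator callback from returning the new count to returning an error code with an out-parameter lets dec_ref_count() refuse to go below zero instead of silently wrapping a uint32_t to 0xffffffff. A self-contained sketch of the new shape (mutate() stands in for sm_ll_mutate()):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

typedef int (*mutator_fn)(void *context, uint32_t old, uint32_t *new_val);

static int dec_ref_count(void *context, uint32_t old, uint32_t *new_val)
{
	(void)context;
	if (!old) {
		fprintf(stderr, "unable to decrement a reference count below 0\n");
		return -EINVAL;      /* previously this wrapped to 0xffffffff */
	}
	*new_val = old - 1;
	return 0;
}

static int mutate(mutator_fn fn, void *ctx, uint32_t old)
{
	uint32_t ref_count;
	int r = fn(ctx, old, &ref_count);

	if (r)
		return r;            /* the error now propagates to the caller */
	/* ... write ref_count back to the bitmap or btree ... */
	return 0;
}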
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 1c959684caef..58fc1eef7499 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -384,12 +384,16 @@ static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
int r = sm_metadata_new_block_(sm, b);
- if (r)
+ if (r) {
DMERR("unable to allocate new metadata block");
+ return r;
+ }
r = sm_metadata_get_nr_free(sm, &count);
- if (r)
+ if (r) {
DMERR("couldn't get free block count");
+ return r;
+ }
check_threshold(&smm->threshold, count);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index af6681b19776..1e5a540995e9 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -66,7 +66,8 @@
*/
static int max_queued_requests = 1024;
-static void allow_barrier(struct r1conf *conf);
+static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
+ sector_t bi_sector);
static void lower_barrier(struct r1conf *conf);
static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
@@ -84,10 +85,12 @@ static void r1bio_pool_free(void *r1_bio, void *data)
}
#define RESYNC_BLOCK_SIZE (64*1024)
-//#define RESYNC_BLOCK_SIZE PAGE_SIZE
+#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
-#define RESYNC_WINDOW (2048*1024)
+#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
+#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
+#define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
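
With the numbers in the hunk above, the geometry works out as follows: RESYNC_BLOCK_SIZE is 64 KiB, so RESYNC_SECTORS is 128; RESYNC_WINDOW is 32 such blocks, i.e. 2 MiB or 4096 sectors (the same 2048*1024 bytes the old literal encoded); and NEXT_NORMALIO_DISTANCE is three windows, 12288 sectors. A few C11 static assertions pin this down:

#include <assert.h>

#define RESYNC_BLOCK_SIZE	(64 * 1024)
#define RESYNC_DEPTH		32
#define RESYNC_SECTORS		(RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_WINDOW		(RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
#define RESYNC_WINDOW_SECTORS	(RESYNC_WINDOW >> 9)
#define NEXT_NORMALIO_DISTANCE	(3 * RESYNC_WINDOW_SECTORS)

static_assert(RESYNC_SECTORS == 128, "64 KiB is 128 512-byte sectors");
static_assert(RESYNC_WINDOW == 2048 * 1024, "matches the old literal");
static_assert(RESYNC_WINDOW_SECTORS == 4096, "2 MiB window");
static_assert(NEXT_NORMALIO_DISTANCE == 12288, "6 MiB between windows");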
@@ -225,6 +228,8 @@ static void call_bio_endio(struct r1bio *r1_bio)
struct bio *bio = r1_bio->master_bio;
int done;
struct r1conf *conf = r1_bio->mddev->private;
+ sector_t start_next_window = r1_bio->start_next_window;
+ sector_t bi_sector = bio->bi_sector;
if (bio->bi_phys_segments) {
unsigned long flags;
@@ -232,6 +237,11 @@ static void call_bio_endio(struct r1bio *r1_bio)
bio->bi_phys_segments--;
done = (bio->bi_phys_segments == 0);
spin_unlock_irqrestore(&conf->device_lock, flags);
+ /*
+ * make_request() might be waiting for
+ * bi_phys_segments to decrease
+ */
+ wake_up(&conf->wait_barrier);
} else
done = 1;
@@ -243,7 +253,7 @@ static void call_bio_endio(struct r1bio *r1_bio)
* Wake up any possible resync thread that waits for the device
* to go idle.
*/
- allow_barrier(conf);
+ allow_barrier(conf, start_next_window, bi_sector);
}
}
@@ -814,8 +824,6 @@ static void flush_pending_writes(struct r1conf *conf)
* there is no normal IO happening. It must arrange to call
* lower_barrier when the particular background IO completes.
*/
-#define RESYNC_DEPTH 32
-
static void raise_barrier(struct r1conf *conf)
{
spin_lock_irq(&conf->resync_lock);
@@ -827,9 +835,19 @@ static void raise_barrier(struct r1conf *conf)
/* block any new IO from starting */
conf->barrier++;
- /* Now wait for all pending IO to complete */
+ /* For these conditions we must wait:
+ * A: while the array is in frozen state
+ * B: while barrier >= RESYNC_DEPTH, meaning resync has reached
+ * the maximum allowed count.
+ * C: while next_resync + RESYNC_SECTORS > start_next_window, meaning
+ * the next resync would reach into the window that normal bios are
+ * handling.
+ */
wait_event_lock_irq(conf->wait_barrier,
- !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
+ !conf->array_frozen &&
+ conf->barrier < RESYNC_DEPTH &&
+ (conf->start_next_window >=
+ conf->next_resync + RESYNC_SECTORS),
conf->resync_lock);
spin_unlock_irq(&conf->resync_lock);
@@ -845,10 +863,33 @@ static void lower_barrier(struct r1conf *conf)
wake_up(&conf->wait_barrier);
}
-static void wait_barrier(struct r1conf *conf)
+static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
{
+ bool wait = false;
+
+ if (conf->array_frozen || !bio)
+ wait = true;
+ else if (conf->barrier && bio_data_dir(bio) == WRITE) {
+ if (conf->next_resync < RESYNC_WINDOW_SECTORS)
+ wait = true;
+ else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
+ >= bio_end_sector(bio)) ||
+ (conf->next_resync + NEXT_NORMALIO_DISTANCE
+ <= bio->bi_sector))
+ wait = false;
+ else
+ wait = true;
+ }
+
+ return wait;
+}
+
+static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
+{
+ sector_t sector = 0;
+
spin_lock_irq(&conf->resync_lock);
- if (conf->barrier) {
+ if (need_to_wait_for_sync(conf, bio)) {
conf->nr_waiting++;
/* Wait for the barrier to drop.
* However if there are already pending
@@ -860,22 +901,67 @@ static void wait_barrier(struct r1conf *conf)
* count down.
*/
wait_event_lock_irq(conf->wait_barrier,
- !conf->barrier ||
- (conf->nr_pending &&
+ !conf->array_frozen &&
+ (!conf->barrier ||
+ ((conf->start_next_window <
+ conf->next_resync + RESYNC_SECTORS) &&
current->bio_list &&
- !bio_list_empty(current->bio_list)),
+ !bio_list_empty(current->bio_list))),
conf->resync_lock);
conf->nr_waiting--;
}
+
+ if (bio && bio_data_dir(bio) == WRITE) {
+ if (conf->next_resync + NEXT_NORMALIO_DISTANCE
+ <= bio->bi_sector) {
+ if (conf->start_next_window == MaxSector)
+ conf->start_next_window =
+ conf->next_resync +
+ NEXT_NORMALIO_DISTANCE;
+
+ if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
+ <= bio->bi_sector)
+ conf->next_window_requests++;
+ else
+ conf->current_window_requests++;
+ }
+ if (bio->bi_sector >= conf->start_next_window)
+ sector = conf->start_next_window;
+ }
+
conf->nr_pending++;
spin_unlock_irq(&conf->resync_lock);
+ return sector;
}
-static void allow_barrier(struct r1conf *conf)
+static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
+ sector_t bi_sector)
{
unsigned long flags;
+
spin_lock_irqsave(&conf->resync_lock, flags);
conf->nr_pending--;
+ if (start_next_window) {
+ if (start_next_window == conf->start_next_window) {
+ if (conf->start_next_window + NEXT_NORMALIO_DISTANCE
+ <= bi_sector)
+ conf->next_window_requests--;
+ else
+ conf->current_window_requests--;
+ } else
+ conf->current_window_requests--;
+
+ if (!conf->current_window_requests) {
+ if (conf->next_window_requests) {
+ conf->current_window_requests =
+ conf->next_window_requests;
+ conf->next_window_requests = 0;
+ conf->start_next_window +=
+ NEXT_NORMALIO_DISTANCE;
+ } else
+ conf->start_next_window = MaxSector;
+ }
+ }
spin_unlock_irqrestore(&conf->resync_lock, flags);
wake_up(&conf->wait_barrier);
}
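
allow_barrier() now undoes the bookkeeping that wait_barrier() did: each write beyond the resync window was counted in either current_window_requests or next_window_requests depending on which window its sector fell in, and when the current window drains the window either advances by NEXT_NORMALIO_DISTANCE (adopting the next window's count) or collapses back to MaxSector. A reduced model of that state machine, with locking omitted and names kept close to the diff:

#include <stdint.h>

#define NEXT_NORMALIO_DISTANCE 12288
#define MAX_SECTOR UINT64_MAX

struct window {
	uint64_t start_next_window;  /* MAX_SECTOR when no window is open */
	int current_window_requests;
	int next_window_requests;
};

static void window_drop(struct window *w, uint64_t start, uint64_t sector)
{
	if (!start)
		return;                              /* request predated the window */
	if (start == w->start_next_window &&
	    w->start_next_window + NEXT_NORMALIO_DISTANCE <= sector)
		w->next_window_requests--;
	else
		w->current_window_requests--;

	if (!w->current_window_requests) {
		if (w->next_window_requests) {       /* slide the window forward */
			w->current_window_requests = w->next_window_requests;
			w->next_window_requests = 0;
			w->start_next_window += NEXT_NORMALIO_DISTANCE;
		} else {
			w->start_next_window = MAX_SECTOR;
		}
	}
}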
@@ -884,8 +970,7 @@ static void freeze_array(struct r1conf *conf, int extra)
{
/* stop syncio and normal IO and wait for everything to
* go quiet.
- * We increment barrier and nr_waiting, and then
- * wait until nr_pending match nr_queued+extra
+ * We wait until nr_pending matches nr_queued+extra
* This is called in the context of one normal IO request
* that has failed. Thus any sync request that might be pending
* will be blocked by nr_pending, and we need to wait for
@@ -895,8 +980,7 @@ static void freeze_array(struct r1conf *conf, int extra)
* we continue.
*/
spin_lock_irq(&conf->resync_lock);
- conf->barrier++;
- conf->nr_waiting++;
+ conf->array_frozen = 1;
wait_event_lock_irq_cmd(conf->wait_barrier,
conf->nr_pending == conf->nr_queued+extra,
conf->resync_lock,
@@ -907,8 +991,7 @@ static void unfreeze_array(struct r1conf *conf)
{
/* reverse the effect of the freeze */
spin_lock_irq(&conf->resync_lock);
- conf->barrier--;
- conf->nr_waiting--;
+ conf->array_frozen = 0;
wake_up(&conf->wait_barrier);
spin_unlock_irq(&conf->resync_lock);
}
@@ -1013,6 +1096,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
int first_clone;
int sectors_handled;
int max_sectors;
+ sector_t start_next_window;
/*
* Register the new request and wait if the reconstruction
@@ -1042,7 +1126,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
finish_wait(&conf->wait_barrier, &w);
}
- wait_barrier(conf);
+ start_next_window = wait_barrier(conf, bio);
bitmap = mddev->bitmap;
@@ -1163,6 +1247,7 @@ read_again:
disks = conf->raid_disks * 2;
retry_write:
+ r1_bio->start_next_window = start_next_window;
blocked_rdev = NULL;
rcu_read_lock();
max_sectors = r1_bio->sectors;
@@ -1231,14 +1316,24 @@ read_again:
if (unlikely(blocked_rdev)) {
/* Wait for this device to become unblocked */
int j;
+ sector_t old = start_next_window;
for (j = 0; j < i; j++)
if (r1_bio->bios[j])
rdev_dec_pending(conf->mirrors[j].rdev, mddev);
r1_bio->state = 0;
- allow_barrier(conf);
+ allow_barrier(conf, start_next_window, bio->bi_sector);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
- wait_barrier(conf);
+ start_next_window = wait_barrier(conf, bio);
+ /*
+ * We must make sure the multi r1bios of bio have
+ * the same value of bi_phys_segments
+ */
+ if (bio->bi_phys_segments && old &&
+ old != start_next_window)
+ /* Wait for the former r1bio(s) to complete */
+ wait_event(conf->wait_barrier,
+ bio->bi_phys_segments == 1);
goto retry_write;
}
@@ -1438,11 +1533,14 @@ static void print_conf(struct r1conf *conf)
static void close_sync(struct r1conf *conf)
{
- wait_barrier(conf);
- allow_barrier(conf);
+ wait_barrier(conf, NULL);
+ allow_barrier(conf, 0, 0);
mempool_destroy(conf->r1buf_pool);
conf->r1buf_pool = NULL;
+
+ conf->next_resync = 0;
+ conf->start_next_window = MaxSector;
}
static int raid1_spare_active(struct mddev *mddev)
@@ -2714,6 +2812,9 @@ static struct r1conf *setup_conf(struct mddev *mddev)
conf->pending_count = 0;
conf->recovery_disabled = mddev->recovery_disabled - 1;
+ conf->start_next_window = MaxSector;
+ conf->current_window_requests = conf->next_window_requests = 0;
+
err = -EIO;
for (i = 0; i < conf->raid_disks * 2; i++) {
@@ -2871,8 +2972,8 @@ static int stop(struct mddev *mddev)
atomic_read(&bitmap->behind_writes) == 0);
}
- raise_barrier(conf);
- lower_barrier(conf);
+ freeze_array(conf, 0);
+ unfreeze_array(conf);
md_unregister_thread(&mddev->thread);
if (conf->r1bio_pool)
@@ -3031,10 +3132,10 @@ static void raid1_quiesce(struct mddev *mddev, int state)
wake_up(&conf->wait_barrier);
break;
case 1:
- raise_barrier(conf);
+ freeze_array(conf, 0);
break;
case 0:
- lower_barrier(conf);
+ unfreeze_array(conf);
break;
}
}
@@ -3051,7 +3152,8 @@ static void *raid1_takeover(struct mddev *mddev)
mddev->new_chunk_sectors = 0;
conf = setup_conf(mddev);
if (!IS_ERR(conf))
- conf->barrier = 1;
+ /* Array must appear to be quiesced */
+ conf->array_frozen = 1;
return conf;
}
return ERR_PTR(-EINVAL);
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 0ff3715fb7eb..9bebca7bff2f 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -41,6 +41,19 @@ struct r1conf {
*/
sector_t next_resync;
+ /* When raid1 starts resync, we divide the array into four partitions
+ * |---------|--------------|---------------------|-------------|
+ * next_resync start_next_window end_window
+ * start_next_window = next_resync + NEXT_NORMALIO_DISTANCE
+ * end_window = start_next_window + NEXT_NORMALIO_DISTANCE
+ * current_window_requests is the count of normal IO between
+ * start_next_window and end_window.
+ * next_window_requests is the count of normal IO after end_window.
+ */
+ sector_t start_next_window;
+ int current_window_requests;
+ int next_window_requests;
+
spinlock_t device_lock;
/* list of 'struct r1bio' that need to be processed by raid1d,
@@ -65,6 +78,7 @@ struct r1conf {
int nr_waiting;
int nr_queued;
int barrier;
+ int array_frozen;
/* Set to 1 if a full sync is needed, (fresh device added).
* Cleared when a sync completes.
@@ -111,6 +125,7 @@ struct r1bio {
* in this BehindIO request
*/
sector_t sector;
+ sector_t start_next_window;
int sectors;
unsigned long state;
struct mddev *mddev;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 7c3508abb5e1..c504e8389e69 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4384,7 +4384,11 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
set_bit(MD_CHANGE_DEVS, &mddev->flags);
md_wakeup_thread(mddev->thread);
wait_event(mddev->sb_wait, mddev->flags == 0 ||
- kthread_should_stop());
+ test_bit(MD_RECOVERY_INTR, &mddev->recovery));
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+ allow_barrier(conf);
+ return sectors_done;
+ }
conf->reshape_safe = mddev->reshape_position;
allow_barrier(conf);
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7f0e17a27aeb..cc055da02e2a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -85,6 +85,42 @@ static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
return &conf->stripe_hashtbl[hash];
}
+static inline int stripe_hash_locks_hash(sector_t sect)
+{
+ return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
+}
+
+static inline void lock_device_hash_lock(struct r5conf *conf, int hash)
+{
+ spin_lock_irq(conf->hash_locks + hash);
+ spin_lock(&conf->device_lock);
+}
+
+static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
+{
+ spin_unlock(&conf->device_lock);
+ spin_unlock_irq(conf->hash_locks + hash);
+}
+
+static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
+{
+ int i;
+ local_irq_disable();
+ spin_lock(conf->hash_locks);
+ for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
+ spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
+ spin_lock(&conf->device_lock);
+}
+
+static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
+{
+ int i;
+ spin_unlock(&conf->device_lock);
+ for (i = NR_STRIPE_HASH_LOCKS; i; i--)
+ spin_unlock(conf->hash_locks + i - 1);
+ local_irq_enable();
+}
+
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
* order without overlap. There may be several bio's per stripe+device, and
* a bio could span several devices.
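
These helpers impose a fixed acquisition order — a stripe's hash lock first, then device_lock — and the "all locks" variant always starts at hash_locks[0] and nests the rest against it, which both prevents deadlock and, via spin_lock_nest_lock(), tells lockdep that eight same-class locks are being taken deliberately. The ordering discipline, as a userspace sketch (mutexes assumed initialised with pthread_mutex_init() at startup):

#include <pthread.h>

#define NR_HASH_LOCKS 8

static pthread_mutex_t hash_locks[NR_HASH_LOCKS];
static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;

/* per-stripe path: one hash lock, then the device lock */
static void lock_hash(int hash)
{
	pthread_mutex_lock(&hash_locks[hash]);
	pthread_mutex_lock(&device_lock);
}

/* global path: every hash lock in ascending order, then the device lock;
 * the consistent order is what keeps this deadlock-free against lock_hash() */
static void lock_all(void)
{
	for (int i = 0; i < NR_HASH_LOCKS; i++)
		pthread_mutex_lock(&hash_locks[i]);
	pthread_mutex_lock(&device_lock);
}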
@@ -249,7 +285,8 @@ static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
}
}
-static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
+static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
+ struct list_head *temp_inactive_list)
{
BUG_ON(!list_empty(&sh->lru));
BUG_ON(atomic_read(&conf->active_stripes)==0);
@@ -278,23 +315,68 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
< IO_THRESHOLD)
md_wakeup_thread(conf->mddev->thread);
atomic_dec(&conf->active_stripes);
- if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
- list_add_tail(&sh->lru, &conf->inactive_list);
- wake_up(&conf->wait_for_stripe);
- if (conf->retry_read_aligned)
- md_wakeup_thread(conf->mddev->thread);
- }
+ if (!test_bit(STRIPE_EXPANDING, &sh->state))
+ list_add_tail(&sh->lru, temp_inactive_list);
}
}
-static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
+static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
+ struct list_head *temp_inactive_list)
{
if (atomic_dec_and_test(&sh->count))
- do_release_stripe(conf, sh);
+ do_release_stripe(conf, sh, temp_inactive_list);
+}
+
+/*
+ * @hash could be NR_STRIPE_HASH_LOCKS, in which case temp_inactive_list
+ * holds one list per hash lock.
+ *
+ * Be careful: only one task can add/delete stripes from temp_inactive_list at
+ * a given time. Adding stripes only takes the device lock, while deleting
+ * stripes only takes the hash lock.
+ */
+static void release_inactive_stripe_list(struct r5conf *conf,
+ struct list_head *temp_inactive_list,
+ int hash)
+{
+ int size;
+ bool do_wakeup = false;
+ unsigned long flags;
+
+ if (hash == NR_STRIPE_HASH_LOCKS) {
+ size = NR_STRIPE_HASH_LOCKS;
+ hash = NR_STRIPE_HASH_LOCKS - 1;
+ } else
+ size = 1;
+ while (size) {
+ struct list_head *list = &temp_inactive_list[size - 1];
+
+ /*
+ * We don't hold any lock here yet, get_active_stripe() might
+ * remove stripes from the list
+ */
+ if (!list_empty_careful(list)) {
+ spin_lock_irqsave(conf->hash_locks + hash, flags);
+ if (list_empty(conf->inactive_list + hash) &&
+ !list_empty(list))
+ atomic_dec(&conf->empty_inactive_list_nr);
+ list_splice_tail_init(list, conf->inactive_list + hash);
+ do_wakeup = true;
+ spin_unlock_irqrestore(conf->hash_locks + hash, flags);
+ }
+ size--;
+ hash--;
+ }
+
+ if (do_wakeup) {
+ wake_up(&conf->wait_for_stripe);
+ if (conf->retry_read_aligned)
+ md_wakeup_thread(conf->mddev->thread);
+ }
}
/* should hold conf->device_lock already */
-static int release_stripe_list(struct r5conf *conf)
+static int release_stripe_list(struct r5conf *conf,
+ struct list_head *temp_inactive_list)
{
struct stripe_head *sh;
int count = 0;
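
The two-phase release keeps hot paths off the hash locks: stripes are first parked on a per-caller temp_inactive_list under device_lock, then release_inactive_stripe_list() splices each sub-list into the matching inactive_list bucket under only that bucket's hash lock, waking waiters once at the end. The splice phase, reduced to minimal list primitives (bucket locks assumed initialised with pthread_mutex_init()):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

#define NR_BUCKETS 8

struct node { struct node *next; };
struct list { struct node *head, *tail; };

static pthread_mutex_t bucket_lock[NR_BUCKETS];
static struct list inactive[NR_BUCKETS];

/* splice 'from' onto the tail of 'to' and reinitialise 'from' */
static void splice_tail_init(struct list *from, struct list *to)
{
	if (!from->head)
		return;
	if (to->tail)
		to->tail->next = from->head;
	else
		to->head = from->head;
	to->tail = from->tail;
	from->head = from->tail = NULL;
}

/* phase two: per-bucket locks only, a single wakeup after the loop */
static bool release_inactive(struct list temp[NR_BUCKETS])
{
	bool do_wakeup = false;

	for (int i = 0; i < NR_BUCKETS; i++) {
		if (!temp[i].head)
			continue;
		pthread_mutex_lock(&bucket_lock[i]);
		splice_tail_init(&temp[i], &inactive[i]);
		do_wakeup = true;
		pthread_mutex_unlock(&bucket_lock[i]);
	}
	return do_wakeup;   /* caller wakes waiters once, outside the locks */
}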
@@ -303,6 +385,8 @@ static int release_stripe_list(struct r5conf *conf)
head = llist_del_all(&conf->released_stripes);
head = llist_reverse_order(head);
while (head) {
+ int hash;
+
sh = llist_entry(head, struct stripe_head, release_list);
head = llist_next(head);
/* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */
@@ -313,7 +397,8 @@ static int release_stripe_list(struct r5conf *conf)
* again, the count is always > 1. This is true for
* STRIPE_ON_UNPLUG_LIST bit too.
*/
- __release_stripe(conf, sh);
+ hash = sh->hash_lock_index;
+ __release_stripe(conf, sh, &temp_inactive_list[hash]);
count++;
}
@@ -324,9 +409,12 @@ static void release_stripe(struct stripe_head *sh)
{
struct r5conf *conf = sh->raid_conf;
unsigned long flags;
+ struct list_head list;
+ int hash;
bool wakeup;
- if (test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
+ if (unlikely(!conf->mddev->thread) ||
+ test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
goto slow_path;
wakeup = llist_add(&sh->release_list, &conf->released_stripes);
if (wakeup)
@@ -336,8 +424,11 @@ slow_path:
local_irq_save(flags);
/* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
- do_release_stripe(conf, sh);
+ INIT_LIST_HEAD(&list);
+ hash = sh->hash_lock_index;
+ do_release_stripe(conf, sh, &list);
spin_unlock(&conf->device_lock);
+ release_inactive_stripe_list(conf, &list, hash);
}
local_irq_restore(flags);
}
@@ -362,18 +453,21 @@ static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
/* find an idle stripe, make sure it is unhashed, and return it. */
-static struct stripe_head *get_free_stripe(struct r5conf *conf)
+static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash)
{
struct stripe_head *sh = NULL;
struct list_head *first;
- if (list_empty(&conf->inactive_list))
+ if (list_empty(conf->inactive_list + hash))
goto out;
- first = conf->inactive_list.next;
+ first = (conf->inactive_list + hash)->next;
sh = list_entry(first, struct stripe_head, lru);
list_del_init(first);
remove_hash(sh);
atomic_inc(&conf->active_stripes);
+ BUG_ON(hash != sh->hash_lock_index);
+ if (list_empty(conf->inactive_list + hash))
+ atomic_inc(&conf->empty_inactive_list_nr);
out:
return sh;
}
@@ -416,7 +510,7 @@ static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
struct r5conf *conf = sh->raid_conf;
- int i;
+ int i, seq;
BUG_ON(atomic_read(&sh->count) != 0);
BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
@@ -426,7 +520,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
(unsigned long long)sh->sector);
remove_hash(sh);
-
+retry:
+ seq = read_seqcount_begin(&conf->gen_lock);
sh->generation = conf->generation - previous;
sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
sh->sector = sector;
@@ -448,6 +543,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
dev->flags = 0;
raid5_build_block(sh, i, previous);
}
+ if (read_seqcount_retry(&conf->gen_lock, seq))
+ goto retry;
insert_hash(conf, sh);
sh->cpu = smp_processor_id();
}
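
init_stripe() now re-reads the array geometry under a seqcount: if a reshape ran write_seqcount_begin()/end() while the stripe was being initialised, read_seqcount_retry() detects it and the whole initialisation restarts against the new generation. The classic reader loop, as a rough userspace approximation (memory-ordering details elided):

#include <stdatomic.h>

static atomic_uint gen_seq;      /* even = stable, odd = writer active */
static int generation;           /* data protected by the seqcount */

static int read_generation(void)
{
	unsigned int seq;
	int g;

	do {
		while ((seq = atomic_load(&gen_seq)) & 1)
			;                        /* writer in progress: spin */
		g = generation;                  /* speculative read */
	} while (atomic_load(&gen_seq) != seq); /* retry if a write slipped in */
	return g;
}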
@@ -552,57 +649,59 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
int previous, int noblock, int noquiesce)
{
struct stripe_head *sh;
+ int hash = stripe_hash_locks_hash(sector);
pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
- spin_lock_irq(&conf->device_lock);
+ spin_lock_irq(conf->hash_locks + hash);
do {
wait_event_lock_irq(conf->wait_for_stripe,
conf->quiesce == 0 || noquiesce,
- conf->device_lock);
+ *(conf->hash_locks + hash));
sh = __find_stripe(conf, sector, conf->generation - previous);
if (!sh) {
if (!conf->inactive_blocked)
- sh = get_free_stripe(conf);
+ sh = get_free_stripe(conf, hash);
if (noblock && sh == NULL)
break;
if (!sh) {
conf->inactive_blocked = 1;
- wait_event_lock_irq(conf->wait_for_stripe,
- !list_empty(&conf->inactive_list) &&
- (atomic_read(&conf->active_stripes)
- < (conf->max_nr_stripes *3/4)
- || !conf->inactive_blocked),
- conf->device_lock);
+ wait_event_lock_irq(
+ conf->wait_for_stripe,
+ !list_empty(conf->inactive_list + hash) &&
+ (atomic_read(&conf->active_stripes)
+ < (conf->max_nr_stripes * 3 / 4)
+ || !conf->inactive_blocked),
+ *(conf->hash_locks + hash));
conf->inactive_blocked = 0;
} else
init_stripe(sh, sector, previous);
} else {
+ spin_lock(&conf->device_lock);
if (atomic_read(&sh->count)) {
BUG_ON(!list_empty(&sh->lru)
&& !test_bit(STRIPE_EXPANDING, &sh->state)
&& !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)
- && !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
+ );
} else {
if (!test_bit(STRIPE_HANDLE, &sh->state))
atomic_inc(&conf->active_stripes);
- if (list_empty(&sh->lru) &&
- !test_bit(STRIPE_EXPANDING, &sh->state))
- BUG();
+ BUG_ON(list_empty(&sh->lru));
list_del_init(&sh->lru);
if (sh->group) {
sh->group->stripes_cnt--;
sh->group = NULL;
}
}
+ spin_unlock(&conf->device_lock);
}
} while (sh == NULL);
if (sh)
atomic_inc(&sh->count);
- spin_unlock_irq(&conf->device_lock);
+ spin_unlock_irq(conf->hash_locks + hash);
return sh;
}
@@ -758,7 +857,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi->bi_sector = (sh->sector
+ rdev->data_offset);
if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
- bi->bi_rw |= REQ_FLUSH;
+ bi->bi_rw |= REQ_NOMERGE;
bi->bi_vcnt = 1;
bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
@@ -1582,7 +1681,7 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
put_cpu();
}
-static int grow_one_stripe(struct r5conf *conf)
+static int grow_one_stripe(struct r5conf *conf, int hash)
{
struct stripe_head *sh;
sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
@@ -1598,6 +1697,7 @@ static int grow_one_stripe(struct r5conf *conf)
kmem_cache_free(conf->slab_cache, sh);
return 0;
}
+ sh->hash_lock_index = hash;
/* we just created an active stripe so... */
atomic_set(&sh->count, 1);
atomic_inc(&conf->active_stripes);
@@ -1610,6 +1710,7 @@ static int grow_stripes(struct r5conf *conf, int num)
{
struct kmem_cache *sc;
int devs = max(conf->raid_disks, conf->previous_raid_disks);
+ int hash;
if (conf->mddev->gendisk)
sprintf(conf->cache_name[0],
@@ -1627,9 +1728,13 @@ static int grow_stripes(struct r5conf *conf, int num)
return 1;
conf->slab_cache = sc;
conf->pool_size = devs;
- while (num--)
- if (!grow_one_stripe(conf))
+ hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
+ while (num--) {
+ if (!grow_one_stripe(conf, hash))
return 1;
+ conf->max_nr_stripes++;
+ hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
+ }
return 0;
}
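
grow_stripes() now spreads the stripes evenly over the hash buckets by walking the hash index round-robin, starting at max_nr_stripes % NR_STRIPE_HASH_LOCKS so a later grow continues exactly where the previous one stopped. The distribution in isolation (a sketch; per_bucket is assumed zero-initialised by the caller):

#define NR_STRIPE_HASH_LOCKS 8

/* count how many stripes land in each bucket when 'num' are added
 * to an array that already holds 'existing' stripes */
static void distribute(int existing, int num,
		       int per_bucket[NR_STRIPE_HASH_LOCKS])
{
	int hash = existing % NR_STRIPE_HASH_LOCKS;

	while (num--) {
		per_bucket[hash]++;
		hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
	}
}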
@@ -1687,6 +1792,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
int err;
struct kmem_cache *sc;
int i;
+ int hash, cnt;
if (newsize <= conf->pool_size)
return 0; /* never bother to shrink */
@@ -1726,19 +1832,29 @@ static int resize_stripes(struct r5conf *conf, int newsize)
* OK, we have enough stripes, start collecting inactive
* stripes and copying them over
*/
+ hash = 0;
+ cnt = 0;
list_for_each_entry(nsh, &newstripes, lru) {
- spin_lock_irq(&conf->device_lock);
- wait_event_lock_irq(conf->wait_for_stripe,
- !list_empty(&conf->inactive_list),
- conf->device_lock);
- osh = get_free_stripe(conf);
- spin_unlock_irq(&conf->device_lock);
+ lock_device_hash_lock(conf, hash);
+ wait_event_cmd(conf->wait_for_stripe,
+ !list_empty(conf->inactive_list + hash),
+ unlock_device_hash_lock(conf, hash),
+ lock_device_hash_lock(conf, hash));
+ osh = get_free_stripe(conf, hash);
+ unlock_device_hash_lock(conf, hash);
atomic_set(&nsh->count, 1);
for(i=0; i<conf->pool_size; i++)
nsh->dev[i].page = osh->dev[i].page;
for( ; i<newsize; i++)
nsh->dev[i].page = NULL;
+ nsh->hash_lock_index = hash;
kmem_cache_free(conf->slab_cache, osh);
+ cnt++;
+ if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS +
+ !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) {
+ hash++;
+ cnt = 0;
+ }
}
kmem_cache_destroy(conf->slab_cache);
@@ -1797,13 +1913,13 @@ static int resize_stripes(struct r5conf *conf, int newsize)
return err;
}
-static int drop_one_stripe(struct r5conf *conf)
+static int drop_one_stripe(struct r5conf *conf, int hash)
{
struct stripe_head *sh;
- spin_lock_irq(&conf->device_lock);
- sh = get_free_stripe(conf);
- spin_unlock_irq(&conf->device_lock);
+ spin_lock_irq(conf->hash_locks + hash);
+ sh = get_free_stripe(conf, hash);
+ spin_unlock_irq(conf->hash_locks + hash);
if (!sh)
return 0;
BUG_ON(atomic_read(&sh->count));
@@ -1815,8 +1931,10 @@ static int drop_one_stripe(struct r5conf *conf)
static void shrink_stripes(struct r5conf *conf)
{
- while (drop_one_stripe(conf))
- ;
+ int hash;
+ for (hash = 0; hash < NR_STRIPE_HASH_LOCKS; hash++)
+ while (drop_one_stripe(conf, hash))
+ ;
if (conf->slab_cache)
kmem_cache_destroy(conf->slab_cache);
@@ -1921,6 +2039,9 @@ static void raid5_end_read_request(struct bio * bi, int error)
mdname(conf->mddev), bdn);
else
retry = 1;
+ if (set_bad && test_bit(In_sync, &rdev->flags)
+ && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
+ retry = 1;
if (retry)
if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
set_bit(R5_ReadError, &sh->dev[i].flags);
@@ -3900,7 +4021,8 @@ static void raid5_activate_delayed(struct r5conf *conf)
}
}
-static void activate_bit_delay(struct r5conf *conf)
+static void activate_bit_delay(struct r5conf *conf,
+ struct list_head *temp_inactive_list)
{
/* device_lock is held */
struct list_head head;
@@ -3908,9 +4030,11 @@ static void activate_bit_delay(struct r5conf *conf)
list_del_init(&conf->bitmap_list);
while (!list_empty(&head)) {
struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
+ int hash;
list_del_init(&sh->lru);
atomic_inc(&sh->count);
- __release_stripe(conf, sh);
+ hash = sh->hash_lock_index;
+ __release_stripe(conf, sh, &temp_inactive_list[hash]);
}
}
@@ -3926,7 +4050,7 @@ int md_raid5_congested(struct mddev *mddev, int bits)
return 1;
if (conf->quiesce)
return 1;
- if (list_empty_careful(&conf->inactive_list))
+ if (atomic_read(&conf->empty_inactive_list_nr))
return 1;
return 0;
@@ -4256,6 +4380,7 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
struct raid5_plug_cb {
struct blk_plug_cb cb;
struct list_head list;
+ struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
};
static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
@@ -4266,6 +4391,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
struct mddev *mddev = cb->cb.data;
struct r5conf *conf = mddev->private;
int cnt = 0;
+ int hash;
if (cb->list.next && !list_empty(&cb->list)) {
spin_lock_irq(&conf->device_lock);
@@ -4283,11 +4409,14 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
* STRIPE_ON_RELEASE_LIST could be set here. In that
* case, the count is always > 1 here
*/
- __release_stripe(conf, sh);
+ hash = sh->hash_lock_index;
+ __release_stripe(conf, sh, &cb->temp_inactive_list[hash]);
cnt++;
}
spin_unlock_irq(&conf->device_lock);
}
+ release_inactive_stripe_list(conf, cb->temp_inactive_list,
+ NR_STRIPE_HASH_LOCKS);
if (mddev->queue)
trace_block_unplug(mddev->queue, cnt, !from_schedule);
kfree(cb);
@@ -4308,8 +4437,12 @@ static void release_stripe_plug(struct mddev *mddev,
cb = container_of(blk_cb, struct raid5_plug_cb, cb);
- if (cb->list.next == NULL)
+ if (cb->list.next == NULL) {
+ int i;
INIT_LIST_HEAD(&cb->list);
+ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
+ INIT_LIST_HEAD(cb->temp_inactive_list + i);
+ }
if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
list_add_tail(&sh->lru, &cb->list);
@@ -4692,14 +4825,19 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
/* Cannot proceed until we've updated the superblock... */
wait_event(conf->wait_for_overlap,
- atomic_read(&conf->reshape_stripes)==0);
+ atomic_read(&conf->reshape_stripes)==0
+ || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
+ if (atomic_read(&conf->reshape_stripes) != 0)
+ return 0;
mddev->reshape_position = conf->reshape_progress;
mddev->curr_resync_completed = sector_nr;
conf->reshape_checkpoint = jiffies;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
md_wakeup_thread(mddev->thread);
wait_event(mddev->sb_wait, mddev->flags == 0 ||
- kthread_should_stop());
+ test_bit(MD_RECOVERY_INTR, &mddev->recovery));
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+ return 0;
spin_lock_irq(&conf->device_lock);
conf->reshape_safe = mddev->reshape_position;
spin_unlock_irq(&conf->device_lock);
@@ -4782,7 +4920,10 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
>= mddev->resync_max - mddev->curr_resync_completed) {
/* Cannot proceed until we've updated the superblock... */
wait_event(conf->wait_for_overlap,
- atomic_read(&conf->reshape_stripes) == 0);
+ atomic_read(&conf->reshape_stripes) == 0
+ || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
+ if (atomic_read(&conf->reshape_stripes) != 0)
+ goto ret;
mddev->reshape_position = conf->reshape_progress;
mddev->curr_resync_completed = sector_nr;
conf->reshape_checkpoint = jiffies;
@@ -4790,13 +4931,16 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
md_wakeup_thread(mddev->thread);
wait_event(mddev->sb_wait,
!test_bit(MD_CHANGE_DEVS, &mddev->flags)
- || kthread_should_stop());
+ || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+ goto ret;
spin_lock_irq(&conf->device_lock);
conf->reshape_safe = mddev->reshape_position;
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_for_overlap);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
+ret:
return reshape_sectors;
}
@@ -4954,27 +5098,45 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
}
static int handle_active_stripes(struct r5conf *conf, int group,
- struct r5worker *worker)
+ struct r5worker *worker,
+ struct list_head *temp_inactive_list)
{
struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
- int i, batch_size = 0;
+ int i, batch_size = 0, hash;
+ bool release_inactive = false;
while (batch_size < MAX_STRIPE_BATCH &&
(sh = __get_priority_stripe(conf, group)) != NULL)
batch[batch_size++] = sh;
- if (batch_size == 0)
- return batch_size;
+ if (batch_size == 0) {
+ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
+ if (!list_empty(temp_inactive_list + i))
+ break;
+ if (i == NR_STRIPE_HASH_LOCKS)
+ return batch_size;
+ release_inactive = true;
+ }
spin_unlock_irq(&conf->device_lock);
+ release_inactive_stripe_list(conf, temp_inactive_list,
+ NR_STRIPE_HASH_LOCKS);
+
+ if (release_inactive) {
+ spin_lock_irq(&conf->device_lock);
+ return 0;
+ }
+
for (i = 0; i < batch_size; i++)
handle_stripe(batch[i]);
cond_resched();
spin_lock_irq(&conf->device_lock);
- for (i = 0; i < batch_size; i++)
- __release_stripe(conf, batch[i]);
+ for (i = 0; i < batch_size; i++) {
+ hash = batch[i]->hash_lock_index;
+ __release_stripe(conf, batch[i], &temp_inactive_list[hash]);
+ }
return batch_size;
}
@@ -4995,9 +5157,10 @@ static void raid5_do_work(struct work_struct *work)
while (1) {
int batch_size, released;
- released = release_stripe_list(conf);
+ released = release_stripe_list(conf, worker->temp_inactive_list);
- batch_size = handle_active_stripes(conf, group_id, worker);
+ batch_size = handle_active_stripes(conf, group_id, worker,
+ worker->temp_inactive_list);
worker->working = false;
if (!batch_size && !released)
break;
@@ -5036,7 +5199,7 @@ static void raid5d(struct md_thread *thread)
struct bio *bio;
int batch_size, released;
- released = release_stripe_list(conf);
+ released = release_stripe_list(conf, conf->temp_inactive_list);
if (
!list_empty(&conf->bitmap_list)) {
@@ -5046,7 +5209,7 @@ static void raid5d(struct md_thread *thread)
bitmap_unplug(mddev->bitmap);
spin_lock_irq(&conf->device_lock);
conf->seq_write = conf->seq_flush;
- activate_bit_delay(conf);
+ activate_bit_delay(conf, conf->temp_inactive_list);
}
raid5_activate_delayed(conf);
@@ -5060,7 +5223,8 @@ static void raid5d(struct md_thread *thread)
handled++;
}
- batch_size = handle_active_stripes(conf, ANY_GROUP, NULL);
+ batch_size = handle_active_stripes(conf, ANY_GROUP, NULL,
+ conf->temp_inactive_list);
if (!batch_size && !released)
break;
handled += batch_size;
@@ -5096,22 +5260,29 @@ raid5_set_cache_size(struct mddev *mddev, int size)
{
struct r5conf *conf = mddev->private;
int err;
+ int hash;
if (size <= 16 || size > 32768)
return -EINVAL;
+ hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
while (size < conf->max_nr_stripes) {
- if (drop_one_stripe(conf))
+ if (drop_one_stripe(conf, hash))
conf->max_nr_stripes--;
else
break;
+ hash--;
+ if (hash < 0)
+ hash = NR_STRIPE_HASH_LOCKS - 1;
}
err = md_allow_write(mddev);
if (err)
return err;
+ hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
while (size > conf->max_nr_stripes) {
- if (grow_one_stripe(conf))
+ if (grow_one_stripe(conf, hash))
conf->max_nr_stripes++;
else break;
+ hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
}
return 0;
}
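
raid5_set_cache_size() has to respect the same per-bucket balance: shrinking starts at the bucket that received the most recent stripe, (max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS, and walks backwards with wraparound, while growing walks forwards from max_nr_stripes % NR_STRIPE_HASH_LOCKS — the mirror image of the grow_stripes() walk above. The backwards walk, sketched without the allocator:

#define NR_STRIPE_HASH_LOCKS 8

static int shrink_to(int max_nr_stripes, int size)
{
	int hash = (max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;

	while (size < max_nr_stripes) {
		max_nr_stripes--;        /* drop_one_stripe(conf, hash) here */
		hash--;
		if (hash < 0)
			hash = NR_STRIPE_HASH_LOCKS - 1;
	}
	return max_nr_stripes;
}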
@@ -5199,15 +5370,18 @@ raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
return 0;
}
-static int alloc_thread_groups(struct r5conf *conf, int cnt);
+static int alloc_thread_groups(struct r5conf *conf, int cnt,
+ int *group_cnt,
+ int *worker_cnt_per_group,
+ struct r5worker_group **worker_groups);
static ssize_t
raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
{
struct r5conf *conf = mddev->private;
unsigned long new;
int err;
- struct r5worker_group *old_groups;
- int old_group_cnt;
+ struct r5worker_group *new_groups, *old_groups;
+ int group_cnt, worker_cnt_per_group;
if (len >= PAGE_SIZE)
return -EINVAL;
@@ -5223,14 +5397,19 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
mddev_suspend(mddev);
old_groups = conf->worker_groups;
- old_group_cnt = conf->worker_cnt_per_group;
+ if (old_groups)
+ flush_workqueue(raid5_wq);
+
+ err = alloc_thread_groups(conf, new,
+ &group_cnt, &worker_cnt_per_group,
+ &new_groups);
+ if (!err) {
+ spin_lock_irq(&conf->device_lock);
+ conf->group_cnt = group_cnt;
+ conf->worker_cnt_per_group = worker_cnt_per_group;
+ conf->worker_groups = new_groups;
+ spin_unlock_irq(&conf->device_lock);
- conf->worker_groups = NULL;
- err = alloc_thread_groups(conf, new);
- if (err) {
- conf->worker_groups = old_groups;
- conf->worker_cnt_per_group = old_group_cnt;
- } else {
if (old_groups)
kfree(old_groups[0].workers);
kfree(old_groups);
@@ -5260,40 +5439,47 @@ static struct attribute_group raid5_attrs_group = {
.attrs = raid5_attrs,
};
-static int alloc_thread_groups(struct r5conf *conf, int cnt)
+static int alloc_thread_groups(struct r5conf *conf, int cnt,
+ int *group_cnt,
+ int *worker_cnt_per_group,
+ struct r5worker_group **worker_groups)
{
- int i, j;
+ int i, j, k;
ssize_t size;
struct r5worker *workers;
- conf->worker_cnt_per_group = cnt;
+ *worker_cnt_per_group = cnt;
if (cnt == 0) {
- conf->worker_groups = NULL;
+ *group_cnt = 0;
+ *worker_groups = NULL;
return 0;
}
- conf->group_cnt = num_possible_nodes();
+ *group_cnt = num_possible_nodes();
size = sizeof(struct r5worker) * cnt;
- workers = kzalloc(size * conf->group_cnt, GFP_NOIO);
- conf->worker_groups = kzalloc(sizeof(struct r5worker_group) *
- conf->group_cnt, GFP_NOIO);
- if (!conf->worker_groups || !workers) {
+ workers = kzalloc(size * *group_cnt, GFP_NOIO);
+ *worker_groups = kzalloc(sizeof(struct r5worker_group) *
+ *group_cnt, GFP_NOIO);
+ if (!*worker_groups || !workers) {
kfree(workers);
- kfree(conf->worker_groups);
- conf->worker_groups = NULL;
+ kfree(*worker_groups);
return -ENOMEM;
}
- for (i = 0; i < conf->group_cnt; i++) {
+ for (i = 0; i < *group_cnt; i++) {
struct r5worker_group *group;
- group = &conf->worker_groups[i];
+ group = &(*worker_groups)[i];
INIT_LIST_HEAD(&group->handle_list);
group->conf = conf;
group->workers = workers + i * cnt;
for (j = 0; j < cnt; j++) {
- group->workers[j].group = group;
- INIT_WORK(&group->workers[j].work, raid5_do_work);
+ struct r5worker *worker = group->workers + j;
+ worker->group = group;
+ INIT_WORK(&worker->work, raid5_do_work);
+
+ for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++)
+ INIT_LIST_HEAD(worker->temp_inactive_list + k);
}
}
@@ -5444,6 +5630,9 @@ static struct r5conf *setup_conf(struct mddev *mddev)
struct md_rdev *rdev;
struct disk_info *disk;
char pers_name[6];
+ int i;
+ int group_cnt, worker_cnt_per_group;
+ struct r5worker_group *new_group;
if (mddev->new_level != 5
&& mddev->new_level != 4
@@ -5478,7 +5667,12 @@ static struct r5conf *setup_conf(struct mddev *mddev)
if (conf == NULL)
goto abort;
/* Don't enable multi-threading by default*/
- if (alloc_thread_groups(conf, 0))
+ if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group,
+ &new_group)) {
+ conf->group_cnt = group_cnt;
+ conf->worker_cnt_per_group = worker_cnt_per_group;
+ conf->worker_groups = new_group;
+ } else
goto abort;
spin_lock_init(&conf->device_lock);
seqcount_init(&conf->gen_lock);
@@ -5488,7 +5682,6 @@ static struct r5conf *setup_conf(struct mddev *mddev)
INIT_LIST_HEAD(&conf->hold_list);
INIT_LIST_HEAD(&conf->delayed_list);
INIT_LIST_HEAD(&conf->bitmap_list);
- INIT_LIST_HEAD(&conf->inactive_list);
init_llist_head(&conf->released_stripes);
atomic_set(&conf->active_stripes, 0);
atomic_set(&conf->preread_active_stripes, 0);
@@ -5514,6 +5707,21 @@ static struct r5conf *setup_conf(struct mddev *mddev)
if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
goto abort;
+ /* We init hash_locks[0] separately so that it can be used
+ * as the reference lock in the spin_lock_nest_lock() call
+ * in lock_all_device_hash_locks_irq in order to convince
+ * lockdep that we know what we are doing.
+ */
+ spin_lock_init(conf->hash_locks);
+ for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
+ spin_lock_init(conf->hash_locks + i);
+
+ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
+ INIT_LIST_HEAD(conf->inactive_list + i);
+
+ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
+ INIT_LIST_HEAD(conf->temp_inactive_list + i);
+
conf->level = mddev->new_level;
if (raid5_alloc_percpu(conf) != 0)
goto abort;
@@ -5554,7 +5762,6 @@ static struct r5conf *setup_conf(struct mddev *mddev)
else
conf->max_degraded = 1;
conf->algorithm = mddev->new_layout;
- conf->max_nr_stripes = NR_STRIPES;
conf->reshape_progress = mddev->reshape_position;
if (conf->reshape_progress != MaxSector) {
conf->prev_chunk_sectors = mddev->chunk_sectors;
@@ -5563,7 +5770,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
- if (grow_stripes(conf, conf->max_nr_stripes)) {
+ atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
+ if (grow_stripes(conf, NR_STRIPES)) {
printk(KERN_ERR
"md/raid:%s: couldn't allocate %dkB for buffers\n",
mdname(mddev), memory);
@@ -6369,12 +6577,18 @@ static int raid5_start_reshape(struct mddev *mddev)
if (!mddev->sync_thread) {
mddev->recovery = 0;
spin_lock_irq(&conf->device_lock);
+ write_seqcount_begin(&conf->gen_lock);
mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
+ mddev->new_chunk_sectors =
+ conf->chunk_sectors = conf->prev_chunk_sectors;
+ mddev->new_layout = conf->algorithm = conf->prev_algo;
rdev_for_each(rdev, mddev)
rdev->new_data_offset = rdev->data_offset;
smp_wmb();
+ conf->generation --;
conf->reshape_progress = MaxSector;
mddev->reshape_position = MaxSector;
+ write_seqcount_end(&conf->gen_lock);
spin_unlock_irq(&conf->device_lock);
return -EAGAIN;
}
@@ -6462,27 +6676,28 @@ static void raid5_quiesce(struct mddev *mddev, int state)
break;
case 1: /* stop all writes */
- spin_lock_irq(&conf->device_lock);
+ lock_all_device_hash_locks_irq(conf);
/* '2' tells resync/reshape to pause so that all
* active stripes can drain
*/
conf->quiesce = 2;
- wait_event_lock_irq(conf->wait_for_stripe,
+ wait_event_cmd(conf->wait_for_stripe,
atomic_read(&conf->active_stripes) == 0 &&
atomic_read(&conf->active_aligned_reads) == 0,
- conf->device_lock);
+ unlock_all_device_hash_locks_irq(conf),
+ lock_all_device_hash_locks_irq(conf));
conf->quiesce = 1;
- spin_unlock_irq(&conf->device_lock);
+ unlock_all_device_hash_locks_irq(conf);
/* allow reshape to continue */
wake_up(&conf->wait_for_overlap);
break;
case 0: /* re-enable writes */
- spin_lock_irq(&conf->device_lock);
+ lock_all_device_hash_locks_irq(conf);
conf->quiesce = 0;
wake_up(&conf->wait_for_stripe);
wake_up(&conf->wait_for_overlap);
- spin_unlock_irq(&conf->device_lock);
+ unlock_all_device_hash_locks_irq(conf);
break;
}
}
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index b42e6b462eda..01ad8ae8f578 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -205,6 +205,7 @@ struct stripe_head {
short pd_idx; /* parity disk index */
short qd_idx; /* 'Q' disk index for raid6 */
short ddf_layout;/* use DDF ordering to calculate Q */
+ short hash_lock_index;
unsigned long state; /* state flags */
atomic_t count; /* nr of active thread/requests */
int bm_seq; /* sequence number for bitmap flushes */
@@ -367,9 +368,18 @@ struct disk_info {
struct md_rdev *rdev, *replacement;
};
+/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
+ * This is because we sometimes take all the spinlocks
+ * and creating that much locking depth can cause
+ * problems.
+ */
+#define NR_STRIPE_HASH_LOCKS 8
+#define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1)
+
struct r5worker {
struct work_struct work;
struct r5worker_group *group;
+ struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
bool working;
};
@@ -382,6 +392,8 @@ struct r5worker_group {
struct r5conf {
struct hlist_head *stripe_hashtbl;
+ /* only protect corresponding hash list and inactive_list */
+ spinlock_t hash_locks[NR_STRIPE_HASH_LOCKS];
struct mddev *mddev;
int chunk_sectors;
int level, algorithm;
@@ -462,7 +474,8 @@ struct r5conf {
* Free stripes pool
*/
atomic_t active_stripes;
- struct list_head inactive_list;
+ struct list_head inactive_list[NR_STRIPE_HASH_LOCKS];
+ atomic_t empty_inactive_list_nr;
struct llist_head released_stripes;
wait_queue_head_t wait_for_stripe;
wait_queue_head_t wait_for_overlap;
@@ -477,6 +490,7 @@ struct r5conf {
* the new thread here until we fully activate the array.
*/
struct md_thread *thread;
+ struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
struct r5worker_group *worker_groups;
int group_cnt;
int worker_cnt_per_group;
diff --git a/drivers/media/common/siano/smscoreapi.h b/drivers/media/common/siano/smscoreapi.h
index d0799e323364..9c9063cd3208 100644
--- a/drivers/media/common/siano/smscoreapi.h
+++ b/drivers/media/common/siano/smscoreapi.h
@@ -955,7 +955,7 @@ struct sms_rx_stats {
u32 modem_state; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */
s32 SNR; /* dB */
u32 ber; /* Post Viterbi ber [1E-5] */
- u32 ber_error_count; /* Number of erronous SYNC bits. */
+ u32 ber_error_count; /* Number of erroneous SYNC bits. */
u32 ber_bit_count; /* Total number of SYNC bits. */
u32 ts_per; /* Transport stream PER,
0xFFFFFFFF indicate N/A */
@@ -981,7 +981,7 @@ struct sms_rx_stats_ex {
u32 modem_state; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */
s32 SNR; /* dB */
u32 ber; /* Post Viterbi ber [1E-5] */
- u32 ber_error_count; /* Number of erronous SYNC bits. */
+ u32 ber_error_count; /* Number of erroneous SYNC bits. */
u32 ber_bit_count; /* Total number of SYNC bits. */
u32 ts_per; /* Transport stream PER,
0xFFFFFFFF indicate N/A */
diff --git a/drivers/media/common/siano/smsdvb.h b/drivers/media/common/siano/smsdvb.h
index 92c413ba0c79..ae36d0ae0fb1 100644
--- a/drivers/media/common/siano/smsdvb.h
+++ b/drivers/media/common/siano/smsdvb.h
@@ -95,7 +95,7 @@ struct RECEPTION_STATISTICS_PER_SLICES_S {
u32 is_demod_locked; /* 0 - not locked, 1 - locked */
u32 ber_bit_count; /* Total number of SYNC bits. */
- u32 ber_error_count; /* Number of erronous SYNC bits. */
+ u32 ber_error_count; /* Number of erroneous SYNC bits. */
s32 MRC_SNR; /* dB */
s32 mrc_in_band_pwr; /* In band power in dBM */
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index 58de4410c525..6c7ff0cdcd32 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -435,7 +435,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
dprintk_tscheck("TEI detected. "
"PID=0x%x data1=0x%x\n",
pid, buf[1]);
- /* data in this packet cant be trusted - drop it unless
+ /* data in this packet can't be trusted - drop it unless
* module option dvb_demux_feed_err_pkts is set */
if (!dvb_demux_feed_err_pkts)
return;
@@ -1032,8 +1032,13 @@ static int dmx_section_feed_release_filter(struct dmx_section_feed *feed,
return -EINVAL;
}
- if (feed->is_filtering)
+ if (feed->is_filtering) {
+ /* release dvbdmx->mutex since it is
+ acquired by stop_filtering() itself */
+ mutex_unlock(&dvbdmx->mutex);
feed->stop_filtering(feed);
+ mutex_lock(&dvbdmx->mutex);
+ }
spin_lock_irq(&dvbdmx->lock);
f = dvbdmxfeed->filter;
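
The unlock/relock pair around stop_filtering() works around a self-deadlock: the callback takes dvbdmx->mutex itself, so calling it with the mutex already held would block forever on a non-recursive lock. The shape of the workaround, with illustrative names (the usual caveat applies: state must be revalidated after retaking the lock):

#include <pthread.h>

static pthread_mutex_t demux_mutex = PTHREAD_MUTEX_INITIALIZER;

static void stop_filtering(void)
{
	pthread_mutex_lock(&demux_mutex);    /* callee takes the lock itself */
	/* ... tear down the feed ... */
	pthread_mutex_unlock(&demux_mutex);
}

static void release_filter(void)
{
	pthread_mutex_lock(&demux_mutex);
	/* ... validate the filter under the lock ... */
	pthread_mutex_unlock(&demux_mutex);  /* drop it before the callback */
	stop_filtering();
	pthread_mutex_lock(&demux_mutex);    /* retake it for the rest */
	/* ... unlink the filter ... */
	pthread_mutex_unlock(&demux_mutex);
}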
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index f91c80c0e9ec..8a86b3025637 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -179,7 +179,7 @@ static __be16 dvb_net_eth_type_trans(struct sk_buff *skb,
eth = eth_hdr(skb);
if (*eth->h_dest & 1) {
- if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0)
+ if(ether_addr_equal(eth->h_dest,dev->broadcast))
skb->pkt_type=PACKET_BROADCAST;
else
skb->pkt_type=PACKET_MULTICAST;
@@ -674,11 +674,13 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
if (priv->rx_mode != RX_MODE_PROMISC) {
if (priv->ule_skb->data[0] & 0x01) {
/* multicast or broadcast */
- if (memcmp(priv->ule_skb->data, bc_addr, ETH_ALEN)) {
+ if (!ether_addr_equal(priv->ule_skb->data, bc_addr)) {
/* multicast */
if (priv->rx_mode == RX_MODE_MULTI) {
int i;
- for(i = 0; i < priv->multi_num && memcmp(priv->ule_skb->data, priv->multi_macs[i], ETH_ALEN); i++)
+ for(i = 0; i < priv->multi_num &&
+ !ether_addr_equal(priv->ule_skb->data,
+ priv->multi_macs[i]); i++)
;
if (i == priv->multi_num)
drop = 1;
@@ -688,7 +690,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
}
/* else: broadcast */
}
- else if (memcmp(priv->ule_skb->data, dev->dev_addr, ETH_ALEN))
+ else if (!ether_addr_equal(priv->ule_skb->data, dev->dev_addr))
drop = 1;
/* else: destination address matches the MAC address of our receiver device */
}
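
These conversions flip the sense of the test: memcmp() returns zero on equality, while ether_addr_equal() returns true, so memcmp(a, b, ETH_ALEN) == 0 becomes ether_addr_equal(a, b) and a bare memcmp(a, b, ETH_ALEN) (nonzero means "differs") becomes !ether_addr_equal(a, b) — exactly the substitutions in the hunks above. A reference implementation of the helper's semantics (the kernel version uses word-sized loads, but the result is equivalent):

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#define ETH_ALEN 6

static bool ether_addr_equal_ref(const unsigned char *a,
				 const unsigned char *b)
{
	return memcmp(a, b, ETH_ALEN) == 0;
}

int main(void)
{
	unsigned char bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	unsigned char mac[ETH_ALEN]   = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01};

	assert(ether_addr_equal_ref(bcast, bcast));  /* memcmp()==0 case */
	assert(!ether_addr_equal_ref(bcast, mac));   /* memcmp()!=0 case */
	return 0;
}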
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index 30ee59052157..65728c25ea05 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -170,18 +170,18 @@ static int af9033_rd_reg_mask(struct af9033_state *state, u32 reg, u8 *val,
static int af9033_wr_reg_val_tab(struct af9033_state *state,
const struct reg_val *tab, int tab_len)
{
+#define MAX_TAB_LEN 212
int ret, i, j;
- u8 buf[MAX_XFER_SIZE];
+ u8 buf[1 + MAX_TAB_LEN];
+
+ dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len);
if (tab_len > sizeof(buf)) {
- dev_warn(&state->i2c->dev,
- "%s: i2c wr len=%d is too big!\n",
- KBUILD_MODNAME, tab_len);
+ dev_warn(&state->i2c->dev, "%s: tab len %d is too big\n",
+ KBUILD_MODNAME, tab_len);
return -EINVAL;
}
- dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len);
-
for (i = 0, j = 0; i < tab_len; i++) {
buf[j] = tab[i].val;
diff --git a/drivers/media/dvb-frontends/cxd2820r_c.c b/drivers/media/dvb-frontends/cxd2820r_c.c
index 125a44041011..5c6ab4921bf1 100644
--- a/drivers/media/dvb-frontends/cxd2820r_c.c
+++ b/drivers/media/dvb-frontends/cxd2820r_c.c
@@ -78,7 +78,7 @@ int cxd2820r_set_frontend_c(struct dvb_frontend *fe)
num = if_freq / 1000; /* Hz => kHz */
num *= 0x4000;
- if_ctl = cxd2820r_div_u64_round_closest(num, 41000);
+ if_ctl = 0x4000 - cxd2820r_div_u64_round_closest(num, 41000);
buf[0] = (if_ctl >> 8) & 0x3f;
buf[1] = (if_ctl >> 0) & 0xff;
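
The fix inverts the IF control word: num is the IF frequency in kHz scaled by 0x4000, divided with rounding by what appears to be the 41 MHz reference expressed in kHz, and the result is now subtracted from 0x4000 to get the downconversion direction right. A worked example, assuming a hypothetical IF of 4.85 MHz:

#include <stdint.h>
#include <stdio.h>

static uint64_t div_round_closest(uint64_t n, uint64_t d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	uint32_t if_freq = 4850000;                   /* Hz, assumed IF */
	uint64_t num = (if_freq / 1000) * 0x4000ULL;  /* kHz scaled by 2^14 */
	uint32_t if_ctl = 0x4000 - (uint32_t)div_round_closest(num, 41000);

	printf("if_ctl=0x%04x buf0=0x%02x buf1=0x%02x\n",
	       if_ctl, (if_ctl >> 8) & 0x3f, if_ctl & 0xff);
	/* prints if_ctl=0x386e buf0=0x38 buf1=0x6e */
	return 0;
}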
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index 90536147bf04..6dbbee453ee1 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -3048,7 +3048,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
dib8000_set_diversity_in(state->fe[0], state->diversity_onoff);
locks = (dib8000_read_word(state, 180) >> 6) & 0x3f; /* P_coff_winlen ? */
- /* coff should lock over P_coff_winlen ofdm symbols : give 3 times this lenght to lock */
+ /* coff should lock over P_coff_winlen ofdm symbols : give 3 times this length to lock */
*timeout = dib8000_get_timeout(state, 2 * locks, SYMBOL_DEPENDENT_ON);
*tune_state = CT_DEMOD_STEP_5;
break;
@@ -3115,7 +3115,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
case CT_DEMOD_STEP_9: /* 39 */
if ((state->revision == 0x8090) || ((dib8000_read_word(state, 1291) >> 9) & 0x1)) { /* fe capable of deinterleaving : esram */
- /* defines timeout for mpeg lock depending on interleaver lenght of longest layer */
+ /* defines timeout for mpeg lock depending on interleaver length of longest layer */
for (i = 0; i < 3; i++) {
if (c->layer[i].interleaving >= deeper_interleaver) {
dprintk("layer%i: time interleaver = %d ", i, c->layer[i].interleaving);
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index d416c15691da..bf29a3f0e6f0 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -1191,7 +1191,7 @@ static int mpegts_configure_pins(struct drxk_state *state, bool mpeg_enable)
goto error;
if (state->m_enable_parallel == true) {
- /* paralel -> enable MD1 to MD7 */
+ /* parallel -> enable MD1 to MD7 */
status = write16(state, SIO_PDR_MD1_CFG__A,
sio_pdr_mdx_cfg);
if (status < 0)
@@ -1428,7 +1428,7 @@ static int mpegts_stop(struct drxk_state *state)
dprintk(1, "\n");
- /* Gracefull shutdown (byte boundaries) */
+ /* Graceful shutdown (byte boundaries) */
status = read16(state, FEC_OC_SNC_MODE__A, &fec_oc_snc_mode);
if (status < 0)
goto error;
@@ -2021,7 +2021,7 @@ static int mpegts_dto_setup(struct drxk_state *state,
fec_oc_dto_burst_len = 204;
}
- /* Check serial or parrallel output */
+ /* Check serial or parallel output */
fec_oc_reg_ipr_mode &= (~(FEC_OC_IPR_MODE_SERIAL__M));
if (state->m_enable_parallel == false) {
/* MPEG data output is serial -> set ipr_mode[0] */
@@ -2908,7 +2908,7 @@ static int adc_synchronization(struct drxk_state *state)
goto error;
if (count == 1) {
- /* Try sampling on a diffrent edge */
+ /* Try sampling on a different edge */
u16 clk_neg = 0;
status = read16(state, IQM_AF_CLKNEG__A, &clk_neg);
@@ -3306,7 +3306,7 @@ static int dvbt_sc_command(struct drxk_state *state,
if (status < 0)
goto error;
- /* Retreive results parameters from SC */
+ /* Retrieve results parameters from SC */
switch (cmd) {
/* All commands yielding 5 results */
/* All commands yielding 4 results */
@@ -3849,7 +3849,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
break;
}
#if 0
- /* No hierachical channels support in BDA */
+ /* No hierarchical channels support in BDA */
/* Priority (only for hierarchical channels) */
switch (channel->priority) {
case DRX_PRIORITY_LOW:
@@ -4081,7 +4081,7 @@ error:
/*============================================================================*/
/**
-* \brief Retreive lock status .
+* \brief Retrieve lock status.
* \param demod Pointer to demodulator instance.
* \param lockStat Pointer to lock status structure.
* \return DRXStatus_t.
@@ -6174,7 +6174,7 @@ static int init_drxk(struct drxk_state *state)
goto error;
/* Stamp driver version number in SCU data RAM in BCD code
- Done to enable field application engineers to retreive drxdriver version
+ Done to enable field application engineers to retrieve drxdriver version
via I2C from SCU RAM.
Not using SCU command interface for SCU register access since no
microcode may be present.
@@ -6399,7 +6399,7 @@ static int drxk_set_parameters(struct dvb_frontend *fe)
fe->ops.tuner_ops.get_if_frequency(fe, &IF);
start(state, 0, IF);
- /* After set_frontend, stats aren't avaliable */
+ /* After set_frontend, stats aren't available */
p->strength.stat[0].scale = FE_SCALE_RELATIVE;
p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index 7efb796c472c..50e8b63e5169 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -710,6 +710,7 @@ struct dvb_frontend *rtl2830_attach(const struct rtl2830_config *cfg,
sizeof(priv->tuner_i2c_adapter.name));
priv->tuner_i2c_adapter.algo = &rtl2830_tuner_i2c_algo;
priv->tuner_i2c_adapter.algo_data = NULL;
+ priv->tuner_i2c_adapter.dev.parent = &i2c->dev;
i2c_set_adapdata(&priv->tuner_i2c_adapter, priv);
if (i2c_add_adapter(&priv->tuner_i2c_adapter) < 0) {
dev_err(&i2c->dev,
diff --git a/drivers/media/i2c/adv7183_regs.h b/drivers/media/i2c/adv7183_regs.h
index 4a5b7d211d2f..b253d400e817 100644
--- a/drivers/media/i2c/adv7183_regs.h
+++ b/drivers/media/i2c/adv7183_regs.h
@@ -52,9 +52,9 @@
#define ADV7183_VS_FIELD_CTRL_1 0x31 /* Vsync field control 1 */
#define ADV7183_VS_FIELD_CTRL_2 0x32 /* Vsync field control 2 */
#define ADV7183_VS_FIELD_CTRL_3 0x33 /* Vsync field control 3 */
-#define ADV7183_HS_POS_CTRL_1 0x34 /* Hsync positon control 1 */
-#define ADV7183_HS_POS_CTRL_2 0x35 /* Hsync positon control 2 */
-#define ADV7183_HS_POS_CTRL_3 0x36 /* Hsync positon control 3 */
+#define ADV7183_HS_POS_CTRL_1 0x34 /* Hsync position control 1 */
+#define ADV7183_HS_POS_CTRL_2 0x35 /* Hsync position control 2 */
+#define ADV7183_HS_POS_CTRL_3 0x36 /* Hsync position control 3 */
#define ADV7183_POLARITY 0x37 /* Polarity */
#define ADV7183_NTSC_COMB_CTRL 0x38 /* NTSC comb control */
#define ADV7183_PAL_COMB_CTRL 0x39 /* PAL comb control */
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index fbfdd2fc2a36..a324106b9f11 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -877,7 +877,7 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd,
break;
case ADV7604_MODE_HDMI:
/* set default prim_mode/vid_std for HDMI
- accoring to [REF_03, c. 4.2] */
+ according to [REF_03, c. 4.2] */
io_write(sd, 0x00, 0x02); /* video std */
io_write(sd, 0x01, 0x06); /* prim mode */
break;
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 22f729d66a96..b154f36740b4 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -1013,7 +1013,7 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd,
break;
case ADV7842_MODE_HDMI:
/* set default prim_mode/vid_std for HDMI
- accoring to [REF_03, c. 4.2] */
+ according to [REF_03, c. 4.2] */
io_write(sd, 0x00, 0x02); /* video std */
io_write(sd, 0x01, 0x06); /* prim mode */
break;
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index 82bf5679da30..99ee456700f4 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -394,7 +394,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (!rc) {
/*
- * If platform_data doesn't specify rc_dev, initilize it
+ * If platform_data doesn't specify rc_dev, initialize it
* internally
*/
rc = rc_allocate_device();
diff --git a/drivers/media/i2c/m5mols/m5mols_controls.c b/drivers/media/i2c/m5mols/m5mols_controls.c
index f34429e452ab..a60931e66312 100644
--- a/drivers/media/i2c/m5mols/m5mols_controls.c
+++ b/drivers/media/i2c/m5mols/m5mols_controls.c
@@ -544,7 +544,7 @@ int m5mols_init_controls(struct v4l2_subdev *sd)
u16 zoom_step;
int ret;
- /* Determine the firmware dependant control range and step values */
+ /* Determine the firmware dependent control range and step values */
ret = m5mols_read_u16(sd, AE_MAX_GAIN_MON, &exposure_max);
if (ret < 0)
return ret;
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index 4734836fe5a4..1c2303d18bf4 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -19,6 +19,7 @@
#include <linux/i2c.h>
#include <linux/log2.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pm.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index 6fec9384d86e..e7f555cc827a 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1460,7 +1460,7 @@ static int s5c73m3_oif_registered(struct v4l2_subdev *sd)
mutex_unlock(&state->lock);
v4l2_dbg(1, s5c73m3_dbg, sd, "%s: Booting %s (%d)\n",
- __func__, ret ? "failed" : "succeded", ret);
+ __func__, ret ? "failed" : "succeeded", ret);
return ret;
}
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3.h b/drivers/media/i2c/s5c73m3/s5c73m3.h
index 9d2c08652246..9dfa516f6944 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3.h
+++ b/drivers/media/i2c/s5c73m3/s5c73m3.h
@@ -393,7 +393,7 @@ struct s5c73m3 {
/* External master clock frequency */
u32 mclk_frequency;
- /* Video bus type - MIPI-CSI2/paralell */
+ /* Video bus type - MIPI-CSI2/parallel */
enum v4l2_mbus_type bus_type;
const struct s5c73m3_frame_size *sensor_pix_size[2];
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index 637d02634527..afdbcb045cee 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -1699,7 +1699,7 @@ static void saa711x_write_platform_data(struct saa711x_state *state,
* the analog demod.
* If the tuner is not found, it returns -ENODEV.
* If auto-detection is disabled and the tuner doesn't match what it was
- * requred, it returns -EINVAL and fills 'name'.
+ * required, it returns -EINVAL and fills 'name'.
* If the chip is found, it returns the chip ID and fills 'name'.
*/
static int saa711x_detect_chip(struct i2c_client *client,
diff --git a/drivers/media/i2c/soc_camera/ov5642.c b/drivers/media/i2c/soc_camera/ov5642.c
index 0a5c5d4fedd6..d2daa6a8f272 100644
--- a/drivers/media/i2c/soc_camera/ov5642.c
+++ b/drivers/media/i2c/soc_camera/ov5642.c
@@ -642,7 +642,7 @@ static const struct ov5642_datafmt
static int reg_read(struct i2c_client *client, u16 reg, u8 *val)
{
int ret;
- /* We have 16-bit i2c addresses - care for endianess */
+ /* We have 16-bit i2c addresses - care for endianness */
unsigned char data[2] = { reg >> 8, reg & 0xff };
ret = i2c_master_send(client, data, 2);
diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c
index 42276d93624c..ed9ae8875348 100644
--- a/drivers/media/i2c/ths7303.c
+++ b/drivers/media/i2c/ths7303.c
@@ -83,7 +83,8 @@ static int ths7303_write(struct v4l2_subdev *sd, u8 reg, u8 val)
}
/* following function is used to set ths7303 */
-int ths7303_setval(struct v4l2_subdev *sd, enum ths7303_filter_mode mode)
+static int ths7303_setval(struct v4l2_subdev *sd,
+ enum ths7303_filter_mode mode)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ths7303_state *state = to_state(sd);
diff --git a/drivers/media/i2c/wm8775.c b/drivers/media/i2c/wm8775.c
index 3f584a7d0781..bee7946faa7c 100644
--- a/drivers/media/i2c/wm8775.c
+++ b/drivers/media/i2c/wm8775.c
@@ -130,12 +130,10 @@ static int wm8775_s_routing(struct v4l2_subdev *sd,
return -EINVAL;
}
state->input = input;
- if (!v4l2_ctrl_g_ctrl(state->mute))
+ if (v4l2_ctrl_g_ctrl(state->mute))
return 0;
if (!v4l2_ctrl_g_ctrl(state->vol))
return 0;
- if (!v4l2_ctrl_g_ctrl(state->bal))
- return 0;
wm8775_set_audio(sd, 1);
return 0;
}
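
The wm8775 change above fixes the gating: a routing change must leave the
output silent while the mute control is set (the old "!mute" test did the
opposite), and a zero balance no longer blocks the update. A tiny sketch of
the corrected logic, with the control reads stubbed out:

    /* Stand-ins for v4l2_ctrl_g_ctrl() reads of the mute/volume controls. */
    static int mute_ctrl, vol_ctrl;

    static void set_audio(void) { /* ... program the codec ... */ }

    static int set_routing(void)
    {
            if (mute_ctrl)          /* muted: do not re-enable audio */
                    return 0;
            if (!vol_ctrl)          /* volume at zero: nothing to apply */
                    return 0;
            set_audio();
            return 0;
    }
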
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index a3b1ee9c00d7..92a06fd85865 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -4182,7 +4182,8 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
}
btv->std = V4L2_STD_PAL;
init_irqreg(btv);
- v4l2_ctrl_handler_setup(hdl);
+ if (!bttv_tvcards[btv->c.type].no_video)
+ v4l2_ctrl_handler_setup(hdl);
if (hdl->error) {
result = hdl->error;
goto fail2;
diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h
index 2767c64df0c8..57f4688ea55b 100644
--- a/drivers/media/pci/cx18/cx18-driver.h
+++ b/drivers/media/pci/cx18/cx18-driver.h
@@ -262,7 +262,7 @@ struct cx18_options {
};
/* per-mdl bit flags */
-#define CX18_F_M_NEED_SWAP 0 /* mdl buffer data must be endianess swapped */
+#define CX18_F_M_NEED_SWAP 0 /* mdl buffer data must be endianness swapped */
/* per-stream, s_flags */
#define CX18_F_S_CLAIMED 3 /* this stream is claimed */
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index e3fc2c71808a..95666eee7b27 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -427,7 +427,7 @@ int mc417_register_read(struct cx23885_dev *dev, u16 address, u32 *value)
cx_write(MC417_RWD, regval);
/* Transition RD to effect read transaction across bus.
- * Transtion 0x5000 -> 0x9000 correct (RD/RDY -> WR/RDY)?
+ * Transition 0x5000 -> 0x9000 correct (RD/RDY -> WR/RDY)?
* Should it be 0x9000 -> 0xF000 (also, why is RDY being set? It's
* input only...)
*/
diff --git a/drivers/media/pci/pluto2/pluto2.c b/drivers/media/pci/pluto2/pluto2.c
index 8164d74b46a4..655d6854a8d7 100644
--- a/drivers/media/pci/pluto2/pluto2.c
+++ b/drivers/media/pci/pluto2/pluto2.c
@@ -401,7 +401,7 @@ static int pluto_hw_init(struct pluto *pluto)
/* set automatic LED control by FPGA */
pluto_rw(pluto, REG_MISC, MISC_ALED, MISC_ALED);
- /* set data endianess */
+ /* set data endianness */
#ifdef __LITTLE_ENDIAN
pluto_rw(pluto, REG_PIDn(0), PID0_END, PID0_END);
#else
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 57ef5456f1e8..1bf06970ca3e 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -1354,9 +1354,11 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
if (fw_debug) {
dev->kthread = kthread_run(saa7164_thread_function, dev,
"saa7164 debug");
- if (!dev->kthread)
+ if (IS_ERR(dev->kthread)) {
+ dev->kthread = NULL;
printk(KERN_ERR "%s() Failed to create "
"debug kernel thread\n", __func__);
+ }
}
} /* != BOARD_UNKNOWN */
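
The saa7164 fix above works because kthread_run() never returns NULL on
failure; it returns an encoded error pointer, so the old "!dev->kthread" test
could never fire. A compilable sketch of the ERR_PTR convention, with the
kernel macros stubbed just enough to show the shape:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define ERR_PTR(err)  ((void *)(long)(err))
    #define PTR_ERR(ptr)  ((long)(ptr))
    #define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    static void *kthread_run_stub(int fail)
    {
            return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000;
    }

    int main(void)
    {
            void *task = kthread_run_stub(1);

            if (IS_ERR(task)) {     /* a NULL check would miss this */
                    fprintf(stderr, "thread failed: %ld\n", PTR_ERR(task));
                    task = NULL;    /* keep later cleanup paths safe */
            }
            return 0;
    }
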
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
index bd72fb97fea5..61f3dbcc259f 100644
--- a/drivers/media/platform/coda.c
+++ b/drivers/media/platform/coda.c
@@ -1434,7 +1434,7 @@ static void coda_buf_queue(struct vb2_buffer *vb)
if (q_data->fourcc == V4L2_PIX_FMT_H264 &&
vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
/*
- * For backwards compatiblity, queuing an empty buffer marks
+ * For backwards compatibility, queuing an empty buffer marks
* the stream end
*/
if (vb2_get_plane_payload(vb, 0) == 0)
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index 3d66d88ea3a1..f7915695c907 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -1039,7 +1039,7 @@ static int fimc_runtime_resume(struct device *dev)
dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
- /* Enable clocks and perform basic initalization */
+ /* Enable clocks and perform basic initialization */
clk_enable(fimc->clock[CLK_GATE]);
fimc_hw_reset(fimc);
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index 7a4ee4c0449d..c1bce170df6f 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -759,7 +759,7 @@ static int fimc_md_register_platform_entity(struct fimc_md *fmd,
goto dev_unlock;
drvdata = dev_get_drvdata(dev);
- /* Some subdev didn't probe succesfully id drvdata is NULL */
+ /* Some subdev didn't probe successfully if drvdata is NULL */
if (drvdata) {
switch (plat_entity) {
case IDX_FIMC:
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 36513e896413..65cab70fefcb 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -341,8 +341,7 @@ static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op,
ctx->xt->dir = DMA_MEM_TO_MEM;
ctx->xt->src_sgl = false;
ctx->xt->dst_sgl = true;
- flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT |
- DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SKIP_SRC_UNMAP;
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
tx = dmadev->device_prep_interleaved_dma(chan, ctx->xt, flags);
if (tx == NULL) {
diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c
index 3458fa0e2fd5..054507f16734 100644
--- a/drivers/media/platform/marvell-ccic/mmp-driver.c
+++ b/drivers/media/platform/marvell-ccic/mmp-driver.c
@@ -142,12 +142,6 @@ static int mmpcam_power_up(struct mcam_camera *mcam)
struct mmp_camera *cam = mcam_to_cam(mcam);
struct mmp_camera_platform_data *pdata;
- if (mcam->bus_type == V4L2_MBUS_CSI2) {
- cam->mipi_clk = devm_clk_get(mcam->dev, "mipi");
- if ((IS_ERR(cam->mipi_clk) && mcam->dphy[2] == 0))
- return PTR_ERR(cam->mipi_clk);
- }
-
/*
* Turn on power and clocks to the controller.
*/
@@ -186,12 +180,6 @@ static void mmpcam_power_down(struct mcam_camera *mcam)
gpio_set_value(pdata->sensor_power_gpio, 0);
gpio_set_value(pdata->sensor_reset_gpio, 0);
- if (mcam->bus_type == V4L2_MBUS_CSI2 && !IS_ERR(cam->mipi_clk)) {
- if (cam->mipi_clk)
- devm_clk_put(mcam->dev, cam->mipi_clk);
- cam->mipi_clk = NULL;
- }
-
mcam_clk_disable(mcam);
}
@@ -292,8 +280,9 @@ void mmpcam_calc_dphy(struct mcam_camera *mcam)
return;
/* get the escape clk, this is hard coded */
+ clk_prepare_enable(cam->mipi_clk);
tx_clk_esc = (clk_get_rate(cam->mipi_clk) / 1000000) / 12;
-
+ clk_disable_unprepare(cam->mipi_clk);
/*
* dphy[2] - CSI2_DPHY6:
* bit 0 ~ bit 7: CK Term Enable
@@ -325,19 +314,6 @@ static irqreturn_t mmpcam_irq(int irq, void *data)
return IRQ_RETVAL(handled);
}
-static void mcam_deinit_clk(struct mcam_camera *mcam)
-{
- unsigned int i;
-
- for (i = 0; i < NR_MCAM_CLK; i++) {
- if (!IS_ERR(mcam->clk[i])) {
- if (mcam->clk[i])
- devm_clk_put(mcam->dev, mcam->clk[i]);
- }
- mcam->clk[i] = NULL;
- }
-}
-
static void mcam_init_clk(struct mcam_camera *mcam)
{
unsigned int i;
@@ -371,7 +347,6 @@ static int mmpcam_probe(struct platform_device *pdev)
if (cam == NULL)
return -ENOMEM;
cam->pdev = pdev;
- cam->mipi_clk = NULL;
INIT_LIST_HEAD(&cam->devlist);
mcam = &cam->mcam;
@@ -387,6 +362,11 @@ static int mmpcam_probe(struct platform_device *pdev)
mcam->mclk_div = pdata->mclk_div;
mcam->bus_type = pdata->bus_type;
mcam->dphy = pdata->dphy;
+ if (mcam->bus_type == V4L2_MBUS_CSI2) {
+ cam->mipi_clk = devm_clk_get(mcam->dev, "mipi");
+ if ((IS_ERR(cam->mipi_clk) && mcam->dphy[2] == 0))
+ return PTR_ERR(cam->mipi_clk);
+ }
mcam->mipi_enabled = false;
mcam->lane = pdata->lane;
mcam->chip_id = MCAM_ARMADA610;
@@ -444,7 +424,7 @@ static int mmpcam_probe(struct platform_device *pdev)
*/
ret = mmpcam_power_up(mcam);
if (ret)
- goto out_deinit_clk;
+ return ret;
ret = mccic_register(mcam);
if (ret)
goto out_power_down;
@@ -469,8 +449,6 @@ out_unregister:
mccic_shutdown(mcam);
out_power_down:
mmpcam_power_down(mcam);
-out_deinit_clk:
- mcam_deinit_clk(mcam);
return ret;
}
@@ -478,18 +456,10 @@ out_deinit_clk:
static int mmpcam_remove(struct mmp_camera *cam)
{
struct mcam_camera *mcam = &cam->mcam;
- struct mmp_camera_platform_data *pdata;
mmpcam_remove_device(cam);
mccic_shutdown(mcam);
mmpcam_power_down(mcam);
- pdata = cam->pdev->dev.platform_data;
- gpio_free(pdata->sensor_reset_gpio);
- gpio_free(pdata->sensor_power_gpio);
- mcam_deinit_clk(mcam);
- iounmap(cam->power_regs);
- iounmap(mcam->regs);
- kfree(cam);
return 0;
}
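
The marvell-ccic rework above can delete mcam_deinit_clk() and the manual
devm_clk_put()/gpio_free()/iounmap() calls because devm_-managed resources
are released automatically when the device unbinds; freeing them by hand
risks a double release. A probe sketch of the idiom (kernel C; names are
illustrative):

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
            /* Managed lookup: released automatically at unbind. */
            struct clk *mipi = devm_clk_get(&pdev->dev, "mipi");

            if (IS_ERR(mipi))
                    return PTR_ERR(mipi);
            /* ... use the clock; no devm_clk_put() in any error path ... */
            return 0;
    }
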
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 1c3608039663..561bce8ffb1b 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -1673,7 +1673,7 @@ void omap3isp_print_status(struct isp_device *isp)
* ISP clocks get disabled in suspend(). Similarly, the clocks are reenabled in
* resume(), and the pipelines are restarted in complete().
*
- * TODO: PM dependencies between the ISP and sensors are not modeled explicitly
+ * TODO: PM dependencies between the ISP and sensors are not modelled explicitly
* yet.
*/
static int isp_pm_prepare(struct device *dev)
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index a908d006f527..f6304bb074f5 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -339,14 +339,11 @@ __isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
if (subdev == NULL)
return -EINVAL;
- mutex_lock(&video->mutex);
-
fmt.pad = pad;
fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
- if (ret == -ENOIOCTLCMD)
- ret = -EINVAL;
+ mutex_lock(&video->mutex);
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
mutex_unlock(&video->mutex);
if (ret)
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc.h b/drivers/media/platform/s5p-mfc/regs-mfc.h
index 9319e93599ae..6ccc3f8c122a 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc.h
+++ b/drivers/media/platform/s5p-mfc/regs-mfc.h
@@ -382,7 +382,7 @@
#define S5P_FIMV_R2H_CMD_EDFU_INIT_RET 16
#define S5P_FIMV_R2H_CMD_ERR_RET 32
-/* Dummy definition for MFCv6 compatibilty */
+/* Dummy definition for MFCv6 compatibility */
#define S5P_FIMV_CODEC_H264_MVC_DEC -1
#define S5P_FIMV_R2H_CMD_FIELD_DONE_RET -1
#define S5P_FIMV_MFC_RESET -1
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 5f2c4ad6c2cb..e46067a57853 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -239,7 +239,7 @@ static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
/* Copy timestamp / timecode from decoded src to dst and set
- appropraite flags */
+ appropriate flags */
src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dec_y_addr) {
@@ -428,7 +428,7 @@ static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev,
case MFCINST_FINISHING:
case MFCINST_FINISHED:
case MFCINST_RUNNING:
- /* It is higly probable that an error occured
+ /* It is highly probable that an error occurred
* while decoding a frame */
clear_work_bit(ctx);
ctx->state = MFCINST_ERROR;
@@ -611,7 +611,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err);
switch (reason) {
case S5P_MFC_R2H_CMD_ERR_RET:
- /* An error has occured */
+ /* An error has occurred */
if (ctx->state == MFCINST_RUNNING &&
s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) >=
dev->warn_start)
@@ -840,7 +840,7 @@ static int s5p_mfc_open(struct file *file)
mutex_unlock(&dev->mfc_mutex);
mfc_debug_leave();
return ret;
- /* Deinit when failure occured */
+ /* Deinit when failure occurred */
err_queue_init:
if (dev->num_inst == 1)
s5p_mfc_deinit_hw(dev);
@@ -881,14 +881,14 @@ static int s5p_mfc_release(struct file *file)
/* Mark context as idle */
clear_work_bit_irqsave(ctx);
/* If instance was initialised then
- * return instance and free reosurces */
+ * return instance and free resources */
if (ctx->inst_no != MFC_NO_INSTANCE_SET) {
mfc_debug(2, "Has to free instance\n");
ctx->state = MFCINST_RETURN_INST;
set_work_bit_irqsave(ctx);
s5p_mfc_clean_ctx_int_flags(ctx);
s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
- /* Wait until instance is returned or timeout occured */
+ /* Wait until instance is returned or timeout occurred */
if (s5p_mfc_wait_for_done_ctx
(ctx, S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0)) {
s5p_mfc_clock_off();
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
index 7cab6849fb5b..2475a3c9a0a6 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
@@ -69,7 +69,7 @@ int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev)
} else {
/* In this case bank2 can point to the same address as bank1.
- * Firmware will always occupy the beggining of this area so it is
+ * Firmware will always occupy the beginning of this area so it is
* impossible to have a video frame buffer with a zero address. */
dev->bank2 = dev->bank1;
}
diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
index 04e6490a45be..fb2acc53112a 100644
--- a/drivers/media/platform/s5p-tv/mixer.h
+++ b/drivers/media/platform/s5p-tv/mixer.h
@@ -65,7 +65,7 @@ struct mxr_format {
int num_subframes;
/** specifies to which subframe belong given plane */
int plane2subframe[MXR_MAX_PLANES];
- /** internal code, driver dependant */
+ /** internal code, driver dependent */
unsigned long cookie;
};
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
index 641b1f071e06..81b97db111d8 100644
--- a/drivers/media/platform/s5p-tv/mixer_video.c
+++ b/drivers/media/platform/s5p-tv/mixer_video.c
@@ -528,7 +528,7 @@ static int mxr_s_dv_timings(struct file *file, void *fh,
mutex_lock(&mdev->mutex);
/* timings change cannot be done while there is an entity
- * dependant on output configuration
+ * dependent on output configuration
*/
if (mdev->n_output > 0) {
mutex_unlock(&mdev->mutex);
@@ -585,7 +585,7 @@ static int mxr_s_std(struct file *file, void *fh, v4l2_std_id norm)
mutex_lock(&mdev->mutex);
/* standard change cannot be done while there is an entity
- * dependant on output configuration
+ * dependent on output configuration
*/
if (mdev->n_output > 0) {
mutex_unlock(&mdev->mutex);
diff --git a/drivers/media/platform/soc_camera/omap1_camera.c b/drivers/media/platform/soc_camera/omap1_camera.c
index 6769193c7c7b..74ce8b6b79fa 100644
--- a/drivers/media/platform/soc_camera/omap1_camera.c
+++ b/drivers/media/platform/soc_camera/omap1_camera.c
@@ -1495,7 +1495,7 @@ static int omap1_cam_set_bus_param(struct soc_camera_device *icd)
if (ctrlclock & LCLK_EN)
CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock);
- /* select bus endianess */
+ /* select bus endianness */
xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
fmt = xlate->host_fmt;
diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c
index 6a74ce040d28..ccdadd623a3a 100644
--- a/drivers/media/platform/timblogiw.c
+++ b/drivers/media/platform/timblogiw.c
@@ -565,7 +565,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
desc = dmaengine_prep_slave_sg(fh->chan,
buf->sg, sg_elems, DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
+ DMA_PREP_INTERRUPT);
if (!desc) {
spin_lock_irq(&fh->queue_lock);
list_del_init(&vb->queue);
diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
index 1d3f11965196..2d4e73b45c5e 100644
--- a/drivers/media/platform/vivi.c
+++ b/drivers/media/platform/vivi.c
@@ -1108,7 +1108,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
return 0;
}
-/* timeperframe is arbitrary and continous */
+/* timeperframe is arbitrary and continuous */
static int vidioc_enum_frameintervals(struct file *file, void *priv,
struct v4l2_frmivalenum *fival)
{
@@ -1125,7 +1125,7 @@ static int vidioc_enum_frameintervals(struct file *file, void *priv,
fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
- /* fill in stepwise (step=1.0 is requred by V4L2 spec) */
+ /* fill in stepwise (step=1.0 is required by V4L2 spec) */
fival->stepwise.min = tpf_min;
fival->stepwise.max = tpf_max;
fival->stepwise.step = (struct v4l2_fract) {1, 1};
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index 1c9e771aa15c..d16bf0f41e24 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -323,7 +323,7 @@ static void vsp1_clocks_disable(struct vsp1_device *vsp1)
* Increment the VSP1 reference count and initialize the device if the first
* reference is taken.
*
- * Return a pointer to the VSP1 device or NULL if an error occured.
+ * Return a pointer to the VSP1 device or NULL if an error occurred.
*/
struct vsp1_device *vsp1_device_get(struct vsp1_device *vsp1)
{
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 714c53ef6c11..4b0ac07af662 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -1026,8 +1026,10 @@ int vsp1_video_init(struct vsp1_video *video, struct vsp1_entity *rwpf)
/* ... and the buffers queue... */
video->alloc_ctx = vb2_dma_contig_init_ctx(video->vsp1->dev);
- if (IS_ERR(video->alloc_ctx))
+ if (IS_ERR(video->alloc_ctx)) {
+ ret = PTR_ERR(video->alloc_ctx);
goto error;
+ }
video->queue.type = video->type;
video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
index 3db8a8cfe1a8..050b3bb96fec 100644
--- a/drivers/media/radio/radio-shark.c
+++ b/drivers/media/radio/radio-shark.c
@@ -271,8 +271,7 @@ static void shark_unregister_leds(struct shark_device *shark)
cancel_work_sync(&shark->led_work);
}
-#ifdef CONFIG_PM
-static void shark_resume_leds(struct shark_device *shark)
+static inline void shark_resume_leds(struct shark_device *shark)
{
if (test_bit(BLUE_IS_PULSE, &shark->brightness_new))
set_bit(BLUE_PULSE_LED, &shark->brightness_new);
@@ -281,7 +280,6 @@ static void shark_resume_leds(struct shark_device *shark)
set_bit(RED_LED, &shark->brightness_new);
schedule_work(&shark->led_work);
}
-#endif
#else
static int shark_register_leds(struct shark_device *shark, struct device *dev)
{
diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
index d86d90dab8bf..8654e0dc5c95 100644
--- a/drivers/media/radio/radio-shark2.c
+++ b/drivers/media/radio/radio-shark2.c
@@ -237,8 +237,7 @@ static void shark_unregister_leds(struct shark_device *shark)
cancel_work_sync(&shark->led_work);
}
-#ifdef CONFIG_PM
-static void shark_resume_leds(struct shark_device *shark)
+static inline void shark_resume_leds(struct shark_device *shark)
{
int i;
@@ -247,7 +246,6 @@ static void shark_resume_leds(struct shark_device *shark)
schedule_work(&shark->led_work);
}
-#endif
#else
static int shark_register_leds(struct shark_device *shark, struct device *dev)
{
diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
index 9c9084cb99f7..2fd9009f8663 100644
--- a/drivers/media/radio/radio-si476x.c
+++ b/drivers/media/radio/radio-si476x.c
@@ -268,8 +268,8 @@ struct si476x_radio;
*
* @tune_freq: Tune chip to a specific frequency
* @seek_start: Start station seeking
- * @rsq_status: Get Recieved Signal Quality(RSQ) status
- * @rds_blckcnt: Get recived RDS blocks count
+ * @rsq_status: Get Received Signal Quality(RSQ) status
+ * @rds_blckcnt: Get received RDS blocks count
* @phase_diversity: Change phase diversity mode of the tuner
* @phase_div_status: Get phase diversity mode status
* @acf_status: Get the status of Automatically Controlled
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index 036e2f54f4db..3ed1f5669f79 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -356,7 +356,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
So we keep it as-is. */
return -EINVAL;
}
- clamp(freq, FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL);
+ freq = clamp(freq, FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL);
tea5764_power_up(radio);
tea5764_tune(radio, (freq * 125) / 2);
return 0;
diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c
index 69e3245a58a0..a9319a24c7ef 100644
--- a/drivers/media/radio/tef6862.c
+++ b/drivers/media/radio/tef6862.c
@@ -112,7 +112,7 @@ static int tef6862_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequen
if (f->tuner != 0)
return -EINVAL;
- clamp(freq, TEF6862_LO_FREQ, TEF6862_HI_FREQ);
+ freq = clamp(freq, TEF6862_LO_FREQ, TEF6862_HI_FREQ);
pll = 1964 + ((freq - TEF6862_LO_FREQ) * 20) / FREQ_MUL;
i2cmsg[0] = (MSA_MODE_PRESET << MSA_MODE_SHIFT) | WM_SUB_PLLM;
i2cmsg[1] = (pll >> 8) & 0xff;
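
Both radio fixes above exist because clamp() evaluates to the bounded value;
it does not modify its argument, so discarding the result was a no-op. A
userspace illustration with a simplified clamp macro (the kernel version adds
type checking, which is not modelled here):

    #include <stdio.h>

    #define clamp(val, lo, hi) \
            ((val) < (lo) ? (lo) : (val) > (hi) ? (hi) : (val))

    int main(void)
    {
            unsigned int freq = 50;

            clamp(freq, 100u, 200u);        /* result discarded: freq is still 50 */
            freq = clamp(freq, 100u, 200u); /* correct: freq is now 100 */
            printf("%u\n", freq);
            return 0;
    }
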
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 72e3fa652481..f329485c6629 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -1370,7 +1370,7 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
* 0x68nnnnB7 to 0x6AnnnnB7, the left mouse button generates
* 0x688301b7 and the right one 0x688481b7. All other keys generate
* 0x2nnnnnnn. Position coordinate is encoded in buf[1] and buf[2] with
- * reversed endianess. Extract direction from buffer, rotate endianess,
+ * reversed endianness. Extract direction from buffer, rotate endianness,
* adjust sign and feed the values into stabilize(). The resulting codes
* will be 0x01008000, 0x01007F00, which match the newer devices.
*/
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 094484fac94c..a5d4f883d053 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -118,7 +118,7 @@ static int debug;
#define RR3_IR_IO_LENGTH_FUZZ 0x04
/* Timeout for end of signal detection */
#define RR3_IR_IO_SIG_TIMEOUT 0x05
-/* Minumum value for pause recognition. */
+/* Minimum value for pause recognition. */
#define RR3_IR_IO_MIN_PAUSE 0x06
/* Clock freq. of EZ-USB chip */
diff --git a/drivers/media/tuners/mt2063.c b/drivers/media/tuners/mt2063.c
index 2e1a02e360ff..20cca405bf45 100644
--- a/drivers/media/tuners/mt2063.c
+++ b/drivers/media/tuners/mt2063.c
@@ -1195,7 +1195,7 @@ static u32 mt2063_set_dnc_output_enable(struct mt2063_state *state,
* DNC Output is selected, the other is always off)
*
* @state: ptr to mt2063_state structure
- * @Mode: desired reciever delivery system
+ * @Mode: desired receiver delivery system
*
* Note: Register cache must be valid for it to work
*/
@@ -2119,7 +2119,7 @@ static int mt2063_set_analog_params(struct dvb_frontend *fe,
/*
* As defined on EN 300 429, the DVB-C roll-off factor is 0.15.
- * So, the amount of the needed bandwith is given by:
+ * So, the amount of the needed bandwidth is given by:
* Bw = Symbol_rate * (1 + 0.15)
* As such, the maximum symbol rate supported by 6 MHz is given by:
* max_symbol_rate = 6 MHz / 1.15 = 5217391 Bauds
diff --git a/drivers/media/tuners/tuner-xc2028-types.h b/drivers/media/tuners/tuner-xc2028-types.h
index 74dc46a71f64..7e4798783db7 100644
--- a/drivers/media/tuners/tuner-xc2028-types.h
+++ b/drivers/media/tuners/tuner-xc2028-types.h
@@ -119,7 +119,7 @@
#define V4L2_STD_A2 (V4L2_STD_A2_A | V4L2_STD_A2_B)
#define V4L2_STD_NICAM (V4L2_STD_NICAM_A | V4L2_STD_NICAM_B)
-/* To preserve backward compatibilty,
+/* To preserve backward compatibility,
(std & V4L2_STD_AUDIO) = 0 means that ALL audio stds are supported
*/
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index e9d017bea377..528cce958a82 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -1412,8 +1412,8 @@ err_v4l2:
usb_set_intfdata(interface, NULL);
err_if:
usb_put_dev(udev);
- kfree(dev);
clear_bit(dev->devno, &cx231xx_devused);
+ kfree(dev);
return retval;
}
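
The cx231xx reorder above closes a use-after-free: clear_bit() read
dev->devno after kfree(dev) had already released the structure. The rule,
sketched with illustrative types:

    #include <stdlib.h>

    struct demo_dev { int devno; };
    static unsigned long devused;

    static void teardown(struct demo_dev *dev)
    {
            devused &= ~(1UL << dev->devno); /* read the object first... */
            free(dev);                       /* ...then release it */
    }
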
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index c8fcd78425bd..8f9b2cea88f0 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -131,7 +131,7 @@ static int af9035_wr_regs(struct dvb_usb_device *d, u32 reg, u8 *val, int len)
{
u8 wbuf[MAX_XFER_SIZE];
u8 mbox = (reg >> 16) & 0xff;
- struct usb_req req = { CMD_MEM_WR, mbox, sizeof(wbuf), wbuf, 0, NULL };
+ struct usb_req req = { CMD_MEM_WR, mbox, 6 + len, wbuf, 0, NULL };
if (6 + len > sizeof(wbuf)) {
dev_warn(&d->udev->dev, "%s: i2c wr: len=%d is too big!\n",
@@ -238,14 +238,15 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
} else {
/* I2C */
u8 buf[MAX_XFER_SIZE];
- struct usb_req req = { CMD_I2C_RD, 0, sizeof(buf),
+ struct usb_req req = { CMD_I2C_RD, 0, 5 + msg[0].len,
buf, msg[1].len, msg[1].buf };
if (5 + msg[0].len > sizeof(buf)) {
dev_warn(&d->udev->dev,
"%s: i2c xfer: len=%d is too big!\n",
KBUILD_MODNAME, msg[0].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
req.mbox |= ((msg[0].addr & 0x80) >> 3);
buf[0] = msg[1].len;
@@ -274,14 +275,15 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
} else {
/* I2C */
u8 buf[MAX_XFER_SIZE];
- struct usb_req req = { CMD_I2C_WR, 0, sizeof(buf), buf,
- 0, NULL };
+ struct usb_req req = { CMD_I2C_WR, 0, 5 + msg[0].len,
+ buf, 0, NULL };
if (5 + msg[0].len > sizeof(buf)) {
dev_warn(&d->udev->dev,
"%s: i2c xfer: len=%d is too big!\n",
KBUILD_MODNAME, msg[0].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
req.mbox |= ((msg[0].addr & 0x80) >> 3);
buf[0] = msg[0].len;
@@ -319,6 +321,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
ret = -EOPNOTSUPP;
}
+unlock:
mutex_unlock(&d->i2c_mutex);
if (ret < 0)
@@ -1534,6 +1537,8 @@ static const struct usb_device_id af9035_id_table[] = {
/* XXX: that same ID [0ccd:0099] is used by af9015 driver too */
{ DVB_USB_DEVICE(USB_VID_TERRATEC, 0x0099,
&af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) },
+ { DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05,
+ &af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) },
{ }
};
MODULE_DEVICE_TABLE(usb, af9035_id_table);
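
In the af9035 hunks above, the early "return -EOPNOTSUPP" paths leaked
d->i2c_mutex; rewriting them as "goto unlock" funnels every exit through the
one place that releases the lock. A pthread sketch of the single-exit pattern
(names are illustrative):

    #include <errno.h>
    #include <pthread.h>

    static pthread_mutex_t i2c_lock = PTHREAD_MUTEX_INITIALIZER;

    static int xfer(int len, int max)
    {
            int ret = 0;

            pthread_mutex_lock(&i2c_lock);
            if (len > max) {
                    ret = -EOPNOTSUPP; /* was: return -EOPNOTSUPP; leaks the lock */
                    goto unlock;
            }
            /* ... perform the transfer ... */
    unlock:
            pthread_mutex_unlock(&i2c_lock);
            return ret;
    }
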
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index 2627553f7de1..08240e498451 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -266,7 +266,7 @@ static int mxl111sf_adap_fe_init(struct dvb_frontend *fe)
struct mxl111sf_adap_state *adap_state = &state->adap_state[fe->id];
int err;
- /* exit if we didnt initialize the driver yet */
+ /* exit if we didn't initialize the driver yet */
if (!state->chip_id) {
mxl_debug("driver not yet initialized, exit.");
goto fail;
@@ -322,7 +322,7 @@ static int mxl111sf_adap_fe_sleep(struct dvb_frontend *fe)
struct mxl111sf_adap_state *adap_state = &state->adap_state[fe->id];
int err;
- /* exit if we didnt initialize the driver yet */
+ /* exit if we didn't initialize the driver yet */
if (!state->chip_id) {
mxl_debug("driver not yet initialized, exit.");
goto fail;
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index 40832a1aef6c..98d24aefb640 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -102,7 +102,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
if (rxlen > 62) {
err("i2c RX buffer can't exceed 62 bytes (dev 0x%02x)",
device_addr);
- txlen = 62;
+ rxlen = 62;
}
b[0] = I2C_SPEED_100KHZ_BIT;
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index fc5d60efd4ab..dd19c9ff76e0 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -1664,8 +1664,8 @@ static int em28xx_v4l2_close(struct file *filp)
em28xx_videodbg("users=%d\n", dev->users);
- mutex_lock(&dev->lock);
vb2_fop_release(filp);
+ mutex_lock(&dev->lock);
if (dev->users == 1) {
/* the device is already disconnect,
diff --git a/drivers/media/usb/gspca/gl860/gl860.c b/drivers/media/usb/gspca/gl860/gl860.c
index cb1e64ca59c9..cea8d7f51c3c 100644
--- a/drivers/media/usb/gspca/gl860/gl860.c
+++ b/drivers/media/usb/gspca/gl860/gl860.c
@@ -438,7 +438,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
s32 nToSkip =
sd->swapRB * (gspca_dev->cam.cam_mode[mode].bytesperline + 1);
- /* Test only against 0202h, so endianess does not matter */
+ /* Test only against 0202h, so endianness does not matter */
switch (*(s16 *) data) {
case 0x0202: /* End of frame, start a new one */
gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
diff --git a/drivers/media/usb/gspca/pac207.c b/drivers/media/usb/gspca/pac207.c
index cd79c180f67b..07529e5a0c56 100644
--- a/drivers/media/usb/gspca/pac207.c
+++ b/drivers/media/usb/gspca/pac207.c
@@ -416,7 +416,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
- int len) /* interrput packet length */
+ int len) /* interrupt packet length */
{
int ret = -EINVAL;
diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
index a91509643563..2fd1c5e31a0f 100644
--- a/drivers/media/usb/gspca/pac7302.c
+++ b/drivers/media/usb/gspca/pac7302.c
@@ -874,7 +874,7 @@ static int sd_dbg_s_register(struct gspca_dev *gspca_dev,
#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
- int len) /* interrput packet length */
+ int len) /* interrupt packet length */
{
int ret = -EINVAL;
u8 data0, data1;
diff --git a/drivers/media/usb/gspca/stk1135.c b/drivers/media/usb/gspca/stk1135.c
index 1fc80af2a189..48234c9a8b6c 100644
--- a/drivers/media/usb/gspca/stk1135.c
+++ b/drivers/media/usb/gspca/stk1135.c
@@ -361,6 +361,9 @@ static void stk1135_configure_clock(struct gspca_dev *gspca_dev)
/* set serial interface clock divider (30MHz/(0x1f*16+2) = 60240 Hz) */
reg_w(gspca_dev, STK1135_REG_SICTL + 2, 0x1f);
+
+ /* wait a while for sensor to catch up */
+ udelay(1000);
}
static void stk1135_camera_disable(struct gspca_dev *gspca_dev)
diff --git a/drivers/media/usb/gspca/stv0680.c b/drivers/media/usb/gspca/stv0680.c
index 9c0827631b9c..7f94ec74282e 100644
--- a/drivers/media/usb/gspca/stv0680.c
+++ b/drivers/media/usb/gspca/stv0680.c
@@ -139,7 +139,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam = &gspca_dev->cam;
- /* Give the camera some time to settle, otherwise initalization will
+ /* Give the camera some time to settle, otherwise initialization will
fail on hotplug, and yes it really needs a full second. */
msleep(1000);
diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c
index a517d185febe..46c9f2229a18 100644
--- a/drivers/media/usb/gspca/sunplus.c
+++ b/drivers/media/usb/gspca/sunplus.c
@@ -1027,6 +1027,7 @@ static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x055f, 0xc650), BS(SPCA533, 0)},
{USB_DEVICE(0x05da, 0x1018), BS(SPCA504B, 0)},
{USB_DEVICE(0x06d6, 0x0031), BS(SPCA533, 0)},
+ {USB_DEVICE(0x06d6, 0x0041), BS(SPCA504B, 0)},
{USB_DEVICE(0x0733, 0x1311), BS(SPCA533, 0)},
{USB_DEVICE(0x0733, 0x1314), BS(SPCA533, 0)},
{USB_DEVICE(0x0733, 0x2211), BS(SPCA533, 0)},
diff --git a/drivers/media/usb/gspca/zc3xx.c b/drivers/media/usb/gspca/zc3xx.c
index 7b95d8e88a20..d3e1b6d8bf49 100644
--- a/drivers/media/usb/gspca/zc3xx.c
+++ b/drivers/media/usb/gspca/zc3xx.c
@@ -6905,7 +6905,7 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev,
#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
- int len) /* interrput packet length */
+ int len) /* interrupt packet length */
{
if (len == 8 && data[4] == 1) {
input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1);
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 77bbf7889659..78c9bc8e7f56 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -1039,7 +1039,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
/* Set the leds off */
pwc_set_leds(pdev, 0, 0);
- /* Setup intial videomode */
+ /* Setup initial videomode */
rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT,
V4L2_PIX_FMT_YUV420, 30, &compression, 1);
if (rc)
diff --git a/drivers/media/usb/usbtv/usbtv.c b/drivers/media/usb/usbtv/usbtv.c
index 8a505a90d318..6222a4ab1e00 100644
--- a/drivers/media/usb/usbtv/usbtv.c
+++ b/drivers/media/usb/usbtv/usbtv.c
@@ -50,13 +50,8 @@
#define USBTV_ISOC_TRANSFERS 16
#define USBTV_ISOC_PACKETS 8
-#define USBTV_WIDTH 720
-#define USBTV_HEIGHT 480
-
#define USBTV_CHUNK_SIZE 256
#define USBTV_CHUNK 240
-#define USBTV_CHUNKS (USBTV_WIDTH * USBTV_HEIGHT \
- / 4 / USBTV_CHUNK)
/* Chunk header. */
#define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \
@@ -65,6 +60,27 @@
#define USBTV_ODD(chunk) ((be32_to_cpu(chunk[0]) & 0x0000f000) >> 15)
#define USBTV_CHUNK_NO(chunk) (be32_to_cpu(chunk[0]) & 0x00000fff)
+#define USBTV_TV_STD (V4L2_STD_525_60 | V4L2_STD_PAL)
+
+/* parameters for supported TV norms */
+struct usbtv_norm_params {
+ v4l2_std_id norm;
+ int cap_width, cap_height;
+};
+
+static struct usbtv_norm_params norm_params[] = {
+ {
+ .norm = V4L2_STD_525_60,
+ .cap_width = 720,
+ .cap_height = 480,
+ },
+ {
+ .norm = V4L2_STD_PAL,
+ .cap_width = 720,
+ .cap_height = 576,
+ }
+};
+
/* A single videobuf2 frame buffer. */
struct usbtv_buf {
struct vb2_buffer vb;
@@ -94,11 +110,38 @@ struct usbtv {
USBTV_COMPOSITE_INPUT,
USBTV_SVIDEO_INPUT,
} input;
+ v4l2_std_id norm;
+ int width, height;
+ int n_chunks;
int iso_size;
unsigned int sequence;
struct urb *isoc_urbs[USBTV_ISOC_TRANSFERS];
};
+static int usbtv_configure_for_norm(struct usbtv *usbtv, v4l2_std_id norm)
+{
+ int i, ret = 0;
+ struct usbtv_norm_params *params = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(norm_params); i++) {
+ if (norm_params[i].norm & norm) {
+ params = &norm_params[i];
+ break;
+ }
+ }
+
+ if (params) {
+ usbtv->width = params->cap_width;
+ usbtv->height = params->cap_height;
+ usbtv->n_chunks = usbtv->width * usbtv->height
+ / 4 / USBTV_CHUNK;
+ usbtv->norm = params->norm;
+ } else
+ ret = -EINVAL;
+
+ return ret;
+}
+
static int usbtv_set_regs(struct usbtv *usbtv, const u16 regs[][2], int size)
{
int ret;
@@ -158,6 +201,57 @@ static int usbtv_select_input(struct usbtv *usbtv, int input)
return ret;
}
+static int usbtv_select_norm(struct usbtv *usbtv, v4l2_std_id norm)
+{
+ int ret;
+ static const u16 pal[][2] = {
+ { USBTV_BASE + 0x001a, 0x0068 },
+ { USBTV_BASE + 0x010e, 0x0072 },
+ { USBTV_BASE + 0x010f, 0x00a2 },
+ { USBTV_BASE + 0x0112, 0x00b0 },
+ { USBTV_BASE + 0x0117, 0x0001 },
+ { USBTV_BASE + 0x0118, 0x002c },
+ { USBTV_BASE + 0x012d, 0x0010 },
+ { USBTV_BASE + 0x012f, 0x0020 },
+ { USBTV_BASE + 0x024f, 0x0002 },
+ { USBTV_BASE + 0x0254, 0x0059 },
+ { USBTV_BASE + 0x025a, 0x0016 },
+ { USBTV_BASE + 0x025b, 0x0035 },
+ { USBTV_BASE + 0x0263, 0x0017 },
+ { USBTV_BASE + 0x0266, 0x0016 },
+ { USBTV_BASE + 0x0267, 0x0036 }
+ };
+
+ static const u16 ntsc[][2] = {
+ { USBTV_BASE + 0x001a, 0x0079 },
+ { USBTV_BASE + 0x010e, 0x0068 },
+ { USBTV_BASE + 0x010f, 0x009c },
+ { USBTV_BASE + 0x0112, 0x00f0 },
+ { USBTV_BASE + 0x0117, 0x0000 },
+ { USBTV_BASE + 0x0118, 0x00fc },
+ { USBTV_BASE + 0x012d, 0x0004 },
+ { USBTV_BASE + 0x012f, 0x0008 },
+ { USBTV_BASE + 0x024f, 0x0001 },
+ { USBTV_BASE + 0x0254, 0x005f },
+ { USBTV_BASE + 0x025a, 0x0012 },
+ { USBTV_BASE + 0x025b, 0x0001 },
+ { USBTV_BASE + 0x0263, 0x001c },
+ { USBTV_BASE + 0x0266, 0x0011 },
+ { USBTV_BASE + 0x0267, 0x0005 }
+ };
+
+ ret = usbtv_configure_for_norm(usbtv, norm);
+
+ if (!ret) {
+ if (norm & V4L2_STD_525_60)
+ ret = usbtv_set_regs(usbtv, ntsc, ARRAY_SIZE(ntsc));
+ else if (norm & V4L2_STD_PAL)
+ ret = usbtv_set_regs(usbtv, pal, ARRAY_SIZE(pal));
+ }
+
+ return ret;
+}
+
static int usbtv_setup_capture(struct usbtv *usbtv)
{
int ret;
@@ -225,26 +319,11 @@ static int usbtv_setup_capture(struct usbtv *usbtv)
{ USBTV_BASE + 0x0284, 0x0088 },
{ USBTV_BASE + 0x0003, 0x0004 },
- { USBTV_BASE + 0x001a, 0x0079 },
{ USBTV_BASE + 0x0100, 0x00d3 },
- { USBTV_BASE + 0x010e, 0x0068 },
- { USBTV_BASE + 0x010f, 0x009c },
- { USBTV_BASE + 0x0112, 0x00f0 },
{ USBTV_BASE + 0x0115, 0x0015 },
- { USBTV_BASE + 0x0117, 0x0000 },
- { USBTV_BASE + 0x0118, 0x00fc },
- { USBTV_BASE + 0x012d, 0x0004 },
- { USBTV_BASE + 0x012f, 0x0008 },
{ USBTV_BASE + 0x0220, 0x002e },
{ USBTV_BASE + 0x0225, 0x0008 },
{ USBTV_BASE + 0x024e, 0x0002 },
- { USBTV_BASE + 0x024f, 0x0001 },
- { USBTV_BASE + 0x0254, 0x005f },
- { USBTV_BASE + 0x025a, 0x0012 },
- { USBTV_BASE + 0x025b, 0x0001 },
- { USBTV_BASE + 0x0263, 0x001c },
- { USBTV_BASE + 0x0266, 0x0011 },
- { USBTV_BASE + 0x0267, 0x0005 },
{ USBTV_BASE + 0x024e, 0x0002 },
{ USBTV_BASE + 0x024f, 0x0002 },
};
@@ -253,6 +332,10 @@ static int usbtv_setup_capture(struct usbtv *usbtv)
if (ret)
return ret;
+ ret = usbtv_select_norm(usbtv, usbtv->norm);
+ if (ret)
+ return ret;
+
ret = usbtv_select_input(usbtv, usbtv->input);
if (ret)
return ret;
@@ -296,7 +379,7 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk)
frame_id = USBTV_FRAME_ID(chunk);
odd = USBTV_ODD(chunk);
chunk_no = USBTV_CHUNK_NO(chunk);
- if (chunk_no >= USBTV_CHUNKS)
+ if (chunk_no >= usbtv->n_chunks)
return;
/* Beginning of a frame. */
@@ -324,10 +407,10 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk)
usbtv->chunks_done++;
/* Last chunk in a frame, signalling an end */
- if (odd && chunk_no == USBTV_CHUNKS-1) {
+ if (odd && chunk_no == usbtv->n_chunks-1) {
int size = vb2_plane_size(&buf->vb, 0);
enum vb2_buffer_state state = usbtv->chunks_done ==
- USBTV_CHUNKS ?
+ usbtv->n_chunks ?
VB2_BUF_STATE_DONE :
VB2_BUF_STATE_ERROR;
@@ -500,6 +583,8 @@ static int usbtv_querycap(struct file *file, void *priv,
static int usbtv_enum_input(struct file *file, void *priv,
struct v4l2_input *i)
{
+ struct usbtv *dev = video_drvdata(file);
+
switch (i->index) {
case USBTV_COMPOSITE_INPUT:
strlcpy(i->name, "Composite", sizeof(i->name));
@@ -512,7 +597,7 @@ static int usbtv_enum_input(struct file *file, void *priv,
}
i->type = V4L2_INPUT_TYPE_CAMERA;
- i->std = V4L2_STD_525_60;
+ i->std = dev->vdev.tvnorms;
return 0;
}
@@ -531,23 +616,37 @@ static int usbtv_enum_fmt_vid_cap(struct file *file, void *priv,
static int usbtv_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- f->fmt.pix.width = USBTV_WIDTH;
- f->fmt.pix.height = USBTV_HEIGHT;
+ struct usbtv *usbtv = video_drvdata(file);
+
+ f->fmt.pix.width = usbtv->width;
+ f->fmt.pix.height = usbtv->height;
f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
f->fmt.pix.field = V4L2_FIELD_INTERLACED;
- f->fmt.pix.bytesperline = USBTV_WIDTH * 2;
+ f->fmt.pix.bytesperline = usbtv->width * 2;
f->fmt.pix.sizeimage = (f->fmt.pix.bytesperline * f->fmt.pix.height);
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
- f->fmt.pix.priv = 0;
+
return 0;
}
static int usbtv_g_std(struct file *file, void *priv, v4l2_std_id *norm)
{
- *norm = V4L2_STD_525_60;
+ struct usbtv *usbtv = video_drvdata(file);
+ *norm = usbtv->norm;
return 0;
}
+static int usbtv_s_std(struct file *file, void *priv, v4l2_std_id norm)
+{
+ int ret = -EINVAL;
+ struct usbtv *usbtv = video_drvdata(file);
+
+ if ((norm & V4L2_STD_525_60) || (norm & V4L2_STD_PAL))
+ ret = usbtv_select_norm(usbtv, norm);
+
+ return ret;
+}
+
static int usbtv_g_input(struct file *file, void *priv, unsigned int *i)
{
struct usbtv *usbtv = video_drvdata(file);
@@ -561,13 +660,6 @@ static int usbtv_s_input(struct file *file, void *priv, unsigned int i)
return usbtv_select_input(usbtv, i);
}
-static int usbtv_s_std(struct file *file, void *priv, v4l2_std_id norm)
-{
- if (norm & V4L2_STD_525_60)
- return 0;
- return -EINVAL;
-}
-
struct v4l2_ioctl_ops usbtv_ioctl_ops = {
.vidioc_querycap = usbtv_querycap,
.vidioc_enum_input = usbtv_enum_input,
@@ -604,10 +696,12 @@ static int usbtv_queue_setup(struct vb2_queue *vq,
const struct v4l2_format *v4l_fmt, unsigned int *nbuffers,
unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[])
{
+ struct usbtv *usbtv = vb2_get_drv_priv(vq);
+
if (*nbuffers < 2)
*nbuffers = 2;
*nplanes = 1;
- sizes[0] = USBTV_WIDTH * USBTV_HEIGHT / 2 * sizeof(u32);
+ sizes[0] = USBTV_CHUNK * usbtv->n_chunks * 2 * sizeof(u32);
return 0;
}
@@ -690,7 +784,11 @@ static int usbtv_probe(struct usb_interface *intf,
return -ENOMEM;
usbtv->dev = dev;
usbtv->udev = usb_get_dev(interface_to_usbdev(intf));
+
usbtv->iso_size = size;
+
+ (void)usbtv_configure_for_norm(usbtv, V4L2_STD_525_60);
+
spin_lock_init(&usbtv->buflock);
mutex_init(&usbtv->v4l2_lock);
mutex_init(&usbtv->vb2q_lock);
@@ -727,7 +825,7 @@ static int usbtv_probe(struct usb_interface *intf,
usbtv->vdev.release = video_device_release_empty;
usbtv->vdev.fops = &usbtv_fops;
usbtv->vdev.ioctl_ops = &usbtv_ioctl_ops;
- usbtv->vdev.tvnorms = V4L2_STD_525_60;
+ usbtv->vdev.tvnorms = USBTV_TV_STD;
usbtv->vdev.queue = &usbtv->vb2q;
usbtv->vdev.lock = &usbtv->v4l2_lock;
set_bit(V4L2_FL_USE_FH_PRIO, &usbtv->vdev.flags);
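
The usbtv diff above replaces the hard-coded NTSC geometry with a table
lookup: usbtv_configure_for_norm() matches by bitwise AND because
v4l2_std_id values are bit sets, so an entry is selected whenever it overlaps
the requested norm. A compilable stand-in (the bit values here are
illustrative, not the real V4L2 constants):

    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t v4l2_std_id;   /* stand-in for the kernel typedef */
    #define STD_NTSC (1ULL << 0)    /* illustrative bits */
    #define STD_PAL  (1ULL << 1)

    struct norm_params { v4l2_std_id norm; int w, h; };

    static const struct norm_params params[] = {
            { STD_NTSC, 720, 480 },
            { STD_PAL,  720, 576 },
    };

    static const struct norm_params *find_norm(v4l2_std_id norm)
    {
            for (size_t i = 0; i < sizeof(params) / sizeof(params[0]); i++)
                    if (params[i].norm & norm) /* any overlap selects the entry */
                            return &params[i];
            return NULL;
    }
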
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 899cb6d1c4a4..898c208889cd 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -556,7 +556,7 @@ static u16 uvc_video_clock_host_sof(const struct uvc_clock_sample *sample)
*
* SOF = ((SOF2 - SOF1) * PTS + SOF1 * STC2 - SOF2 * STC1) / (STC2 - STC1) (1)
*
- * to avoid loosing precision in the division. Similarly, the host timestamp is
+ * to avoid losing precision in the division. Similarly, the host timestamp is
* computed with
*
* TS = ((TS2 - TS1) * PTS + TS1 * SOF2 - TS2 * SOF1) / (SOF2 - SOF1) (2)
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 60dcc0f3b32e..fb46790d0eca 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -420,7 +420,7 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"Advanced Simple",
"Core",
"Simple Scalable",
- "Advanced Coding Efficency",
+ "Advanced Coding Efficiency",
NULL,
};
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index b19b306c8f7f..0edc165f418d 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -145,6 +145,25 @@ static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
}
/**
+ * __setup_lengths() - setup initial lengths for every plane in
+ * every buffer on the queue
+ */
+static void __setup_lengths(struct vb2_queue *q, unsigned int n)
+{
+ unsigned int buffer, plane;
+ struct vb2_buffer *vb;
+
+ for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
+ vb = q->bufs[buffer];
+ if (!vb)
+ continue;
+
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ vb->v4l2_planes[plane].length = q->plane_sizes[plane];
+ }
+}
+
+/**
* __setup_offsets() - setup unique offsets ("cookies") for every plane in
* every buffer on the queue
*/
@@ -169,7 +188,6 @@ static void __setup_offsets(struct vb2_queue *q, unsigned int n)
continue;
for (plane = 0; plane < vb->num_planes; ++plane) {
- vb->v4l2_planes[plane].length = q->plane_sizes[plane];
vb->v4l2_planes[plane].m.mem_offset = off;
dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n",
@@ -241,6 +259,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
q->bufs[q->num_buffers + buffer] = vb;
}
+ __setup_lengths(q, buffer);
if (memory == V4L2_MEMORY_MMAP)
__setup_offsets(q, buffer);
@@ -1824,8 +1843,8 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
return -EINVAL;
}
- if (eb->flags & ~O_CLOEXEC) {
- dprintk(1, "Queue does support only O_CLOEXEC flag\n");
+ if (eb->flags & ~(O_CLOEXEC | O_ACCMODE)) {
+ dprintk(1, "Queue does support only O_CLOEXEC and access mode flags\n");
return -EINVAL;
}
@@ -1848,14 +1867,14 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
vb_plane = &vb->planes[eb->plane];
- dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv);
+ dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE);
if (IS_ERR_OR_NULL(dbuf)) {
dprintk(1, "Failed to export buffer %d, plane %d\n",
eb->index, eb->plane);
return -EINVAL;
}
- ret = dma_buf_fd(dbuf, eb->flags);
+ ret = dma_buf_fd(dbuf, eb->flags & ~O_ACCMODE);
if (ret < 0) {
dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
eb->index, eb->plane, ret);
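
The vb2_expbuf() hunk above threads an access mode through the export path:
the O_ACCMODE bits are handed to the memop (and from there to
dma_buf_export()), while dma_buf_fd() receives only the remaining
fd-installation flags such as O_CLOEXEC, hence the complementary masks. A
small sketch of the split (the helper name is illustrative):

    #include <fcntl.h>

    static void split_expbuf_flags(int flags, int *acc_mode, int *fd_flags)
    {
            *acc_mode = flags & O_ACCMODE;  /* O_RDONLY / O_WRONLY / O_RDWR */
            *fd_flags = flags & ~O_ACCMODE; /* e.g. O_CLOEXEC */
    }
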
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 646f08f4f504..33d3871d1e13 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -393,7 +393,7 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
return sgt;
}
-static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
+static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
struct vb2_dc_buf *buf = buf_priv;
struct dma_buf *dbuf;
@@ -404,7 +404,7 @@ static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
if (WARN_ON(!buf->sgt_base))
return NULL;
- dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
+ dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags);
if (IS_ERR(dbuf))
return NULL;
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 2f860543912c..0d3a8ffe47a3 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -178,7 +178,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
GFP_KERNEL);
if (!buf->pages)
- return NULL;
+ goto userptr_fail_alloc_pages;
num_pages_from_user = get_user_pages(current, current->mm,
vaddr & PAGE_MASK,
@@ -204,6 +204,7 @@ userptr_fail_get_user_pages:
while (--num_pages_from_user >= 0)
put_page(buf->pages[num_pages_from_user]);
kfree(buf->pages);
+userptr_fail_alloc_pages:
kfree(buf);
return NULL;
}
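The added label completes the usual kernel unwind idiom in vb2_dma_sg_get_userptr(): every allocation gets a matching cleanup label, and each failure jumps to the label that releases exactly what was allocated before it. A stripped-down sketch of the shape (hypothetical names, not the driver's real helpers):

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->pages = kzalloc(n_pages * sizeof(*buf->pages), GFP_KERNEL);
	if (!buf->pages)
		goto fail_alloc_pages;		/* frees buf only */

	if (pin_user_memory(buf) < 0)		/* hypothetical step */
		goto fail_pin;			/* frees pages, then buf */

	return buf;

fail_pin:
	kfree(buf->pages);
fail_alloc_pages:
	kfree(buf);
	return NULL;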
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 62a60caa5d1f..dd671582c9a1 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -32,7 +32,7 @@ config MFD_AS3722
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
- depends on I2C && OF
+ depends on I2C=y && OF
help
The ams AS3722 is a compact system PMU suitable for mobile phones,
tablets etc. It has 4 DC/DC step-down regulators, 3 DC/DC step-down
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index da1c6566d93d..37edf9e989b0 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -506,7 +506,7 @@ static struct lpc_ich_info lpc_chipset_info[] = {
.iTCO_version = 2,
},
[LPC_WPT_LP] = {
- .name = "Lynx Point_LP",
+ .name = "Wildcat Point_LP",
.iTCO_version = 2,
},
};
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index 34c18fb8c089..54cc25546592 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -81,31 +81,31 @@ static struct of_device_id sec_dt_match[] = {
int sec_reg_read(struct sec_pmic_dev *sec_pmic, u8 reg, void *dest)
{
- return regmap_read(sec_pmic->regmap, reg, dest);
+ return regmap_read(sec_pmic->regmap_pmic, reg, dest);
}
EXPORT_SYMBOL_GPL(sec_reg_read);
int sec_bulk_read(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf)
{
- return regmap_bulk_read(sec_pmic->regmap, reg, buf, count);
+ return regmap_bulk_read(sec_pmic->regmap_pmic, reg, buf, count);
}
EXPORT_SYMBOL_GPL(sec_bulk_read);
int sec_reg_write(struct sec_pmic_dev *sec_pmic, u8 reg, u8 value)
{
- return regmap_write(sec_pmic->regmap, reg, value);
+ return regmap_write(sec_pmic->regmap_pmic, reg, value);
}
EXPORT_SYMBOL_GPL(sec_reg_write);
int sec_bulk_write(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf)
{
- return regmap_raw_write(sec_pmic->regmap, reg, buf, count);
+ return regmap_raw_write(sec_pmic->regmap_pmic, reg, buf, count);
}
EXPORT_SYMBOL_GPL(sec_bulk_write);
int sec_reg_update(struct sec_pmic_dev *sec_pmic, u8 reg, u8 val, u8 mask)
{
- return regmap_update_bits(sec_pmic->regmap, reg, mask, val);
+ return regmap_update_bits(sec_pmic->regmap_pmic, reg, mask, val);
}
EXPORT_SYMBOL_GPL(sec_reg_update);
@@ -166,6 +166,11 @@ static struct regmap_config s5m8767_regmap_config = {
.cache_type = REGCACHE_FLAT,
};
+static const struct regmap_config sec_rtc_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
#ifdef CONFIG_OF
/*
* Only the common platform data elements for s5m8767 are parsed here from the
@@ -266,9 +271,9 @@ static int sec_pmic_probe(struct i2c_client *i2c,
break;
}
- sec_pmic->regmap = devm_regmap_init_i2c(i2c, regmap);
- if (IS_ERR(sec_pmic->regmap)) {
- ret = PTR_ERR(sec_pmic->regmap);
+ sec_pmic->regmap_pmic = devm_regmap_init_i2c(i2c, regmap);
+ if (IS_ERR(sec_pmic->regmap_pmic)) {
+ ret = PTR_ERR(sec_pmic->regmap_pmic);
dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
ret);
return ret;
@@ -277,6 +282,15 @@ static int sec_pmic_probe(struct i2c_client *i2c,
sec_pmic->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
i2c_set_clientdata(sec_pmic->rtc, sec_pmic);
+ sec_pmic->regmap_rtc = devm_regmap_init_i2c(sec_pmic->rtc,
+ &sec_rtc_regmap_config);
+ if (IS_ERR(sec_pmic->regmap_rtc)) {
+ ret = PTR_ERR(sec_pmic->regmap_rtc);
+ dev_err(&i2c->dev, "Failed to allocate RTC register map: %d\n",
+ ret);
+ return ret;
+ }
+
if (pdata && pdata->cfg_pmic_irq)
pdata->cfg_pmic_irq();
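With the change above the driver keeps two register maps: regmap_pmic on the main I2C client and regmap_rtc on the dummy client at RTC_I2C_ADDR, so each block's accesses go through the right bus address. A hedged sketch of what a consumer such as the RTC cell could then do (example_rtc_read is hypothetical; the regmap call is real API):

	static int example_rtc_read(struct sec_pmic_dev *sec_pmic,
				    unsigned int reg, u8 *val)
	{
		unsigned int tmp;
		int ret;

		ret = regmap_read(sec_pmic->regmap_rtc, reg, &tmp);
		if (ret)
			return ret;
		*val = tmp;
		return 0;
	}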
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
index 0dd84e99081e..b441b1be27cb 100644
--- a/drivers/mfd/sec-irq.c
+++ b/drivers/mfd/sec-irq.c
@@ -280,19 +280,19 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
switch (type) {
case S5M8763X:
- ret = regmap_add_irq_chip(sec_pmic->regmap, sec_pmic->irq,
+ ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
sec_pmic->irq_base, &s5m8763_irq_chip,
&sec_pmic->irq_data);
break;
case S5M8767X:
- ret = regmap_add_irq_chip(sec_pmic->regmap, sec_pmic->irq,
+ ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
sec_pmic->irq_base, &s5m8767_irq_chip,
&sec_pmic->irq_data);
break;
case S2MPS11X:
- ret = regmap_add_irq_chip(sec_pmic->regmap, sec_pmic->irq,
+ ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
sec_pmic->irq_base, &s2mps11_irq_chip,
&sec_pmic->irq_data);
diff --git a/drivers/mfd/ti-ssp.c b/drivers/mfd/ti-ssp.c
index 71e3e0c5bf73..a5424579679c 100644
--- a/drivers/mfd/ti-ssp.c
+++ b/drivers/mfd/ti-ssp.c
@@ -32,6 +32,7 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/sched.h>
#include <linux/mfd/core.h>
#include <linux/mfd/ti_ssp.h>
@@ -409,7 +410,6 @@ static int ti_ssp_probe(struct platform_device *pdev)
cells[id].id = id;
cells[id].name = data->dev_name;
cells[id].platform_data = data->pdata;
- cells[id].data_size = data->pdata_size;
}
error = mfd_add_devices(dev, 0, cells, 2, NULL, 0, NULL);
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
index 08b18f3f5264..9e2b985293fc 100644
--- a/drivers/misc/carma/carma-fpga.c
+++ b/drivers/misc/carma/carma-fpga.c
@@ -633,8 +633,7 @@ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf)
struct dma_async_tx_descriptor *tx;
dma_cookie_t cookie;
dma_addr_t dst, src;
- unsigned long dma_flags = DMA_COMPL_SKIP_DEST_UNMAP |
- DMA_COMPL_SKIP_SRC_UNMAP;
+ unsigned long dma_flags = 0;
dst_sg = buf->vb.sglist;
dst_nents = buf->vb.sglen;
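The deleted flags follow the dmaengine core taking over descriptor unmapping, so clients no longer pass DMA_COMPL_SKIP_SRC_UNMAP/DMA_COMPL_SKIP_DEST_UNMAP; a prepared transfer carries only the flags it actually needs. A condensed sketch of the resulting prep call in this driver's style (chan, dev and the scatterlists assumed already set up and mapped, as in the surrounding code):

	unsigned long dma_flags = 0;	/* unmap handling now lives in the core */

	tx = dev->device_prep_dma_sg(chan, dst_sg, dst_nents,
				     src_sg, src_nents, dma_flags);
	if (!tx)
		return -ENOMEM;		/* simplified error path */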
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 0e8df41aaf14..2cf2bbc0b927 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -198,6 +198,13 @@ static void enclosure_remove_links(struct enclosure_component *cdev)
{
char name[ENCLOSURE_NAME_SIZE];
+ /*
+ * In odd circumstances, like multipath devices, something else may
+ * already have removed the links, so check for this condition first.
+ */
+ if (!cdev->dev->kobj.sd)
+ return;
+
enclosure_link_name(cdev, name);
sysfs_remove_link(&cdev->dev->kobj, name);
sysfs_remove_link(&cdev->cdev.kobj, "device");
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 6c0fde55270d..66f411a6e8ea 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -109,9 +109,12 @@
#define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */
#define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */
-#define MEI_DEV_ID_LPT 0x8C3A /* Lynx Point */
+#define MEI_DEV_ID_LPT_H 0x8C3A /* Lynx Point H */
#define MEI_DEV_ID_LPT_W 0x8D3A /* Lynx Point - Wellsburg */
#define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */
+#define MEI_DEV_ID_LPT_HR 0x8CBA /* Lynx Point H Refresh */
+
+#define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index b96205aece0c..2cab3c0a6805 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -76,9 +76,11 @@ static DEFINE_PCI_DEVICE_TABLE(mei_me_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_H)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_W)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_HR)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_WPT_LP)},
/* required last entry */
{0, }
diff --git a/drivers/misc/mic/card/mic_virtio.c b/drivers/misc/mic/card/mic_virtio.c
index 8aa42e738acc..653799b96bfa 100644
--- a/drivers/misc/mic/card/mic_virtio.c
+++ b/drivers/misc/mic/card/mic_virtio.c
@@ -154,14 +154,14 @@ static void mic_reset_inform_host(struct virtio_device *vdev)
{
struct mic_vdev *mvdev = to_micvdev(vdev);
struct mic_device_ctrl __iomem *dc = mvdev->dc;
- int retry = 100, i;
+ int retry;
iowrite8(0, &dc->host_ack);
iowrite8(1, &dc->vdev_reset);
mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
/* Wait till host completes all card accesses and acks the reset */
- for (i = retry; i--;) {
+ for (retry = 100; retry--;) {
if (ioread8(&dc->host_ack))
break;
msleep(100);
@@ -187,11 +187,12 @@ static void mic_reset(struct virtio_device *vdev)
/*
* The virtio_ring code calls this API when it wants to notify the Host.
*/
-static void mic_notify(struct virtqueue *vq)
+static bool mic_notify(struct virtqueue *vq)
{
struct mic_vdev *mvdev = vq->priv;
mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
+ return true;
}
static void mic_del_vq(struct virtqueue *vq, int n)
@@ -247,17 +248,17 @@ static struct virtqueue *mic_find_vq(struct virtio_device *vdev,
/* First assign the vring's allocated in host memory */
vqconfig = mic_vq_config(mvdev->desc) + index;
memcpy_fromio(&config, vqconfig, sizeof(config));
- _vr_size = vring_size(config.num, MIC_VIRTIO_RING_ALIGN);
+ _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
- va = mic_card_map(mvdev->mdev, config.address, vr_size);
+ va = mic_card_map(mvdev->mdev, le64_to_cpu(config.address), vr_size);
if (!va)
return ERR_PTR(-ENOMEM);
mvdev->vr[index] = va;
memset_io(va, 0x0, _vr_size);
- vq = vring_new_virtqueue(index,
- config.num, MIC_VIRTIO_RING_ALIGN, vdev,
- false,
- va, mic_notify, callback, name);
+ vq = vring_new_virtqueue(index, le16_to_cpu(config.num),
+ MIC_VIRTIO_RING_ALIGN, vdev, false,
+ (void __force *)va, mic_notify, callback,
+ name);
if (!vq) {
err = -ENOMEM;
goto unmap;
@@ -272,7 +273,8 @@ static struct virtqueue *mic_find_vq(struct virtio_device *vdev,
/* Allocate and reassign used ring now */
mvdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
- sizeof(struct vring_used_elem) * config.num);
+ sizeof(struct vring_used_elem) *
+ le16_to_cpu(config.num));
used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
get_order(mvdev->used_size[index]));
if (!used) {
@@ -309,7 +311,7 @@ static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs,
{
struct mic_vdev *mvdev = to_micvdev(vdev);
struct mic_device_ctrl __iomem *dc = mvdev->dc;
- int i, err, retry = 100;
+ int i, err, retry;
/* We must have this many virtqueues. */
if (nvqs > ioread8(&mvdev->desc->num_vq))
@@ -331,7 +333,7 @@ static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs,
* rings have been re-assigned.
*/
mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
- for (i = retry; i--;) {
+ for (retry = 100; retry--;) {
if (!ioread8(&dc->used_address_updated))
break;
msleep(100);
@@ -519,8 +521,8 @@ static void mic_scan_devices(struct mic_driver *mdrv, bool remove)
struct device *dev;
int ret;
- for (i = mic_aligned_size(struct mic_bootparam);
- i < MIC_DP_SIZE; i += mic_total_desc_size(d)) {
+ for (i = sizeof(struct mic_bootparam); i < MIC_DP_SIZE;
+ i += mic_total_desc_size(d)) {
d = mdrv->dp + i;
dc = (void __iomem *)d + mic_aligned_desc_size(d);
/*
@@ -539,7 +541,8 @@ static void mic_scan_devices(struct mic_driver *mdrv, bool remove)
continue;
/* device already exists */
- dev = device_find_child(mdrv->dev, d, mic_match_desc);
+ dev = device_find_child(mdrv->dev, (void __force *)d,
+ mic_match_desc);
if (dev) {
if (remove)
iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE,
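Several hunks above replace the `int retry = 100; for (i = retry; i--;)` pairing with the counter initialized in the for statement itself; besides dropping a variable, this leaves retry holding a meaningful value after the loop (later used by a dev_dbg() in the host-side mic_virtio_del_device()). The idiom in isolation:

	int retry;

	for (retry = 100; retry--;) {
		if (ioread8(&dc->host_ack))	/* condition being polled */
			break;
		msleep(100);
	}
	/* here: retry >= 0 means the ack arrived, retry == -1 means timeout */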
diff --git a/drivers/misc/mic/card/mic_virtio.h b/drivers/misc/mic/card/mic_virtio.h
index 2c5c22c93ba8..d0407ba53bb7 100644
--- a/drivers/misc/mic/card/mic_virtio.h
+++ b/drivers/misc/mic/card/mic_virtio.h
@@ -42,8 +42,8 @@
static inline unsigned mic_desc_size(struct mic_device_desc __iomem *desc)
{
- return mic_aligned_size(*desc)
- + ioread8(&desc->num_vq) * mic_aligned_size(struct mic_vqconfig)
+ return sizeof(*desc)
+ + ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig)
+ ioread8(&desc->feature_len) * 2
+ ioread8(&desc->config_len);
}
@@ -67,8 +67,7 @@ mic_vq_configspace(struct mic_device_desc __iomem *desc)
}
static inline unsigned mic_total_desc_size(struct mic_device_desc __iomem *desc)
{
- return mic_aligned_desc_size(desc) +
- mic_aligned_size(struct mic_device_ctrl);
+ return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
}
int mic_devices_init(struct mic_driver *mdrv);
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index 7558d9186438..b75c6b5cc20f 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -62,7 +62,7 @@ void mic_bootparam_init(struct mic_device *mdev)
{
struct mic_bootparam *bootparam = mdev->dp;
- bootparam->magic = MIC_MAGIC;
+ bootparam->magic = cpu_to_le32(MIC_MAGIC);
bootparam->c2h_shutdown_db = mdev->shutdown_db;
bootparam->h2c_shutdown_db = -1;
bootparam->h2c_config_db = -1;
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c
index 5b8494bd1e00..e04bb4fe6823 100644
--- a/drivers/misc/mic/host/mic_virtio.c
+++ b/drivers/misc/mic/host/mic_virtio.c
@@ -41,7 +41,7 @@ static int mic_virtio_copy_to_user(struct mic_vdev *mvdev,
* We are copying from IO below and should ideally use something
* like copy_to_user_fromio(..) if it existed.
*/
- if (copy_to_user(ubuf, dbuf, len)) {
+ if (copy_to_user(ubuf, (void __force *)dbuf, len)) {
err = -EFAULT;
dev_err(mic_dev(mvdev), "%s %d err %d\n",
__func__, __LINE__, err);
@@ -66,7 +66,7 @@ static int mic_virtio_copy_from_user(struct mic_vdev *mvdev,
* We are copying to IO below and should ideally use something
* like copy_from_user_toio(..) if it existed.
*/
- if (copy_from_user(dbuf, ubuf, len)) {
+ if (copy_from_user((void __force *)dbuf, ubuf, len)) {
err = -EFAULT;
dev_err(mic_dev(mvdev), "%s %d err %d\n",
__func__, __LINE__, err);
@@ -293,7 +293,7 @@ static void mic_virtio_init_post(struct mic_vdev *mvdev)
continue;
}
mvdev->mvr[i].vrh.vring.used =
- mvdev->mdev->aper.va +
+ (void __force *)mvdev->mdev->aper.va +
le64_to_cpu(vqconfig[i].used_address);
}
@@ -378,7 +378,7 @@ int mic_virtio_config_change(struct mic_vdev *mvdev,
void __user *argp)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
- int ret = 0, retry = 100, i;
+ int ret = 0, retry, i;
struct mic_bootparam *bootparam = mvdev->mdev->dp;
s8 db = bootparam->h2c_config_db;
@@ -401,7 +401,7 @@ int mic_virtio_config_change(struct mic_vdev *mvdev,
mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED;
mvdev->mdev->ops->send_intr(mvdev->mdev, db);
- for (i = retry; i--;) {
+ for (retry = 100; retry--;) {
ret = wait_event_timeout(wake,
mvdev->dc->guest_ack, msecs_to_jiffies(100));
if (ret)
@@ -467,7 +467,7 @@ static int mic_copy_dp_entry(struct mic_vdev *mvdev,
}
/* Find the first free device page entry */
- for (i = mic_aligned_size(struct mic_bootparam);
+ for (i = sizeof(struct mic_bootparam);
i < MIC_DP_SIZE - mic_total_desc_size(dd_config);
i += mic_total_desc_size(devp)) {
devp = mdev->dp + i;
@@ -525,6 +525,7 @@ int mic_virtio_add_device(struct mic_vdev *mvdev,
char irqname[10];
struct mic_bootparam *bootparam = mdev->dp;
u16 num;
+ dma_addr_t vr_addr;
mutex_lock(&mdev->mic_mutex);
@@ -559,17 +560,16 @@ int mic_virtio_add_device(struct mic_vdev *mvdev,
}
vr->len = vr_size;
vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
- vr->info->magic = MIC_MAGIC + mvdev->virtio_id + i;
- vqconfig[i].address = mic_map_single(mdev,
- vr->va, vr_size);
- if (mic_map_error(vqconfig[i].address)) {
+ vr->info->magic = cpu_to_le32(MIC_MAGIC + mvdev->virtio_id + i);
+ vr_addr = mic_map_single(mdev, vr->va, vr_size);
+ if (mic_map_error(vr_addr)) {
free_pages((unsigned long)vr->va, get_order(vr_size));
ret = -ENOMEM;
dev_err(mic_dev(mvdev), "%s %d err %d\n",
__func__, __LINE__, ret);
goto err;
}
- vqconfig[i].address = cpu_to_le64(vqconfig[i].address);
+ vqconfig[i].address = cpu_to_le64(vr_addr);
vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
ret = vringh_init_kern(&mvr->vrh,
@@ -639,7 +639,7 @@ void mic_virtio_del_device(struct mic_vdev *mvdev)
struct mic_vdev *tmp_mvdev;
struct mic_device *mdev = mvdev->mdev;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
- int i, ret, retry = 100;
+ int i, ret, retry;
struct mic_vqconfig *vqconfig;
struct mic_bootparam *bootparam = mdev->dp;
s8 db;
@@ -652,16 +652,16 @@ void mic_virtio_del_device(struct mic_vdev *mvdev)
"Requesting hot remove id %d\n", mvdev->virtio_id);
mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE;
mdev->ops->send_intr(mdev, db);
- for (i = retry; i--;) {
+ for (retry = 100; retry--;) {
ret = wait_event_timeout(wake,
mvdev->dc->guest_ack, msecs_to_jiffies(100));
if (ret)
break;
}
dev_dbg(mdev->sdev->parent,
- "Device id %d config_change %d guest_ack %d\n",
+ "Device id %d config_change %d guest_ack %d retry %d\n",
mvdev->virtio_id, mvdev->dc->config_change,
- mvdev->dc->guest_ack);
+ mvdev->dc->guest_ack, retry);
mvdev->dc->config_change = 0;
mvdev->dc->guest_ack = 0;
skip_hot_remove:
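The vr_addr change above fixes an endianness slip: the old code stored the native DMA address in the little-endian descriptor field and then converted that field in place, byte-swapping an already-written value on big-endian hosts. Holding the native value in a dma_addr_t and converting exactly once avoids the double swap (condensed from the hunk; the real code also frees the vring pages on error):

	dma_addr_t vr_addr;

	vr_addr = mic_map_single(mdev, vr->va, vr_size);
	if (mic_map_error(vr_addr))
		return -ENOMEM;				/* simplified */
	vqconfig[i].address = cpu_to_le64(vr_addr);	/* single conversion */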
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c
index 81e9541b784c..0dfa8a81436e 100644
--- a/drivers/misc/mic/host/mic_x100.c
+++ b/drivers/misc/mic/host/mic_x100.c
@@ -397,8 +397,8 @@ mic_x100_load_ramdisk(struct mic_device *mdev)
* so copy over the ramdisk @ 128M.
*/
memcpy_toio(mdev->aper.va + (mdev->bootaddr << 1), fw->data, fw->size);
- iowrite32(cpu_to_le32(mdev->bootaddr << 1), &bp->hdr.ramdisk_image);
- iowrite32(cpu_to_le32(fw->size), &bp->hdr.ramdisk_size);
+ iowrite32(mdev->bootaddr << 1, &bp->hdr.ramdisk_image);
+ iowrite32(fw->size, &bp->hdr.ramdisk_size);
release_firmware(fw);
error:
return rc;
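Same theme in mic_x100_load_ramdisk(): iowrite32() already converts from CPU to little-endian byte order internally, so wrapping its argument in cpu_to_le32() swapped the value twice on big-endian hosts. The corrected calls simply pass native values:

	iowrite32(mdev->bootaddr << 1, &bp->hdr.ramdisk_image);
	iowrite32(fw->size, &bp->hdr.ramdisk_size);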
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index ef8956568c3a..157b570ba343 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -308,8 +308,7 @@ static void sdio_acpi_set_handle(struct sdio_func *func)
struct mmc_host *host = func->card->host;
u64 addr = (host->slotno << 16) | func->num;
- ACPI_HANDLE_SET(&func->dev,
- acpi_get_child(ACPI_HANDLE(host->parent), addr));
+ acpi_preset_companion(&func->dev, ACPI_HANDLE(host->parent), addr);
}
#else
static inline void sdio_acpi_set_handle(struct sdio_func *func) {}
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 0b10a9030f4e..98b6b6ef7e5c 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
+#include <linux/of.h>
#include <linux/omap-dma.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
@@ -90,17 +91,6 @@
#define OMAP_MMC_CMDTYPE_AC 2
#define OMAP_MMC_CMDTYPE_ADTC 3
-#define OMAP_DMA_MMC_TX 21
-#define OMAP_DMA_MMC_RX 22
-#define OMAP_DMA_MMC2_TX 54
-#define OMAP_DMA_MMC2_RX 55
-
-#define OMAP24XX_DMA_MMC2_TX 47
-#define OMAP24XX_DMA_MMC2_RX 48
-#define OMAP24XX_DMA_MMC1_TX 61
-#define OMAP24XX_DMA_MMC1_RX 62
-
-
#define DRIVER_NAME "mmci-omap"
/* Specifies how often in millisecs to poll for card status changes
@@ -1330,7 +1320,7 @@ static int mmc_omap_probe(struct platform_device *pdev)
struct mmc_omap_host *host = NULL;
struct resource *res;
dma_cap_mask_t mask;
- unsigned sig;
+ unsigned sig = 0;
int i, ret = 0;
int irq;
@@ -1340,7 +1330,7 @@ static int mmc_omap_probe(struct platform_device *pdev)
}
if (pdata->nr_slots == 0) {
dev_err(&pdev->dev, "no slots\n");
- return -ENXIO;
+ return -EPROBE_DEFER;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1407,19 +1397,20 @@ static int mmc_omap_probe(struct platform_device *pdev)
host->dma_tx_burst = -1;
host->dma_rx_burst = -1;
- if (mmc_omap2())
- sig = host->id == 0 ? OMAP24XX_DMA_MMC1_TX : OMAP24XX_DMA_MMC2_TX;
- else
- sig = host->id == 0 ? OMAP_DMA_MMC_TX : OMAP_DMA_MMC2_TX;
- host->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+ res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
+ if (res)
+ sig = res->start;
+ host->dma_tx = dma_request_slave_channel_compat(mask,
+ omap_dma_filter_fn, &sig, &pdev->dev, "tx");
if (!host->dma_tx)
dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n",
sig);
- if (mmc_omap2())
- sig = host->id == 0 ? OMAP24XX_DMA_MMC1_RX : OMAP24XX_DMA_MMC2_RX;
- else
- sig = host->id == 0 ? OMAP_DMA_MMC_RX : OMAP_DMA_MMC2_RX;
- host->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
+ if (res)
+ sig = res->start;
+ host->dma_rx = dma_request_slave_channel_compat(mask,
+ omap_dma_filter_fn, &sig, &pdev->dev, "rx");
if (!host->dma_rx)
dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n",
sig);
@@ -1512,12 +1503,20 @@ static int mmc_omap_remove(struct platform_device *pdev)
return 0;
}
+#if IS_BUILTIN(CONFIG_OF)
+static const struct of_device_id mmc_omap_match[] = {
+ { .compatible = "ti,omap2420-mmc", },
+ { },
+};
+#endif
+
static struct platform_driver mmc_omap_driver = {
.probe = mmc_omap_probe,
.remove = mmc_omap_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(mmc_omap_match),
},
};
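The probe rework above shows the transitional helper pattern: dma_request_slave_channel_compat() first tries a DT/ACPI-described channel under the given name and only falls back to the legacy filter function, whose request line now comes from a named platform resource instead of hard-coded defines. Condensed from the hunk (mask, pdev and host assumed initialized as in probe):

	dma_cap_mask_t mask;
	unsigned sig = 0;
	struct resource *res;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
	if (res)
		sig = res->start;	/* legacy DMA request line, if any */

	host->dma_tx = dma_request_slave_channel_compat(mask,
			omap_dma_filter_fn, &sig, &pdev->dev, "tx");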
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index d78a97d4153a..59f08c44abdb 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -375,8 +375,7 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
dma_dev = host->dma_chan->device;
- flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP |
- DMA_COMPL_SKIP_DEST_UNMAP;
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
phys_addr = dma_map_single(dma_dev->dev, p, len, dir);
if (dma_mapping_error(dma_dev->dev, phys_addr)) {
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 3dc1a7564d87..8b2752263db9 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -573,8 +573,6 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
dma_dev = chan->device;
dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
- flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
-
if (direction == DMA_TO_DEVICE) {
dma_src = dma_addr;
dma_dst = host->data_pa;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 4cabdc9fda90..4b3aaa898a8b 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -962,7 +962,7 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
struct platform_device *pdev = info->pdev;
- if (use_dma) {
+ if (info->use_dma) {
pxa_free_dma(info->data_dma_ch);
dma_free_coherent(&pdev->dev, info->buf_size,
info->data_buff, info->data_buff_phys);
@@ -1259,10 +1259,6 @@ static struct of_device_id pxa3xx_nand_dt_ids[] = {
.compatible = "marvell,pxa3xx-nand",
.data = (void *)PXA3XX_NAND_VARIANT_PXA,
},
- {
- .compatible = "marvell,armada370-nand",
- .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
- },
{}
};
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index a7271e093845..67977f15af25 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -32,39 +32,12 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/netlink.h>
+#include <net/Space.h>
/* A unified ethernet device probe. This is the easiest way to have every
ethernet adaptor have the name "eth[0123...]".
*/
-extern struct net_device *hp100_probe(int unit);
-extern struct net_device *ultra_probe(int unit);
-extern struct net_device *wd_probe(int unit);
-extern struct net_device *ne_probe(int unit);
-extern struct net_device *fmv18x_probe(int unit);
-extern struct net_device *i82596_probe(int unit);
-extern struct net_device *ni65_probe(int unit);
-extern struct net_device *sonic_probe(int unit);
-extern struct net_device *smc_init(int unit);
-extern struct net_device *atarilance_probe(int unit);
-extern struct net_device *sun3lance_probe(int unit);
-extern struct net_device *sun3_82586_probe(int unit);
-extern struct net_device *apne_probe(int unit);
-extern struct net_device *cs89x0_probe(int unit);
-extern struct net_device *mvme147lance_probe(int unit);
-extern struct net_device *tc515_probe(int unit);
-extern struct net_device *lance_probe(int unit);
-extern struct net_device *mac8390_probe(int unit);
-extern struct net_device *mac89x0_probe(int unit);
-extern struct net_device *cops_probe(int unit);
-extern struct net_device *ltpc_probe(void);
-
-/* Fibre Channel adapters */
-extern int iph5526_probe(struct net_device *dev);
-
-/* SBNI adapters */
-extern int sbni_probe(int unit);
-
struct devprobe2 {
struct net_device *(*probe)(int unit);
int status; /* non-zero if autoprobe has failed */
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 187b1b7772ef..539e24a1c86c 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -90,8 +90,9 @@
#define AD_LINK_SPEED_BITMASK_10000MBPS 0x10
//endalloun
-// compare MAC addresses
-#define MAC_ADDRESS_COMPARE(A, B) memcmp(A, B, ETH_ALEN)
+/* compare MAC addresses */
+#define MAC_ADDRESS_EQUAL(A, B) \
+ ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } };
static u16 ad_ticks_per_sec;
@@ -147,11 +148,12 @@ static inline struct aggregator *__get_first_agg(struct port *port)
struct bonding *bond = __get_bond_by_port(port);
struct slave *first_slave;
- // If there's no bond for this port, or bond has no slaves
+ /* If there's no bond for this port, or bond has no slaves */
if (bond == NULL)
return NULL;
- first_slave = bond_first_slave(bond);
-
+ rcu_read_lock();
+ first_slave = bond_first_slave_rcu(bond);
+ rcu_read_unlock();
return first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
}
@@ -416,17 +418,18 @@ static u16 __ad_timer_to_ticks(u16 timer_type, u16 par)
*/
static void __choose_matched(struct lacpdu *lacpdu, struct port *port)
{
- // check if all parameters are alike
+ /* check if all parameters are alike
+ * or this is individual link(aggregation == FALSE)
+ * then update the state machine Matched variable.
+ */
if (((ntohs(lacpdu->partner_port) == port->actor_port_number) &&
(ntohs(lacpdu->partner_port_priority) == port->actor_port_priority) &&
- !MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) &&
+ MAC_ADDRESS_EQUAL(&(lacpdu->partner_system), &(port->actor_system)) &&
(ntohs(lacpdu->partner_system_priority) == port->actor_system_priority) &&
(ntohs(lacpdu->partner_key) == port->actor_oper_port_key) &&
((lacpdu->partner_state & AD_STATE_AGGREGATION) == (port->actor_oper_port_state & AD_STATE_AGGREGATION))) ||
- // or this is individual link(aggregation == FALSE)
((lacpdu->actor_state & AD_STATE_AGGREGATION) == 0)
) {
- // update the state machine Matched variable
port->sm_vars |= AD_PORT_MATCHED;
} else {
port->sm_vars &= ~AD_PORT_MATCHED;
@@ -506,14 +509,15 @@ static void __update_selected(struct lacpdu *lacpdu, struct port *port)
if (lacpdu && port) {
const struct port_params *partner = &port->partner_oper;
- // check if any parameter is different
+ /* check if any parameter is different then
+ * update the state machine selected variable.
+ */
if (ntohs(lacpdu->actor_port) != partner->port_number ||
ntohs(lacpdu->actor_port_priority) != partner->port_priority ||
- MAC_ADDRESS_COMPARE(&lacpdu->actor_system, &partner->system) ||
+ !MAC_ADDRESS_EQUAL(&lacpdu->actor_system, &partner->system) ||
ntohs(lacpdu->actor_system_priority) != partner->system_priority ||
ntohs(lacpdu->actor_key) != partner->key ||
(lacpdu->actor_state & AD_STATE_AGGREGATION) != (partner->port_state & AD_STATE_AGGREGATION)) {
- // update the state machine Selected variable
port->sm_vars &= ~AD_PORT_SELECTED;
}
}
@@ -537,15 +541,16 @@ static void __update_default_selected(struct port *port)
const struct port_params *admin = &port->partner_admin;
const struct port_params *oper = &port->partner_oper;
- // check if any parameter is different
+ /* check if any parameter is different then
+ * update the state machine selected variable.
+ */
if (admin->port_number != oper->port_number ||
admin->port_priority != oper->port_priority ||
- MAC_ADDRESS_COMPARE(&admin->system, &oper->system) ||
+ !MAC_ADDRESS_EQUAL(&admin->system, &oper->system) ||
admin->system_priority != oper->system_priority ||
admin->key != oper->key ||
(admin->port_state & AD_STATE_AGGREGATION)
!= (oper->port_state & AD_STATE_AGGREGATION)) {
- // update the state machine Selected variable
port->sm_vars &= ~AD_PORT_SELECTED;
}
}
@@ -565,12 +570,14 @@ static void __update_default_selected(struct port *port)
*/
static void __update_ntt(struct lacpdu *lacpdu, struct port *port)
{
- // validate lacpdu and port
+ /* validate lacpdu and port */
if (lacpdu && port) {
- // check if any parameter is different
+ /* check if any parameter is different then
+ * update the port->ntt.
+ */
if ((ntohs(lacpdu->partner_port) != port->actor_port_number) ||
(ntohs(lacpdu->partner_port_priority) != port->actor_port_priority) ||
- MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) ||
+ !MAC_ADDRESS_EQUAL(&(lacpdu->partner_system), &(port->actor_system)) ||
(ntohs(lacpdu->partner_system_priority) != port->actor_system_priority) ||
(ntohs(lacpdu->partner_key) != port->actor_oper_port_key) ||
((lacpdu->partner_state & AD_STATE_LACP_ACTIVITY) != (port->actor_oper_port_state & AD_STATE_LACP_ACTIVITY)) ||
@@ -578,7 +585,6 @@ static void __update_ntt(struct lacpdu *lacpdu, struct port *port)
((lacpdu->partner_state & AD_STATE_SYNCHRONIZATION) != (port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION)) ||
((lacpdu->partner_state & AD_STATE_AGGREGATION) != (port->actor_oper_port_state & AD_STATE_AGGREGATION))
) {
-
port->ntt = true;
}
}
@@ -702,9 +708,13 @@ static struct aggregator *__get_active_agg(struct aggregator *aggregator)
struct list_head *iter;
struct slave *slave;
- bond_for_each_slave(bond, slave, iter)
- if (SLAVE_AD_INFO(slave).aggregator.is_active)
+ rcu_read_lock();
+ bond_for_each_slave_rcu(bond, slave, iter)
+ if (SLAVE_AD_INFO(slave).aggregator.is_active) {
+ rcu_read_unlock();
return &(SLAVE_AD_INFO(slave).aggregator);
+ }
+ rcu_read_unlock();
return NULL;
}
@@ -1071,9 +1081,8 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
break;
case AD_RX_CURRENT:
- // detect loopback situation
- if (!MAC_ADDRESS_COMPARE(&(lacpdu->actor_system), &(port->actor_system))) {
- // INFO_RECEIVED_LOOPBACK_FRAMES
+ /* detect loopback situation */
+ if (MAC_ADDRESS_EQUAL(&(lacpdu->actor_system), &(port->actor_system))) {
pr_err("%s: An illegal loopback occurred on adapter (%s).\n"
"Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
port->slave->bond->dev->name, port->slave->dev->name);
@@ -1085,7 +1094,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT));
port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
break;
- default: //to silence the compiler
+ default: /* to silence the compiler */
break;
}
}
@@ -1276,17 +1285,17 @@ static void ad_port_selection_logic(struct port *port)
free_aggregator = aggregator;
continue;
}
- // check if current aggregator suits us
- if (((aggregator->actor_oper_aggregator_key == port->actor_oper_port_key) && // if all parameters match AND
- !MAC_ADDRESS_COMPARE(&(aggregator->partner_system), &(port->partner_oper.system)) &&
+ /* check if current aggregator suits us */
+ if (((aggregator->actor_oper_aggregator_key == port->actor_oper_port_key) && /* if all parameters match AND */
+ MAC_ADDRESS_EQUAL(&(aggregator->partner_system), &(port->partner_oper.system)) &&
(aggregator->partner_system_priority == port->partner_oper.system_priority) &&
(aggregator->partner_oper_aggregator_key == port->partner_oper.key)
) &&
- ((MAC_ADDRESS_COMPARE(&(port->partner_oper.system), &(null_mac_addr)) && // partner answers
- !aggregator->is_individual) // but is not individual OR
+ ((!MAC_ADDRESS_EQUAL(&(port->partner_oper.system), &(null_mac_addr)) && /* partner answers */
+ !aggregator->is_individual) /* but is not individual OR */
)
) {
- // attach to the founded aggregator
+ /* attach to the founded aggregator */
port->aggregator = aggregator;
port->actor_port_aggregator_identifier =
port->aggregator->aggregator_identifier;
@@ -1471,7 +1480,8 @@ static void ad_agg_selection_logic(struct aggregator *agg)
active = __get_active_agg(agg);
best = (active && agg_device_up(active)) ? active : NULL;
- bond_for_each_slave(bond, slave, iter) {
+ rcu_read_lock();
+ bond_for_each_slave_rcu(bond, slave, iter) {
agg = &(SLAVE_AD_INFO(slave).aggregator);
agg->is_active = 0;
@@ -1505,7 +1515,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
active->is_active = 1;
}
- // if there is new best aggregator, activate it
+ /* if there is new best aggregator, activate it */
if (best) {
pr_debug("best Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
best->aggregator_identifier, best->num_of_ports,
@@ -1516,7 +1526,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
best->lag_ports, best->slave,
best->slave ? best->slave->dev->name : "NULL");
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
agg = &(SLAVE_AD_INFO(slave).aggregator);
pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
@@ -1526,10 +1536,11 @@ static void ad_agg_selection_logic(struct aggregator *agg)
agg->is_individual, agg->is_active);
}
- // check if any partner replys
+ /* check if any partner replies */
if (best->is_individual) {
pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
- best->slave ? best->slave->bond->dev->name : "NULL");
+ best->slave ?
+ best->slave->bond->dev->name : "NULL");
}
best->is_active = 1;
@@ -1541,7 +1552,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
best->partner_oper_aggregator_key,
best->is_individual, best->is_active);
- // disable the ports that were related to the former active_aggregator
+ /* disable the ports that were related to the former active_aggregator */
if (active) {
for (port = active->lag_ports; port;
port = port->next_port_in_aggregator) {
@@ -1565,6 +1576,8 @@ static void ad_agg_selection_logic(struct aggregator *agg)
}
}
+ rcu_read_unlock();
+
bond_3ad_set_carrier(bond);
}
@@ -1696,7 +1709,7 @@ static void ad_enable_collecting_distributing(struct port *port)
*/
static void ad_disable_collecting_distributing(struct port *port)
{
- if (port->aggregator && MAC_ADDRESS_COMPARE(&(port->aggregator->partner_system), &(null_mac_addr))) {
+ if (port->aggregator && !MAC_ADDRESS_EQUAL(&(port->aggregator->partner_system), &(null_mac_addr))) {
pr_debug("Disabling port %d(LAG %d)\n",
port->actor_port_number,
port->aggregator->aggregator_identifier);
@@ -1817,8 +1830,8 @@ static u16 aggregator_identifier;
*/
void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
{
- // check that the bond is not initialized yet
- if (MAC_ADDRESS_COMPARE(&(BOND_AD_INFO(bond).system.sys_mac_addr),
+ /* check that the bond is not initialized yet */
+ if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
bond->dev->dev_addr)) {
aggregator_identifier = 0;
@@ -1842,22 +1855,16 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
* Returns: 0 on success
* < 0 on error
*/
-int bond_3ad_bind_slave(struct slave *slave)
+void bond_3ad_bind_slave(struct slave *slave)
{
struct bonding *bond = bond_get_bond_by_slave(slave);
struct port *port;
struct aggregator *aggregator;
- if (bond == NULL) {
- pr_err("%s: The slave %s is not attached to its bond\n",
- slave->bond->dev->name, slave->dev->name);
- return -1;
- }
-
- //check that the slave has not been initialized yet.
+ /* check that the slave has not been initialized yet. */
if (SLAVE_AD_INFO(slave).port.slave != slave) {
- // port initialization
+ /* port initialization */
port = &(SLAVE_AD_INFO(slave).port);
ad_initialize_port(port, bond->params.lacp_fast);
@@ -1865,28 +1872,30 @@ int bond_3ad_bind_slave(struct slave *slave)
__initialize_port_locks(slave);
port->slave = slave;
port->actor_port_number = SLAVE_AD_INFO(slave).id;
- // key is determined according to the link speed, duplex and user key(which is yet not supported)
- // ------------------------------------------------------------
- // Port key : | User key | Speed |Duplex|
- // ------------------------------------------------------------
- // 16 6 1 0
- port->actor_admin_port_key = 0; // initialize this parameter
+ /* key is determined according to the link speed, duplex and user key(which
+ * is yet not supported)
+ * ------------------------------------------------------------
+ * Port key : | User key | Speed |Duplex|
+ * ------------------------------------------------------------
+ * 16 6 1 0
+ */
+ port->actor_admin_port_key = 0; /* initialize this parameter */
port->actor_admin_port_key |= __get_duplex(port);
port->actor_admin_port_key |= (__get_link_speed(port) << 1);
port->actor_oper_port_key = port->actor_admin_port_key;
- // if the port is not full duplex, then the port should be not lacp Enabled
+ /* if the port is not full duplex, then the port should be not lacp Enabled */
if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS))
port->sm_vars &= ~AD_PORT_LACP_ENABLED;
- // actor system is the bond's system
+ /* actor system is the bond's system */
port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
- // tx timer(to verify that no more than MAX_TX_IN_SECOND lacpdu's are sent in one second)
+ /* tx timer(to verify that no more than MAX_TX_IN_SECOND lacpdu's are sent in one second) */
port->sm_tx_timer_counter = ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;
port->aggregator = NULL;
port->next_port_in_aggregator = NULL;
__disable_port(port);
- // aggregator initialization
+ /* aggregator initialization */
aggregator = &(SLAVE_AD_INFO(slave).aggregator);
ad_initialize_agg(aggregator);
@@ -1897,8 +1906,6 @@ int bond_3ad_bind_slave(struct slave *slave)
aggregator->is_active = 0;
aggregator->num_of_ports = 0;
}
-
- return 0;
}
/**
@@ -2069,17 +2076,18 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
struct port *port;
read_lock(&bond->lock);
+ rcu_read_lock();
- //check if there are any slaves
+ /* check if there are any slaves */
if (!bond_has_slaves(bond))
goto re_arm;
- // check if agg_select_timer timer after initialize is timed out
+ /* check if agg_select_timer timer after initialize is timed out */
if (BOND_AD_INFO(bond).agg_select_timer && !(--BOND_AD_INFO(bond).agg_select_timer)) {
- slave = bond_first_slave(bond);
+ slave = bond_first_slave_rcu(bond);
port = slave ? &(SLAVE_AD_INFO(slave).port) : NULL;
- // select the active aggregator for the bond
+ /* select the active aggregator for the bond */
if (port) {
if (!port->slave) {
pr_warning("%s: Warning: bond's first port is uninitialized\n",
@@ -2093,8 +2101,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
bond_3ad_set_carrier(bond);
}
- // for each port run the state machines
- bond_for_each_slave(bond, slave, iter) {
+ /* for each port run the state machines */
+ bond_for_each_slave_rcu(bond, slave, iter) {
port = &(SLAVE_AD_INFO(slave).port);
if (!port->slave) {
pr_warning("%s: Warning: Found an uninitialized port\n",
@@ -2114,7 +2122,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
ad_mux_machine(port);
ad_tx_machine(port);
- // turn off the BEGIN bit, since we already handled it
+ /* turn off the BEGIN bit, since we already handled it */
if (port->sm_vars & AD_PORT_BEGIN)
port->sm_vars &= ~AD_PORT_BEGIN;
@@ -2122,9 +2130,9 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
}
re_arm:
- queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
-
+ rcu_read_unlock();
read_unlock(&bond->lock);
+ queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
}
/**
@@ -2201,20 +2209,25 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
port = &(SLAVE_AD_INFO(slave).port);
- // if slave is null, the whole port is not initialized
+ /* if slave is null, the whole port is not initialized */
if (!port->slave) {
pr_warning("Warning: %s: speed changed for uninitialized port on %s\n",
slave->bond->dev->name, slave->dev->name);
return;
}
+ __get_state_machine_lock(port);
+
port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
port->actor_oper_port_key = port->actor_admin_port_key |=
(__get_link_speed(port) << 1);
pr_debug("Port %d changed speed\n", port->actor_port_number);
- // there is no need to reselect a new aggregator, just signal the
- // state machines to reinitialize
+ /* there is no need to reselect a new aggregator, just signal the
+ * state machines to reinitialize
+ */
port->sm_vars |= AD_PORT_BEGIN;
+
+ __release_state_machine_lock(port);
}
/**
@@ -2229,20 +2242,25 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
port = &(SLAVE_AD_INFO(slave).port);
- // if slave is null, the whole port is not initialized
+ /* if slave is null, the whole port is not initialized */
if (!port->slave) {
pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n",
slave->bond->dev->name, slave->dev->name);
return;
}
+ __get_state_machine_lock(port);
+
port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
port->actor_oper_port_key = port->actor_admin_port_key |=
__get_duplex(port);
pr_debug("Port %d changed duplex\n", port->actor_port_number);
- // there is no need to reselect a new aggregator, just signal the
- // state machines to reinitialize
+ /* there is no need to reselect a new aggregator, just signal the
+ * state machines to reinitialize
+ */
port->sm_vars |= AD_PORT_BEGIN;
+
+ __release_state_machine_lock(port);
}
/**
@@ -2258,15 +2276,21 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
port = &(SLAVE_AD_INFO(slave).port);
- // if slave is null, the whole port is not initialized
+ /* if slave is null, the whole port is not initialized */
if (!port->slave) {
pr_warning("Warning: %s: link status changed for uninitialized port on %s\n",
slave->bond->dev->name, slave->dev->name);
return;
}
- // on link down we are zeroing duplex and speed since some of the adaptors(ce1000.lan) report full duplex/speed instead of N/A(duplex) / 0(speed)
- // on link up we are forcing recheck on the duplex and speed since some of he adaptors(ce1000.lan) report
+ __get_state_machine_lock(port);
+ /* on link down we are zeroing duplex and speed since
+ * some of the adaptors(ce1000.lan) report full duplex/speed
+ * instead of N/A(duplex) / 0(speed).
+ *
+ * on link up we are forcing recheck on the duplex and speed since
+ * some of the adaptors(ce1000.lan) report.
+ */
if (link == BOND_LINK_UP) {
port->is_enabled = true;
port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
@@ -2282,10 +2306,15 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
port->actor_oper_port_key = (port->actor_admin_port_key &=
~AD_SPEED_KEY_BITS);
}
- //BOND_PRINT_DBG(("Port %d changed link status to %s", port->actor_port_number, ((link == BOND_LINK_UP)?"UP":"DOWN")));
- // there is no need to reselect a new aggregator, just signal the
- // state machines to reinitialize
+ pr_debug("Port %d changed link status to %s",
+ port->actor_port_number,
+ (link == BOND_LINK_UP) ? "UP" : "DOWN");
+ /* there is no need to reselect a new aggregator, just signal the
+ * state machines to reinitialize
+ */
port->sm_vars |= AD_PORT_BEGIN;
+
+ __release_state_machine_lock(port);
}
/*
@@ -2303,7 +2332,9 @@ int bond_3ad_set_carrier(struct bonding *bond)
struct aggregator *active;
struct slave *first_slave;
- first_slave = bond_first_slave(bond);
+ rcu_read_lock();
+ first_slave = bond_first_slave_rcu(bond);
+ rcu_read_unlock();
if (!first_slave)
return 0;
active = __get_active_agg(&(SLAVE_AD_INFO(first_slave).aggregator));
@@ -2385,13 +2416,12 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
struct list_head *iter;
int slaves_in_agg;
int slave_agg_no;
- int res = 1;
int agg_id;
if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
dev->name);
- goto out;
+ goto err_free;
}
slaves_in_agg = ad_info.ports;
@@ -2399,7 +2429,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
if (slaves_in_agg == 0) {
pr_debug("%s: Error: active aggregator is empty\n", dev->name);
- goto out;
+ goto err_free;
}
slave_agg_no = bond_xmit_hash(bond, skb, slaves_in_agg);
@@ -2418,7 +2448,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
}
if (SLAVE_IS_OK(slave)) {
- res = bond_dev_queue_xmit(bond, skb, slave->dev);
+ bond_dev_queue_xmit(bond, skb, slave->dev);
goto out;
}
}
@@ -2426,21 +2456,22 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
if (slave_agg_no >= 0) {
pr_err("%s: Error: Couldn't find a slave to tx on for aggregator ID %d\n",
dev->name, agg_id);
- goto out;
+ goto err_free;
}
/* we couldn't find any suitable slave after the agg_no, so use the
* first suitable found, if found. */
if (first_ok_slave)
- res = bond_dev_queue_xmit(bond, skb, first_ok_slave->dev);
+ bond_dev_queue_xmit(bond, skb, first_ok_slave->dev);
+ else
+ goto err_free;
out:
- if (res) {
- /* no suitable interface, frame not sent */
- kfree_skb(skb);
- }
-
return NETDEV_TX_OK;
+err_free:
+ /* no suitable interface, frame not sent */
+ kfree_skb(skb);
+ goto out;
}
int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
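A note on the MAC_ADDRESS_EQUAL conversion running through this file: ether_addr_equal_64bits() is a positive-sense test (true when equal, the inverse of the old memcmp-based macro), which is why every call site flips its polarity, and it compares eight bytes at a time, so it is only valid when each address is followed by at least two bytes of accessible memory inside its containing structure, as holds for the lacpdu and port fields used here. The translation pattern, with a hypothetical stand-in for the pr_err in the loopback hunk:

	/* old: !MAC_ADDRESS_COMPARE(a, b)  ->  new:  MAC_ADDRESS_EQUAL(a, b) */
	/* old:  MAC_ADDRESS_COMPARE(a, b)  ->  new: !MAC_ADDRESS_EQUAL(a, b) */
	if (MAC_ADDRESS_EQUAL(&(lacpdu->actor_system), &(port->actor_system)))
		handle_loopback(port);	/* hypothetical */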
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 5d91ad0cc041..13dc9d3c5e34 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -265,7 +265,7 @@ struct ad_slave_info {
// ================= AD Exported functions to the main bonding code ==================
void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution);
-int bond_3ad_bind_slave(struct slave *slave);
+void bond_3ad_bind_slave(struct slave *slave);
void bond_3ad_unbind_slave(struct slave *slave);
void bond_3ad_state_machine_handler(struct work_struct *);
void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 02872405d35d..a2c47476804d 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -12,8 +12,7 @@
* for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
@@ -470,7 +469,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
/* slave being removed should not be active at this point
*
- * Caller must hold bond lock for read
+ * Caller must hold rtnl.
*/
static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
{
@@ -816,7 +815,7 @@ static void rlb_rebalance(struct bonding *bond)
for (; hash_index != RLB_NULL_INDEX;
hash_index = client_info->used_next) {
client_info = &(bond_info->rx_hashtbl[hash_index]);
- assigned_slave = rlb_next_rx_slave(bond);
+ assigned_slave = __rlb_next_rx_slave(bond);
if (assigned_slave && (client_info->slave != assigned_slave)) {
client_info->slave = assigned_slave;
client_info->ntt = 1;
@@ -1372,7 +1371,6 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
int do_tx_balance = 1;
u32 hash_index = 0;
const u8 *hash_start = NULL;
- int res = 1;
struct ipv6hdr *ip6hdr;
skb_reset_mac_header(skb);
@@ -1470,7 +1468,8 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
ETH_ALEN);
}
- res = bond_dev_queue_xmit(bond, skb, tx_slave->dev);
+ bond_dev_queue_xmit(bond, skb, tx_slave->dev);
+ goto out;
} else {
if (tx_slave) {
_lock_tx_hashtbl(bond);
@@ -1479,11 +1478,9 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
}
}
- if (res) {
- /* no suitable interface, frame not sent */
- kfree_skb(skb);
- }
-
+ /* no suitable interface, frame not sent */
+ kfree_skb(skb);
+out:
return NETDEV_TX_OK;
}
@@ -1495,14 +1492,14 @@ void bond_alb_monitor(struct work_struct *work)
struct list_head *iter;
struct slave *slave;
- read_lock(&bond->lock);
-
if (!bond_has_slaves(bond)) {
bond_info->tx_rebalance_counter = 0;
bond_info->lp_counter = 0;
goto re_arm;
}
+ rcu_read_lock();
+
bond_info->tx_rebalance_counter++;
bond_info->lp_counter++;
@@ -1515,7 +1512,7 @@ void bond_alb_monitor(struct work_struct *work)
*/
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave(bond, slave, iter)
+ bond_for_each_slave_rcu(bond, slave, iter)
alb_send_learning_packets(slave, slave->dev->dev_addr);
read_unlock(&bond->curr_slave_lock);
@@ -1528,7 +1525,7 @@ void bond_alb_monitor(struct work_struct *work)
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
tlb_clear_slave(bond, slave, 1);
if (slave == bond->curr_active_slave) {
SLAVE_TLB_INFO(slave).load =
@@ -1552,11 +1549,9 @@ void bond_alb_monitor(struct work_struct *work)
* dev_set_promiscuity requires rtnl and
* nothing else. Avoid race with bond_close.
*/
- read_unlock(&bond->lock);
- if (!rtnl_trylock()) {
- read_lock(&bond->lock);
+ rcu_read_unlock();
+ if (!rtnl_trylock())
goto re_arm;
- }
bond_info->rlb_promisc_timeout_counter = 0;
@@ -1568,7 +1563,7 @@ void bond_alb_monitor(struct work_struct *work)
bond_info->primary_is_promisc = 0;
rtnl_unlock();
- read_lock(&bond->lock);
+ rcu_read_lock();
}
if (bond_info->rlb_rebalance) {
@@ -1590,11 +1585,9 @@ void bond_alb_monitor(struct work_struct *work)
}
}
}
-
+ rcu_read_unlock();
re_arm:
queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
-
- read_unlock(&bond->lock);
}
/* assumption: called before the slave is attached to the bond
@@ -1680,14 +1673,11 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
* If new_slave is NULL, caller must hold curr_slave_lock or
* bond->lock for write.
*
- * If new_slave is not NULL, caller must hold RTNL, bond->lock for
- * read and curr_slave_lock for write. Processing here may sleep, so
- * no other locks may be held.
+ * If new_slave is not NULL, caller must hold RTNL, curr_slave_lock
+ * for write. Processing here may sleep, so no other locks may be held.
*/
void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave)
__releases(&bond->curr_slave_lock)
- __releases(&bond->lock)
- __acquires(&bond->lock)
__acquires(&bond->curr_slave_lock)
{
struct slave *swap_slave;
@@ -1723,7 +1713,6 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
tlb_clear_slave(bond, new_slave, 1);
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
ASSERT_RTNL();
@@ -1749,11 +1738,9 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
/* swap mac address */
alb_swap_mac_addr(swap_slave, new_slave);
alb_fasten_mac_swap(bond, swap_slave, new_slave);
- read_lock(&bond->lock);
} else {
/* set the new_slave to the bond mac address */
alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
- read_lock(&bond->lock);
alb_send_learning_packets(new_slave, bond->dev->dev_addr);
}
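bond_alb_monitor() now runs under RCU instead of bond->lock. Because taking RTNL can sleep, the handler must leave the RCU read-side critical section first, and it simply re-arms itself when rtnl_trylock() loses the race; a skeleton of that dance (need_rtnl_work stands in for the rlb promiscuity-timeout condition):

	rcu_read_lock();
	/* ... walk slaves with bond_for_each_slave_rcu() ... */
	if (need_rtnl_work) {
		rcu_read_unlock();	/* may not sleep under RCU */
		if (!rtnl_trylock())
			goto re_arm;	/* retry on the next tick */
		/* ... RTNL-protected work ... */
		rtnl_unlock();
		rcu_read_lock();
	}
	rcu_read_unlock();
re_arm:
	queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);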
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index 4226044efd08..e09dd4bfafff 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -12,8 +12,7 @@
* for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 4dd5ee2a34cc..e06c4453eabb 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -113,6 +113,7 @@ static int all_slaves_active;
static struct bond_params bonding_defaults;
static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
static int packets_per_slave = 1;
+static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
@@ -189,6 +190,10 @@ module_param(packets_per_slave, int, 0);
MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
"mode; 0 for a random slave, 1 packet per "
"slave (default), >1 packets per slave.");
+module_param(lp_interval, uint, 0);
+MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
+ "the bonding driver sends learning packets to "
+ "each slaves peer switch. The default is 1.");
/*----------------------------- Global variables ----------------------------*/
@@ -299,7 +304,7 @@ const char *bond_mode_name(int mode)
* @skb: hw accel VLAN tagged skb to transmit
* @slave_dev: slave that is supposed to xmit this skbuff
*/
-int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
+void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
struct net_device *slave_dev)
{
skb->dev = slave_dev;
@@ -312,8 +317,6 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
else
dev_queue_xmit(skb);
-
- return 0;
}
/*
@@ -591,33 +594,22 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
* device and retransmit an IGMP JOIN request to the current active
* slave.
*/
-static void bond_resend_igmp_join_requests(struct bonding *bond)
+static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
{
+ struct bonding *bond = container_of(work, struct bonding,
+ mcast_work.work);
+
if (!rtnl_trylock()) {
queue_delayed_work(bond->wq, &bond->mcast_work, 1);
return;
}
call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);
- rtnl_unlock();
- /* We use curr_slave_lock to protect against concurrent access to
- * igmp_retrans from multiple running instances of this function and
- * bond_change_active_slave
- */
- write_lock_bh(&bond->curr_slave_lock);
if (bond->igmp_retrans > 1) {
bond->igmp_retrans--;
queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
}
- write_unlock_bh(&bond->curr_slave_lock);
-}
-
-static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
-{
- struct bonding *bond = container_of(work, struct bonding,
- mcast_work.work);
-
- bond_resend_igmp_join_requests(bond);
+ rtnl_unlock();
}
/* Flush bond's hardware addresses from slave
@@ -697,14 +689,12 @@ static void bond_set_dev_addr(struct net_device *bond_dev,
*
* Perform special MAC address swapping for fail_over_mac settings
*
- * Called with RTNL, bond->lock for read, curr_slave_lock for write_bh.
+ * Called with RTNL, curr_slave_lock for write_bh.
*/
static void bond_do_fail_over_mac(struct bonding *bond,
struct slave *new_active,
struct slave *old_active)
__releases(&bond->curr_slave_lock)
- __releases(&bond->lock)
- __acquires(&bond->lock)
__acquires(&bond->curr_slave_lock)
{
u8 tmp_mac[ETH_ALEN];
@@ -715,9 +705,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
case BOND_FOM_ACTIVE:
if (new_active) {
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
bond_set_dev_addr(bond->dev, new_active->dev);
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
}
break;
@@ -731,7 +719,6 @@ static void bond_do_fail_over_mac(struct bonding *bond,
return;
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
if (old_active) {
memcpy(tmp_mac, new_active->dev->dev_addr, ETH_ALEN);
@@ -761,7 +748,6 @@ static void bond_do_fail_over_mac(struct bonding *bond,
pr_err("%s: Error %d setting MAC of slave %s\n",
bond->dev->name, -rv, new_active->dev->name);
out:
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
break;
default:
@@ -821,7 +807,11 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
static bool bond_should_notify_peers(struct bonding *bond)
{
- struct slave *slave = bond->curr_active_slave;
+ struct slave *slave;
+
+ rcu_read_lock();
+ slave = rcu_dereference(bond->curr_active_slave);
+ rcu_read_unlock();
pr_debug("bond_should_notify_peers: bond %s slave %s\n",
bond->dev->name, slave ? slave->dev->name : "NULL");
@@ -846,8 +836,7 @@ static bool bond_should_notify_peers(struct bonding *bond)
* because it is apparently the best available slave we have, even though its
* updelay hasn't timed out yet.
*
- * If new_active is not NULL, caller must hold bond->lock for read and
- * curr_slave_lock for write_bh.
+ * If new_active is not NULL, caller must hold curr_slave_lock for write_bh.
*/
void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
{
@@ -916,14 +905,12 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
}
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
if (should_notify_peers)
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
bond->dev);
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
}
}
@@ -949,7 +936,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
* - The primary_slave has got its link back.
* - A slave has got its link back and there's no old curr_active_slave.
*
- * Caller must hold bond->lock for read and curr_slave_lock for write_bh.
+ * Caller must hold curr_slave_lock for write_bh.
*/
void bond_select_active_slave(struct bonding *bond)
{
@@ -1594,11 +1581,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_set_carrier(bond);
if (USES_PRIMARY(bond->params.mode)) {
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
}
pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
@@ -1618,19 +1603,13 @@ err_detach:
bond_hw_addr_flush(bond_dev, slave_dev);
vlan_vids_del_by_dev(slave_dev, bond_dev);
- write_lock_bh(&bond->lock);
if (bond->primary_slave == new_slave)
bond->primary_slave = NULL;
if (bond->curr_active_slave == new_slave) {
- bond_change_active_slave(bond, NULL);
- write_unlock_bh(&bond->lock);
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
+ bond_change_active_slave(bond, NULL);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
- } else {
- write_unlock_bh(&bond->lock);
}
slave_disable_netpoll(new_slave);
@@ -1658,7 +1637,7 @@ err_free:
err_undo_flags:
/* Enslave of first slave has failed and we need to fix master's mac */
if (!bond_has_slaves(bond) &&
- ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
+ ether_addr_equal_64bits(bond_dev->dev_addr, slave_dev->dev_addr))
eth_hw_addr_random(bond_dev);
return res;
@@ -1695,20 +1674,16 @@ static int __bond_release_one(struct net_device *bond_dev,
}
block_netpoll_tx();
- write_lock_bh(&bond->lock);
slave = bond_get_slave_by_dev(bond, slave_dev);
if (!slave) {
/* not a slave of this bond */
pr_info("%s: %s not enslaved\n",
bond_dev->name, slave_dev->name);
- write_unlock_bh(&bond->lock);
unblock_netpoll_tx();
return -EINVAL;
}
- write_unlock_bh(&bond->lock);
-
/* release the slave from its bond */
bond->slave_cnt--;
@@ -1720,12 +1695,10 @@ static int __bond_release_one(struct net_device *bond_dev,
write_lock_bh(&bond->lock);
/* Inform AD package of unbinding of slave. */
- if (bond->params.mode == BOND_MODE_8023AD) {
- /* must be called before the slave is
- * detached from the list
- */
+ if (bond->params.mode == BOND_MODE_8023AD)
bond_3ad_unbind_slave(slave);
- }
+
+ write_unlock_bh(&bond->lock);
pr_info("%s: releasing %s interface %s\n",
bond_dev->name,
@@ -1737,7 +1710,7 @@ static int __bond_release_one(struct net_device *bond_dev,
bond->current_arp_slave = NULL;
if (!all && !bond->params.fail_over_mac) {
- if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
+ if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond_has_slaves(bond))
pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
bond_dev->name, slave_dev->name,
@@ -1748,8 +1721,11 @@ static int __bond_release_one(struct net_device *bond_dev,
if (bond->primary_slave == slave)
bond->primary_slave = NULL;
- if (oldcurrent == slave)
+ if (oldcurrent == slave) {
+ write_lock_bh(&bond->curr_slave_lock);
bond_change_active_slave(bond, NULL);
+ write_unlock_bh(&bond->curr_slave_lock);
+ }
if (bond_is_lb(bond)) {
/* Must be called only after the slave has been
@@ -1757,9 +1733,7 @@ static int __bond_release_one(struct net_device *bond_dev,
* has been cleared (if our_slave == old_current),
* but before a new active slave is selected.
*/
- write_unlock_bh(&bond->lock);
bond_alb_deinit_slave(bond, slave);
- write_lock_bh(&bond->lock);
}
if (all) {
@@ -1770,15 +1744,11 @@ static int __bond_release_one(struct net_device *bond_dev,
* is no concern that another slave add/remove event
* will interfere.
*/
- write_unlock_bh(&bond->lock);
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
- write_lock_bh(&bond->lock);
}
if (!bond_has_slaves(bond)) {
@@ -1793,7 +1763,6 @@ static int __bond_release_one(struct net_device *bond_dev,
}
}
- write_unlock_bh(&bond->lock);
unblock_netpoll_tx();
synchronize_rcu();
@@ -1928,7 +1897,7 @@ static int bond_miimon_inspect(struct bonding *bond)
ignore_updelay = !bond->curr_active_slave ? true : false;
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
slave->new_link = BOND_LINK_NOCHANGE;
link_state = bond_check_dev_link(bond, slave->dev, 0);
@@ -2119,48 +2088,42 @@ do_failover:
* an acquisition of appropriate locks followed by a commit phase to
* implement whatever link state changes are indicated.
*/
-void bond_mii_monitor(struct work_struct *work)
+static void bond_mii_monitor(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
mii_work.work);
bool should_notify_peers = false;
unsigned long delay;
- read_lock(&bond->lock);
-
delay = msecs_to_jiffies(bond->params.miimon);
if (!bond_has_slaves(bond))
goto re_arm;
+ rcu_read_lock();
+
should_notify_peers = bond_should_notify_peers(bond);
if (bond_miimon_inspect(bond)) {
- read_unlock(&bond->lock);
+ rcu_read_unlock();
/* Race avoidance with bond_close cancel of workqueue */
if (!rtnl_trylock()) {
- read_lock(&bond->lock);
delay = 1;
should_notify_peers = false;
goto re_arm;
}
- read_lock(&bond->lock);
-
bond_miimon_commit(bond);
- read_unlock(&bond->lock);
rtnl_unlock(); /* might sleep, hold no other locks */
- read_lock(&bond->lock);
- }
+ } else
+ rcu_read_unlock();
re_arm:
if (bond->params.miimon)
queue_delayed_work(bond->wq, &bond->mii_work, delay);
- read_unlock(&bond->lock);
-
if (should_notify_peers) {
if (!rtnl_trylock())
return;
@@ -2414,7 +2377,7 @@ static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
* arp is transmitted to generate traffic. see activebackup_arp_monitor for
* arp monitoring in active backup mode.
*/
-void bond_loadbalance_arp_mon(struct work_struct *work)
+static void bond_loadbalance_arp_mon(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
arp_work.work);
@@ -2422,12 +2385,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
struct list_head *iter;
int do_failover = 0;
- read_lock(&bond->lock);
-
if (!bond_has_slaves(bond))
goto re_arm;
- oldcurrent = bond->curr_active_slave;
+ rcu_read_lock();
+
+ oldcurrent = ACCESS_ONCE(bond->curr_active_slave);
/* see if any of the previous devices are up now (i.e. they have
* xmt and rcv traffic). the curr_active_slave does not come into
* the picture unless it is null. also, slave->jiffies is not needed
@@ -2436,7 +2399,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
* TODO: what about up/down delay in arp mode? it wasn't here before
* so it can wait
*/
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
unsigned long trans_start = dev_trans_start(slave->dev);
if (slave->link != BOND_LINK_UP) {
@@ -2498,7 +2461,14 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
bond_arp_send_all(bond, slave);
}
+ rcu_read_unlock();
+
if (do_failover) {
+ /* bond_select_active_slave() must be called with RTNL held
+ * and curr_slave_lock taken for write.
+ */
+ if (!rtnl_trylock())
+ goto re_arm;
block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
@@ -2506,14 +2476,13 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
write_unlock_bh(&bond->curr_slave_lock);
unblock_netpoll_tx();
+ rtnl_unlock();
}
re_arm:
if (bond->params.arp_interval)
queue_delayed_work(bond->wq, &bond->arp_work,
msecs_to_jiffies(bond->params.arp_interval));
-
- read_unlock(&bond->lock);
}
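
As with the MII monitor earlier in this diff, the handler avoids deadlocking against bond_close()'s cancel of the workqueue by taking RTNL with rtnl_trylock() and simply re-arming on failure. A condensed sketch of the idiom; changes_detected() and commit_changes() are placeholders, not driver functions, and the struct fields are assumed for the example:

    struct demo {
            struct workqueue_struct *wq;
            struct delayed_work mon_work;
            unsigned long interval;
    };

    static void demo_monitor(struct work_struct *work)
    {
            struct demo *d = container_of(work, struct demo,
                                          mon_work.work);

            if (changes_detected(d)) {
                    /* bond_close() cancels this work while holding
                     * RTNL; blocking on rtnl_lock() here could
                     * therefore deadlock, so back off and retry on
                     * the next tick instead.
                     */
                    if (!rtnl_trylock())
                            goto re_arm;
                    commit_changes(d);      /* placeholder commit step */
                    rtnl_unlock();
            }
    re_arm:
            queue_delayed_work(d->wq, &d->mon_work, d->interval);
    }
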
/*
@@ -2522,7 +2491,7 @@ re_arm:
* place for the slave. Returns 0 if no changes are found, >0 if changes
* to link states must be committed.
*
- * Called with bond->lock held for read.
+ * Called with rcu_read_lock held.
*/
static int bond_ab_arp_inspect(struct bonding *bond)
{
@@ -2531,7 +2500,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
struct slave *slave;
int commit = 0;
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
slave->new_link = BOND_LINK_NOCHANGE;
last_rx = slave_last_rx(bond, slave);
@@ -2593,7 +2562,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
* Called to commit link state changes noted by inspection step of
* active-backup mode ARP monitor.
*
- * Called with RTNL and bond->lock for read.
+ * Called with RTNL held.
*/
static void bond_ab_arp_commit(struct bonding *bond)
{
@@ -2668,19 +2637,20 @@ do_failover:
/*
* Send ARP probes for active-backup mode ARP monitor.
*
- * Called with bond->lock held for read.
+ * Called with rcu_read_lock held.
*/
static void bond_ab_arp_probe(struct bonding *bond)
{
- struct slave *slave, *before = NULL, *new_slave = NULL;
+ struct slave *slave, *before = NULL, *new_slave = NULL,
+ *curr_arp_slave = rcu_dereference(bond->current_arp_slave);
struct list_head *iter;
bool found = false;
read_lock(&bond->curr_slave_lock);
- if (bond->current_arp_slave && bond->curr_active_slave)
+ if (curr_arp_slave && bond->curr_active_slave)
pr_info("PROBE: c_arp %s && cas %s BAD\n",
- bond->current_arp_slave->dev->name,
+ curr_arp_slave->dev->name,
bond->curr_active_slave->dev->name);
if (bond->curr_active_slave) {
@@ -2696,15 +2666,15 @@ static void bond_ab_arp_probe(struct bonding *bond)
* for becoming the curr_active_slave
*/
- if (!bond->current_arp_slave) {
- bond->current_arp_slave = bond_first_slave(bond);
- if (!bond->current_arp_slave)
+ if (!curr_arp_slave) {
+ curr_arp_slave = bond_first_slave_rcu(bond);
+ if (!curr_arp_slave)
return;
}
- bond_set_slave_inactive_flags(bond->current_arp_slave);
+ bond_set_slave_inactive_flags(curr_arp_slave);
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (!found && !before && IS_UP(slave->dev))
before = slave;
@@ -2727,7 +2697,7 @@ static void bond_ab_arp_probe(struct bonding *bond)
pr_info("%s: backup interface %s is now down.\n",
bond->dev->name, slave->dev->name);
}
- if (slave == bond->current_arp_slave)
+ if (slave == curr_arp_slave)
found = true;
}
@@ -2741,54 +2711,48 @@ static void bond_ab_arp_probe(struct bonding *bond)
bond_set_slave_active_flags(new_slave);
bond_arp_send_all(bond, new_slave);
new_slave->jiffies = jiffies;
- bond->current_arp_slave = new_slave;
-
+ rcu_assign_pointer(bond->current_arp_slave, new_slave);
}
-void bond_activebackup_arp_mon(struct work_struct *work)
+static void bond_activebackup_arp_mon(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
arp_work.work);
bool should_notify_peers = false;
int delta_in_ticks;
- read_lock(&bond->lock);
-
delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
if (!bond_has_slaves(bond))
goto re_arm;
+ rcu_read_lock();
+
should_notify_peers = bond_should_notify_peers(bond);
if (bond_ab_arp_inspect(bond)) {
- read_unlock(&bond->lock);
+ rcu_read_unlock();
/* Race avoidance with bond_close flush of workqueue */
if (!rtnl_trylock()) {
- read_lock(&bond->lock);
delta_in_ticks = 1;
should_notify_peers = false;
goto re_arm;
}
- read_lock(&bond->lock);
-
bond_ab_arp_commit(bond);
- read_unlock(&bond->lock);
rtnl_unlock();
- read_lock(&bond->lock);
+ rcu_read_lock();
}
bond_ab_arp_probe(bond);
+ rcu_read_unlock();
re_arm:
if (bond->params.arp_interval)
queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
- read_unlock(&bond->lock);
-
if (should_notify_peers) {
if (!rtnl_trylock())
return;
@@ -3550,7 +3514,7 @@ unwind:
* it fails, it tries to find the first available slave for transmission.
* The skb is consumed in all cases, thus the function is void.
*/
-void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
+static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
{
struct list_head *iter;
struct slave *slave;
@@ -3707,28 +3671,24 @@ static inline int bond_slave_override(struct bonding *bond,
struct sk_buff *skb)
{
struct slave *slave = NULL;
- struct slave *check_slave;
struct list_head *iter;
- int res = 1;
if (!skb->queue_mapping)
return 1;
/* Find out if any slaves have the same mapping as this skb. */
- bond_for_each_slave_rcu(bond, check_slave, iter) {
- if (check_slave->queue_id == skb->queue_mapping) {
- slave = check_slave;
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ if (slave->queue_id == skb->queue_mapping) {
+ if (slave_can_tx(slave)) {
+ bond_dev_queue_xmit(bond, skb, slave->dev);
+ return 0;
+ }
+ /* If the slave isn't UP, use default transmit policy. */
break;
}
}
- /* If the slave isn't UP, use default transmit policy. */
- if (slave && slave->queue_id && IS_UP(slave->dev) &&
- (slave->link == BOND_LINK_UP)) {
- res = bond_dev_queue_xmit(bond, skb, slave->dev);
- }
-
- return res;
+ return 1;
}
@@ -3973,6 +3933,29 @@ static void bond_uninit(struct net_device *bond_dev)
/*------------------------- Module initialization ---------------------------*/
+int bond_parm_tbl_lookup(int mode, const struct bond_parm_tbl *tbl)
+{
+ int i;
+
+ for (i = 0; tbl[i].modename; i++)
+ if (mode == tbl[i].mode)
+ return tbl[i].mode;
+
+ return -1;
+}
+
+static int bond_parm_tbl_lookup_name(const char *modename,
+ const struct bond_parm_tbl *tbl)
+{
+ int i;
+
+ for (i = 0; tbl[i].modename; i++)
+ if (strcmp(modename, tbl[i].modename) == 0)
+ return tbl[i].mode;
+
+ return -1;
+}
+
/*
* Convert string input module parms. Accept either the
* number of the mode or its string name. A bit complicated because
@@ -3981,27 +3964,17 @@ static void bond_uninit(struct net_device *bond_dev)
*/
int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl)
{
- int modeint = -1, i, rv;
- char *p, modestr[BOND_MAX_MODENAME_LEN + 1] = { 0, };
+ int modeint;
+ char *p, modestr[BOND_MAX_MODENAME_LEN + 1];
for (p = (char *)buf; *p; p++)
if (!(isdigit(*p) || isspace(*p)))
break;
- if (*p)
- rv = sscanf(buf, "%20s", modestr);
- else
- rv = sscanf(buf, "%d", &modeint);
-
- if (!rv)
- return -1;
-
- for (i = 0; tbl[i].modename; i++) {
- if (modeint == tbl[i].mode)
- return tbl[i].mode;
- if (strcmp(modestr, tbl[i].modename) == 0)
- return tbl[i].mode;
- }
+ if (*p && sscanf(buf, "%20s", modestr) != 0)
+ return bond_parm_tbl_lookup_name(modestr, tbl);
+ else if (sscanf(buf, "%d", &modeint) != 0)
+ return bond_parm_tbl_lookup(modeint, tbl);
return -1;
}
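
With the lookup split out, bond_parse_parm() resolves either a mode name or a mode number through the same table scan. For illustration only, with made-up table contents (the driver's real bond_mode_tbl differs):

    static const struct bond_parm_tbl demo_tbl[] = {
            { "balance-rr",    0 },
            { "active-backup", 1 },
            { NULL,           -1 },  /* NULL modename terminates the scan */
    };

    /* bond_parse_parm("active-backup", demo_tbl) -> 1  (name lookup)
     * bond_parse_parm("1", demo_tbl)             -> 1  (numeric lookup)
     * bond_parse_parm("bogus", demo_tbl)         -> -1 (no match)
     */
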
@@ -4105,12 +4078,12 @@ static int bond_check_params(struct bond_params *params)
num_peer_notif = 1;
}
- /* reset values for 802.3ad */
- if (bond_mode == BOND_MODE_8023AD) {
+ /* reset values for 802.3ad/TLB/ALB */
+ if (BOND_NO_USES_ARP(bond_mode)) {
if (!miimon) {
pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
pr_warning("Forcing miimon to 100msec\n");
- miimon = 100;
+ miimon = BOND_DEFAULT_MIIMON;
}
}
@@ -4141,16 +4114,6 @@ static int bond_check_params(struct bond_params *params)
packets_per_slave = 1;
}
- /* reset values for TLB/ALB */
- if ((bond_mode == BOND_MODE_TLB) ||
- (bond_mode == BOND_MODE_ALB)) {
- if (!miimon) {
- pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure and link speed which are essential for TLB/ALB load balancing\n");
- pr_warning("Forcing miimon to 100msec\n");
- miimon = 100;
- }
- }
-
if (bond_mode == BOND_MODE_ALB) {
pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
updelay);
@@ -4199,9 +4162,9 @@ static int bond_check_params(struct bond_params *params)
(arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
/* not complete check, but should be good enough to
catch mistakes */
- __be32 ip = in_aton(arp_ip_target[i]);
- if (!isdigit(arp_ip_target[i][0]) || ip == 0 ||
- ip == htonl(INADDR_BROADCAST)) {
+ __be32 ip;
+ if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
+ IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) {
pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
arp_ip_target[i]);
arp_interval = 0;
@@ -4310,6 +4273,12 @@ static int bond_check_params(struct bond_params *params)
fail_over_mac_value = BOND_FOM_NONE;
}
+ if (lp_interval == 0) {
+ pr_warning("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
+ INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
+ lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
+ }
+
/* fill params struct with the proper values */
params->mode = bond_mode;
params->xmit_policy = xmit_hashtype;
@@ -4329,7 +4298,7 @@ static int bond_check_params(struct bond_params *params)
params->all_slaves_active = all_slaves_active;
params->resend_igmp = resend_igmp;
params->min_links = min_links;
- params->lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
+ params->lp_interval = lp_interval;
if (packets_per_slave > 1)
params->packets_per_slave = reciprocal_value(packets_per_slave);
else
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 40e7b1cb4aea..555c7837d8e6 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -1,6 +1,7 @@
/*
* drivers/net/bond/bond_netlink.c - Netlink interface for bonding
* Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2013 Scott Feldman <sfeldma@cumulusnetworks.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -18,11 +19,33 @@
#include <linux/if_ether.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
+#include <linux/reciprocal_div.h>
#include "bonding.h"
static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_MODE] = { .type = NLA_U8 },
[IFLA_BOND_ACTIVE_SLAVE] = { .type = NLA_U32 },
+ [IFLA_BOND_MIIMON] = { .type = NLA_U32 },
+ [IFLA_BOND_UPDELAY] = { .type = NLA_U32 },
+ [IFLA_BOND_DOWNDELAY] = { .type = NLA_U32 },
+ [IFLA_BOND_USE_CARRIER] = { .type = NLA_U8 },
+ [IFLA_BOND_ARP_INTERVAL] = { .type = NLA_U32 },
+ [IFLA_BOND_ARP_IP_TARGET] = { .type = NLA_NESTED },
+ [IFLA_BOND_ARP_VALIDATE] = { .type = NLA_U32 },
+ [IFLA_BOND_ARP_ALL_TARGETS] = { .type = NLA_U32 },
+ [IFLA_BOND_PRIMARY] = { .type = NLA_U32 },
+ [IFLA_BOND_PRIMARY_RESELECT] = { .type = NLA_U8 },
+ [IFLA_BOND_FAIL_OVER_MAC] = { .type = NLA_U8 },
+ [IFLA_BOND_XMIT_HASH_POLICY] = { .type = NLA_U8 },
+ [IFLA_BOND_RESEND_IGMP] = { .type = NLA_U32 },
+ [IFLA_BOND_NUM_PEER_NOTIF] = { .type = NLA_U8 },
+ [IFLA_BOND_ALL_SLAVES_ACTIVE] = { .type = NLA_U8 },
+ [IFLA_BOND_MIN_LINKS] = { .type = NLA_U32 },
+ [IFLA_BOND_LP_INTERVAL] = { .type = NLA_U32 },
+ [IFLA_BOND_PACKETS_PER_SLAVE] = { .type = NLA_U32 },
+ [IFLA_BOND_AD_LACP_RATE] = { .type = NLA_U8 },
+ [IFLA_BOND_AD_SELECT] = { .type = NLA_U8 },
+ [IFLA_BOND_AD_INFO] = { .type = NLA_NESTED },
};
static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -40,16 +63,20 @@ static int bond_changelink(struct net_device *bond_dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct bonding *bond = netdev_priv(bond_dev);
+ int miimon = 0;
int err;
- if (data && data[IFLA_BOND_MODE]) {
+ if (!data)
+ return 0;
+
+ if (data[IFLA_BOND_MODE]) {
int mode = nla_get_u8(data[IFLA_BOND_MODE]);
err = bond_option_mode_set(bond, mode);
if (err)
return err;
}
- if (data && data[IFLA_BOND_ACTIVE_SLAVE]) {
+ if (data[IFLA_BOND_ACTIVE_SLAVE]) {
int ifindex = nla_get_u32(data[IFLA_BOND_ACTIVE_SLAVE]);
struct net_device *slave_dev;
@@ -65,6 +92,185 @@ static int bond_changelink(struct net_device *bond_dev,
if (err)
return err;
}
+ if (data[IFLA_BOND_MIIMON]) {
+ miimon = nla_get_u32(data[IFLA_BOND_MIIMON]);
+
+ err = bond_option_miimon_set(bond, miimon);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_UPDELAY]) {
+ int updelay = nla_get_u32(data[IFLA_BOND_UPDELAY]);
+
+ err = bond_option_updelay_set(bond, updelay);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_DOWNDELAY]) {
+ int downdelay = nla_get_u32(data[IFLA_BOND_DOWNDELAY]);
+
+ err = bond_option_downdelay_set(bond, downdelay);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_USE_CARRIER]) {
+ int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);
+
+ err = bond_option_use_carrier_set(bond, use_carrier);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_ARP_INTERVAL]) {
+ int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
+
+ if (arp_interval && miimon) {
+ pr_err("%s: ARP monitoring cannot be used with MII monitoring.\n",
+ bond->dev->name);
+ return -EINVAL;
+ }
+
+ err = bond_option_arp_interval_set(bond, arp_interval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_ARP_IP_TARGET]) {
+ __be32 targets[BOND_MAX_ARP_TARGETS] = { 0, };
+ struct nlattr *attr;
+ int i = 0, rem;
+
+ nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
+ __be32 target = nla_get_be32(attr);
+ targets[i++] = target;
+ }
+
+ err = bond_option_arp_ip_targets_set(bond, targets, i);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_ARP_VALIDATE]) {
+ int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
+
+ if (arp_validate && miimon) {
+ pr_err("%s: ARP validating cannot be used with MII monitoring.\n",
+ bond->dev->name);
+ return -EINVAL;
+ }
+
+ err = bond_option_arp_validate_set(bond, arp_validate);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_ARP_ALL_TARGETS]) {
+ int arp_all_targets =
+ nla_get_u32(data[IFLA_BOND_ARP_ALL_TARGETS]);
+
+ err = bond_option_arp_all_targets_set(bond, arp_all_targets);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_PRIMARY]) {
+ int ifindex = nla_get_u32(data[IFLA_BOND_PRIMARY]);
+ struct net_device *dev;
+ char *primary = "";
+
+ dev = __dev_get_by_index(dev_net(bond_dev), ifindex);
+ if (dev)
+ primary = dev->name;
+
+ err = bond_option_primary_set(bond, primary);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_PRIMARY_RESELECT]) {
+ int primary_reselect =
+ nla_get_u8(data[IFLA_BOND_PRIMARY_RESELECT]);
+
+ err = bond_option_primary_reselect_set(bond, primary_reselect);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_FAIL_OVER_MAC]) {
+ int fail_over_mac =
+ nla_get_u8(data[IFLA_BOND_FAIL_OVER_MAC]);
+
+ err = bond_option_fail_over_mac_set(bond, fail_over_mac);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_XMIT_HASH_POLICY]) {
+ int xmit_hash_policy =
+ nla_get_u8(data[IFLA_BOND_XMIT_HASH_POLICY]);
+
+ err = bond_option_xmit_hash_policy_set(bond, xmit_hash_policy);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_RESEND_IGMP]) {
+ int resend_igmp =
+ nla_get_u32(data[IFLA_BOND_RESEND_IGMP]);
+
+ err = bond_option_resend_igmp_set(bond, resend_igmp);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_NUM_PEER_NOTIF]) {
+ int num_peer_notif =
+ nla_get_u8(data[IFLA_BOND_NUM_PEER_NOTIF]);
+
+ err = bond_option_num_peer_notif_set(bond, num_peer_notif);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_ALL_SLAVES_ACTIVE]) {
+ int all_slaves_active =
+ nla_get_u8(data[IFLA_BOND_ALL_SLAVES_ACTIVE]);
+
+ err = bond_option_all_slaves_active_set(bond,
+ all_slaves_active);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_MIN_LINKS]) {
+ int min_links =
+ nla_get_u32(data[IFLA_BOND_MIN_LINKS]);
+
+ err = bond_option_min_links_set(bond, min_links);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_LP_INTERVAL]) {
+ int lp_interval =
+ nla_get_u32(data[IFLA_BOND_LP_INTERVAL]);
+
+ err = bond_option_lp_interval_set(bond, lp_interval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_PACKETS_PER_SLAVE]) {
+ int packets_per_slave =
+ nla_get_u32(data[IFLA_BOND_PACKETS_PER_SLAVE]);
+
+ err = bond_option_packets_per_slave_set(bond,
+ packets_per_slave);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_AD_LACP_RATE]) {
+ int lacp_rate =
+ nla_get_u8(data[IFLA_BOND_AD_LACP_RATE]);
+
+ err = bond_option_lacp_rate_set(bond, lacp_rate);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_AD_SELECT]) {
+ int ad_select =
+ nla_get_u8(data[IFLA_BOND_AD_SELECT]);
+
+ err = bond_option_ad_select_set(bond, ad_select);
+ if (err)
+ return err;
+ }
return 0;
}
@@ -83,7 +289,36 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
static size_t bond_get_size(const struct net_device *bond_dev)
{
return nla_total_size(sizeof(u8)) + /* IFLA_BOND_MODE */
- nla_total_size(sizeof(u32)); /* IFLA_BOND_ACTIVE_SLAVE */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_ACTIVE_SLAVE */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIIMON */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_UPDELAY */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_DOWNDELAY */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_USE_CARRIER */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_INTERVAL */
+ /* IFLA_BOND_ARP_IP_TARGET */
+ nla_total_size(sizeof(struct nlattr)) +
+ nla_total_size(sizeof(u32)) * BOND_MAX_ARP_TARGETS +
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_VALIDATE */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_ALL_TARGETS */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_PRIMARY */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_PRIMARY_RESELECT */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_FAIL_OVER_MAC */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_XMIT_HASH_POLICY */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_RESEND_IGMP */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_NUM_PEER_NOTIF */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_ALL_SLAVES_ACTIVE */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIN_LINKS */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_LP_INTERVAL */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_PACKETS_PER_SLAVE */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_LACP_RATE */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_SELECT */
+ nla_total_size(sizeof(struct nlattr)) + /* IFLA_BOND_AD_INFO */
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_AGGREGATOR */
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_NUM_PORTS */
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_ACTOR_KEY */
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_PARTNER_KEY*/
+ nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_INFO_PARTNER_MAC*/
+ 0;
}
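
bond_get_size() has to reserve room for every attribute bond_fill_info() below may emit, which is why each nla_total_size() term is annotated with its IFLA_BOND_* counterpart. A minimal sketch of keeping such a pair in sync; DEMO_ATTR_* and struct demo are assumptions for the example, not kernel definitions:

    static size_t demo_get_size(const struct net_device *dev)
    {
            return nla_total_size(sizeof(u32)) +    /* DEMO_ATTR_A */
                   nla_total_size(sizeof(u8)) +     /* DEMO_ATTR_B */
                   0;
    }

    static int demo_fill_info(struct sk_buff *skb, const struct demo *d)
    {
            /* emits exactly the attributes sized above */
            if (nla_put_u32(skb, DEMO_ATTR_A, d->a) ||
                nla_put_u8(skb, DEMO_ATTR_B, d->b))
                    return -EMSGSIZE;
            return 0;
    }
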
static int bond_fill_info(struct sk_buff *skb,
@@ -91,11 +326,142 @@ static int bond_fill_info(struct sk_buff *skb,
{
struct bonding *bond = netdev_priv(bond_dev);
struct net_device *slave_dev = bond_option_active_slave_get(bond);
+ struct nlattr *targets;
+ unsigned int packets_per_slave;
+ int i, targets_added;
+
+ if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode))
+ goto nla_put_failure;
+
+ if (slave_dev &&
+ nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_MIIMON, bond->params.miimon))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_UPDELAY,
+ bond->params.updelay * bond->params.miimon))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_DOWNDELAY,
+ bond->params.downdelay * bond->params.miimon))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
+ goto nla_put_failure;
+
+ targets = nla_nest_start(skb, IFLA_BOND_ARP_IP_TARGET);
+ if (!targets)
+ goto nla_put_failure;
+
+ targets_added = 0;
+ for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
+ if (bond->params.arp_targets[i]) {
+ nla_put_be32(skb, i, bond->params.arp_targets[i]);
+ targets_added = 1;
+ }
+ }
- if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode) ||
- (slave_dev &&
- nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex)))
+ if (targets_added)
+ nla_nest_end(skb, targets);
+ else
+ nla_nest_cancel(skb, targets);
+
+ if (nla_put_u32(skb, IFLA_BOND_ARP_VALIDATE, bond->params.arp_validate))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_ARP_ALL_TARGETS,
+ bond->params.arp_all_targets))
+ goto nla_put_failure;
+
+ if (bond->primary_slave &&
+ nla_put_u32(skb, IFLA_BOND_PRIMARY,
+ bond->primary_slave->dev->ifindex))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_PRIMARY_RESELECT,
+ bond->params.primary_reselect))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_FAIL_OVER_MAC,
+ bond->params.fail_over_mac))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_XMIT_HASH_POLICY,
+ bond->params.xmit_policy))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_RESEND_IGMP,
+ bond->params.resend_igmp))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_NUM_PEER_NOTIF,
+ bond->params.num_peer_notif))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_ALL_SLAVES_ACTIVE,
+ bond->params.all_slaves_active))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_MIN_LINKS,
+ bond->params.min_links))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_LP_INTERVAL,
+ bond->params.lp_interval))
+ goto nla_put_failure;
+
+ packets_per_slave = bond->params.packets_per_slave;
+ if (packets_per_slave > 1)
+ packets_per_slave = reciprocal_value(packets_per_slave);
+
+ if (nla_put_u32(skb, IFLA_BOND_PACKETS_PER_SLAVE,
+ packets_per_slave))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_AD_LACP_RATE,
+ bond->params.lacp_fast))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_AD_SELECT,
+ bond->params.ad_select))
goto nla_put_failure;
+
+ if (bond->params.mode == BOND_MODE_8023AD) {
+ struct ad_info info;
+
+ if (!bond_3ad_get_active_agg_info(bond, &info)) {
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, IFLA_BOND_AD_INFO);
+ if (!nest)
+ goto nla_put_failure;
+
+ if (nla_put_u16(skb, IFLA_BOND_AD_INFO_AGGREGATOR,
+ info.aggregator_id))
+ goto nla_put_failure;
+ if (nla_put_u16(skb, IFLA_BOND_AD_INFO_NUM_PORTS,
+ info.ports))
+ goto nla_put_failure;
+ if (nla_put_u16(skb, IFLA_BOND_AD_INFO_ACTOR_KEY,
+ info.actor_key))
+ goto nla_put_failure;
+ if (nla_put_u16(skb, IFLA_BOND_AD_INFO_PARTNER_KEY,
+ info.partner_key))
+ goto nla_put_failure;
+ if (nla_put(skb, IFLA_BOND_AD_INFO_PARTNER_MAC,
+ sizeof(info.partner_system),
+ &info.partner_system))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest);
+ }
+ }
+
return 0;
nla_put_failure:
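
The ARP-target code above shows the nla_nest_start()/nla_nest_end()/nla_nest_cancel() idiom: the nest is committed only when at least one target was written, otherwise the skb is rewound so no empty nested attribute goes out. A hedged stand-alone sketch; DEMO_ATTR_NESTED and the items array are assumptions:

    static int demo_fill_targets(struct sk_buff *skb, const __be32 *items,
                                 int n_items)
    {
            struct nlattr *nest;
            int i, added = 0;

            nest = nla_nest_start(skb, DEMO_ATTR_NESTED); /* assumed id */
            if (!nest)
                    return -EMSGSIZE;

            for (i = 0; i < n_items; i++) {
                    if (items[i]) {                 /* skip empty slots */
                            nla_put_be32(skb, i, items[i]);
                            added = 1;
                    }
            }

            if (added)
                    nla_nest_end(skb, nest);        /* commit the nest */
            else
                    nla_nest_cancel(skb, nest);     /* rewind: emit nothing */

            return 0;
    }
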
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 9a5223c7b4d1..945a6668da83 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1,6 +1,7 @@
/*
* drivers/net/bond/bond_options.c - bonding options
* Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2013 Scott Feldman <sfeldma@cumulusnetworks.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -15,21 +16,14 @@
#include <linux/netdevice.h>
#include <linux/rwlock.h>
#include <linux/rcupdate.h>
+#include <linux/reciprocal_div.h>
#include "bonding.h"
-static bool bond_mode_is_valid(int mode)
-{
- int i;
-
- for (i = 0; bond_mode_tbl[i].modename; i++);
-
- return mode >= 0 && mode < i;
-}
-
int bond_option_mode_set(struct bonding *bond, int mode)
{
- if (!bond_mode_is_valid(mode)) {
- pr_err("invalid mode value %d.\n", mode);
+ if (bond_parm_tbl_lookup(mode, bond_mode_tbl) < 0) {
+ pr_err("%s: Ignoring invalid mode value %d.\n",
+ bond->dev->name, mode);
return -EINVAL;
}
@@ -45,10 +39,15 @@ int bond_option_mode_set(struct bonding *bond, int mode)
return -EPERM;
}
- if (BOND_MODE_IS_LB(mode) && bond->params.arp_interval) {
- pr_err("%s: %s mode is incompatible with arp monitoring.\n",
- bond->dev->name, bond_mode_tbl[mode].modename);
- return -EINVAL;
+ if (BOND_NO_USES_ARP(mode) && bond->params.arp_interval) {
+ pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n",
+ bond->dev->name, bond_mode_tbl[mode].modename);
+ /* disable arp monitoring */
+ bond->params.arp_interval = 0;
+ /* set miimon to default value */
+ bond->params.miimon = BOND_DEFAULT_MIIMON;
+ pr_info("%s: Setting MII monitoring interval to %d.\n",
+ bond->dev->name, bond->params.miimon);
}
/* don't cache arp_validate between modes */
@@ -101,7 +100,6 @@ int bond_option_active_slave_set(struct bonding *bond,
}
block_netpoll_tx();
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
/* check to see if we are clearing active */
@@ -136,7 +134,599 @@ int bond_option_active_slave_set(struct bonding *bond,
}
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
unblock_netpoll_tx();
return ret;
}
+
+int bond_option_miimon_set(struct bonding *bond, int miimon)
+{
+ if (miimon < 0) {
+ pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n",
+ bond->dev->name, miimon, 0, INT_MAX);
+ return -EINVAL;
+ }
+ pr_info("%s: Setting MII monitoring interval to %d.\n",
+ bond->dev->name, miimon);
+ bond->params.miimon = miimon;
+ if (bond->params.updelay)
+ pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
+ bond->dev->name,
+ bond->params.updelay * bond->params.miimon);
+ if (bond->params.downdelay)
+ pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
+ bond->dev->name,
+ bond->params.downdelay * bond->params.miimon);
+ if (miimon && bond->params.arp_interval) {
+ pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
+ bond->dev->name);
+ bond->params.arp_interval = 0;
+ if (bond->params.arp_validate)
+ bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
+ }
+ if (bond->dev->flags & IFF_UP) {
+ /* If the interface is up, we may need to fire off
+ * the MII timer. If the interface is down, the
+ * timer will get fired off when the open function
+ * is called.
+ */
+ if (!miimon) {
+ cancel_delayed_work_sync(&bond->mii_work);
+ } else {
+ cancel_delayed_work_sync(&bond->arp_work);
+ queue_delayed_work(bond->wq, &bond->mii_work, 0);
+ }
+ }
+ return 0;
+}
+
+int bond_option_updelay_set(struct bonding *bond, int updelay)
+{
+ if (!(bond->params.miimon)) {
+ pr_err("%s: Unable to set up delay as MII monitoring is disabled\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+
+ if (updelay < 0) {
+ pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n",
+ bond->dev->name, updelay, 0, INT_MAX);
+ return -EINVAL;
+ } else {
+ if ((updelay % bond->params.miimon) != 0) {
+ pr_warn("%s: Warning: up delay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
+ bond->dev->name, updelay,
+ bond->params.miimon,
+ (updelay / bond->params.miimon) *
+ bond->params.miimon);
+ }
+ bond->params.updelay = updelay / bond->params.miimon;
+ pr_info("%s: Setting up delay to %d.\n",
+ bond->dev->name,
+ bond->params.updelay * bond->params.miimon);
+ }
+
+ return 0;
+}
+
+int bond_option_downdelay_set(struct bonding *bond, int downdelay)
+{
+ if (!(bond->params.miimon)) {
+ pr_err("%s: Unable to set down delay as MII monitoring is disabled\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+
+ if (downdelay < 0) {
+ pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
+ bond->dev->name, downdelay, 0, INT_MAX);
+ return -EINVAL;
+ } else {
+ if ((downdelay % bond->params.miimon) != 0) {
+ pr_warn("%s: Warning: down delay (%d) is not a multiple of miimon (%d), delay rounded to %d ms\n",
+ bond->dev->name, downdelay,
+ bond->params.miimon,
+ (downdelay / bond->params.miimon) *
+ bond->params.miimon);
+ }
+ bond->params.downdelay = downdelay / bond->params.miimon;
+ pr_info("%s: Setting down delay to %d.\n",
+ bond->dev->name,
+ bond->params.downdelay * bond->params.miimon);
+ }
+
+ return 0;
+}
+
+int bond_option_use_carrier_set(struct bonding *bond, int use_carrier)
+{
+ if ((use_carrier == 0) || (use_carrier == 1)) {
+ bond->params.use_carrier = use_carrier;
+ pr_info("%s: Setting use_carrier to %d.\n",
+ bond->dev->name, use_carrier);
+ } else {
+ pr_info("%s: Ignoring invalid use_carrier value %d.\n",
+ bond->dev->name, use_carrier);
+ }
+
+ return 0;
+}
+
+int bond_option_arp_interval_set(struct bonding *bond, int arp_interval)
+{
+ if (arp_interval < 0) {
+ pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n",
+ bond->dev->name, arp_interval, INT_MAX);
+ return -EINVAL;
+ }
+ if (BOND_NO_USES_ARP(bond->params.mode)) {
+ pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n",
+ bond->dev->name, bond->dev->name);
+ return -EINVAL;
+ }
+ pr_info("%s: Setting ARP monitoring interval to %d.\n",
+ bond->dev->name, arp_interval);
+ bond->params.arp_interval = arp_interval;
+ if (arp_interval) {
+ if (bond->params.miimon) {
+ pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
+ bond->dev->name, bond->dev->name);
+ bond->params.miimon = 0;
+ }
+ if (!bond->params.arp_targets[0])
+ pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
+ bond->dev->name);
+ }
+ if (bond->dev->flags & IFF_UP) {
+ /* If the interface is up, we may need to fire off
+ * the ARP timer. If the interface is down, the
+ * timer will get fired off when the open function
+ * is called.
+ */
+ if (!arp_interval) {
+ if (bond->params.arp_validate)
+ bond->recv_probe = NULL;
+ cancel_delayed_work_sync(&bond->arp_work);
+ } else {
+ /* arp_validate can be set only in active-backup mode */
+ if (bond->params.arp_validate)
+ bond->recv_probe = bond_arp_rcv;
+ cancel_delayed_work_sync(&bond->mii_work);
+ queue_delayed_work(bond->wq, &bond->arp_work, 0);
+ }
+ }
+
+ return 0;
+}
+
+static void _bond_options_arp_ip_target_set(struct bonding *bond, int slot,
+ __be32 target,
+ unsigned long last_rx)
+{
+ __be32 *targets = bond->params.arp_targets;
+ struct list_head *iter;
+ struct slave *slave;
+
+ if (slot >= 0 && slot < BOND_MAX_ARP_TARGETS) {
+ bond_for_each_slave(bond, slave, iter)
+ slave->target_last_arp_rx[slot] = last_rx;
+ targets[slot] = target;
+ }
+}
+
+static int _bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
+{
+ __be32 *targets = bond->params.arp_targets;
+ int ind;
+
+ if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) {
+ pr_err("%s: invalid ARP target %pI4 specified for addition\n",
+ bond->dev->name, &target);
+ return -EINVAL;
+ }
+
+ if (bond_get_targets_ip(targets, target) != -1) { /* dup */
+ pr_err("%s: ARP target %pI4 is already present\n",
+ bond->dev->name, &target);
+ return -EINVAL;
+ }
+
+ ind = bond_get_targets_ip(targets, 0); /* first free slot */
+ if (ind == -1) {
+ pr_err("%s: ARP target table is full!\n",
+ bond->dev->name);
+ return -EINVAL;
+ }
+
+ pr_info("%s: adding ARP target %pI4.\n", bond->dev->name, &target);
+
+ _bond_options_arp_ip_target_set(bond, ind, target, jiffies);
+
+ return 0;
+}
+
+int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
+{
+ int ret;
+
+ /* not to race with bond_arp_rcv */
+ write_lock_bh(&bond->lock);
+ ret = _bond_option_arp_ip_target_add(bond, target);
+ write_unlock_bh(&bond->lock);
+
+ return ret;
+}
+
+int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
+{
+ __be32 *targets = bond->params.arp_targets;
+ struct list_head *iter;
+ struct slave *slave;
+ unsigned long *targets_rx;
+ int ind, i;
+
+ if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) {
+ pr_err("%s: invalid ARP target %pI4 specified for removal\n",
+ bond->dev->name, &target);
+ return -EINVAL;
+ }
+
+ ind = bond_get_targets_ip(targets, target);
+ if (ind == -1) {
+ pr_err("%s: unable to remove nonexistent ARP target %pI4.\n",
+ bond->dev->name, &target);
+ return -EINVAL;
+ }
+
+ if (ind == 0 && !targets[1] && bond->params.arp_interval)
+ pr_warn("%s: removing last arp target with arp_interval on\n",
+ bond->dev->name);
+
+ pr_info("%s: removing ARP target %pI4.\n", bond->dev->name,
+ &target);
+
+ /* not to race with bond_arp_rcv */
+ write_lock_bh(&bond->lock);
+
+ bond_for_each_slave(bond, slave, iter) {
+ targets_rx = slave->target_last_arp_rx;
+ for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++)
+ targets_rx[i] = targets_rx[i+1];
+ targets_rx[i] = 0;
+ }
+ for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++)
+ targets[i] = targets[i+1];
+ targets[i] = 0;
+
+ write_unlock_bh(&bond->lock);
+
+ return 0;
+}
+
+int bond_option_arp_ip_targets_set(struct bonding *bond, __be32 *targets,
+ int count)
+{
+ int i, ret = 0;
+
+ /* not to race with bond_arp_rcv */
+ write_lock_bh(&bond->lock);
+
+ /* clear table */
+ for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
+ _bond_options_arp_ip_target_set(bond, i, 0, 0);
+
+ if (count == 0 && bond->params.arp_interval)
+ pr_warn("%s: removing last arp target with arp_interval on\n",
+ bond->dev->name);
+
+ for (i = 0; i < count; i++) {
+ ret = _bond_option_arp_ip_target_add(bond, targets[i]);
+ if (ret)
+ break;
+ }
+
+ write_unlock_bh(&bond->lock);
+ return ret;
+}
+
+int bond_option_arp_validate_set(struct bonding *bond, int arp_validate)
+{
+ if (bond_parm_tbl_lookup(arp_validate, arp_validate_tbl) < 0) {
+ pr_err("%s: Ignoring invalid arp_validate value %d.\n",
+ bond->dev->name, arp_validate);
+ return -EINVAL;
+ }
+
+ if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+ pr_err("%s: arp_validate only supported in active-backup mode.\n",
+ bond->dev->name);
+ return -EINVAL;
+ }
+
+ pr_info("%s: setting arp_validate to %s (%d).\n",
+ bond->dev->name, arp_validate_tbl[arp_validate].modename,
+ arp_validate);
+
+ if (bond->dev->flags & IFF_UP) {
+ if (!arp_validate)
+ bond->recv_probe = NULL;
+ else if (bond->params.arp_interval)
+ bond->recv_probe = bond_arp_rcv;
+ }
+ bond->params.arp_validate = arp_validate;
+
+ return 0;
+}
+
+int bond_option_arp_all_targets_set(struct bonding *bond, int arp_all_targets)
+{
+ if (bond_parm_tbl_lookup(arp_all_targets, arp_all_targets_tbl) < 0) {
+ pr_err("%s: Ignoring invalid arp_all_targets value %d.\n",
+ bond->dev->name, arp_all_targets);
+ return -EINVAL;
+ }
+
+ pr_info("%s: setting arp_all_targets to %s (%d).\n",
+ bond->dev->name, arp_all_targets_tbl[arp_all_targets].modename,
+ arp_all_targets);
+
+ bond->params.arp_all_targets = arp_all_targets;
+
+ return 0;
+}
+
+int bond_option_primary_set(struct bonding *bond, const char *primary)
+{
+ struct list_head *iter;
+ struct slave *slave;
+ int err = 0;
+
+ block_netpoll_tx();
+ read_lock(&bond->lock);
+ write_lock_bh(&bond->curr_slave_lock);
+
+ if (!USES_PRIMARY(bond->params.mode)) {
+ pr_err("%s: Unable to set primary slave; %s is in mode %d\n",
+ bond->dev->name, bond->dev->name, bond->params.mode);
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* check to see if we are clearing primary */
+ if (!strlen(primary)) {
+ pr_info("%s: Setting primary slave to None.\n",
+ bond->dev->name);
+ bond->primary_slave = NULL;
+ memset(bond->params.primary, 0, sizeof(bond->params.primary));
+ bond_select_active_slave(bond);
+ goto out;
+ }
+
+ bond_for_each_slave(bond, slave, iter) {
+ if (strncmp(slave->dev->name, primary, IFNAMSIZ) == 0) {
+ pr_info("%s: Setting %s as primary slave.\n",
+ bond->dev->name, slave->dev->name);
+ bond->primary_slave = slave;
+ strcpy(bond->params.primary, slave->dev->name);
+ bond_select_active_slave(bond);
+ goto out;
+ }
+ }
+
+ strncpy(bond->params.primary, primary, IFNAMSIZ);
+ bond->params.primary[IFNAMSIZ - 1] = 0;
+
+ pr_info("%s: Recording %s as primary, but it has not been enslaved to %s yet.\n",
+ bond->dev->name, primary, bond->dev->name);
+
+out:
+ write_unlock_bh(&bond->curr_slave_lock);
+ read_unlock(&bond->lock);
+ unblock_netpoll_tx();
+
+ return err;
+}
+
+int bond_option_primary_reselect_set(struct bonding *bond, int primary_reselect)
+{
+ if (bond_parm_tbl_lookup(primary_reselect, pri_reselect_tbl) < 0) {
+ pr_err("%s: Ignoring invalid primary_reselect value %d.\n",
+ bond->dev->name, primary_reselect);
+ return -EINVAL;
+ }
+
+ bond->params.primary_reselect = primary_reselect;
+ pr_info("%s: setting primary_reselect to %s (%d).\n",
+ bond->dev->name, pri_reselect_tbl[primary_reselect].modename,
+ primary_reselect);
+
+ block_netpoll_tx();
+ write_lock_bh(&bond->curr_slave_lock);
+ bond_select_active_slave(bond);
+ write_unlock_bh(&bond->curr_slave_lock);
+ unblock_netpoll_tx();
+
+ return 0;
+}
+
+int bond_option_fail_over_mac_set(struct bonding *bond, int fail_over_mac)
+{
+ if (bond_parm_tbl_lookup(fail_over_mac, fail_over_mac_tbl) < 0) {
+ pr_err("%s: Ignoring invalid fail_over_mac value %d.\n",
+ bond->dev->name, fail_over_mac);
+ return -EINVAL;
+ }
+
+ if (bond_has_slaves(bond)) {
+ pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+
+ bond->params.fail_over_mac = fail_over_mac;
+ pr_info("%s: Setting fail_over_mac to %s (%d).\n",
+ bond->dev->name, fail_over_mac_tbl[fail_over_mac].modename,
+ fail_over_mac);
+
+ return 0;
+}
+
+int bond_option_xmit_hash_policy_set(struct bonding *bond, int xmit_hash_policy)
+{
+ if (bond_parm_tbl_lookup(xmit_hash_policy, xmit_hashtype_tbl) < 0) {
+ pr_err("%s: Ignoring invalid xmit_hash_policy value %d.\n",
+ bond->dev->name, xmit_hash_policy);
+ return -EINVAL;
+ }
+
+ bond->params.xmit_policy = xmit_hash_policy;
+ pr_info("%s: setting xmit hash policy to %s (%d).\n",
+ bond->dev->name,
+ xmit_hashtype_tbl[xmit_hash_policy].modename, xmit_hash_policy);
+
+ return 0;
+}
+
+int bond_option_resend_igmp_set(struct bonding *bond, int resend_igmp)
+{
+ if (resend_igmp < 0 || resend_igmp > 255) {
+ pr_err("%s: Invalid resend_igmp value %d not in range 0-255; rejected.\n",
+ bond->dev->name, resend_igmp);
+ return -EINVAL;
+ }
+
+ bond->params.resend_igmp = resend_igmp;
+ pr_info("%s: Setting resend_igmp to %d.\n",
+ bond->dev->name, resend_igmp);
+
+ return 0;
+}
+
+int bond_option_num_peer_notif_set(struct bonding *bond, int num_peer_notif)
+{
+ bond->params.num_peer_notif = num_peer_notif;
+ return 0;
+}
+
+int bond_option_all_slaves_active_set(struct bonding *bond,
+ int all_slaves_active)
+{
+ struct list_head *iter;
+ struct slave *slave;
+
+ if (all_slaves_active == bond->params.all_slaves_active)
+ return 0;
+
+ if ((all_slaves_active == 0) || (all_slaves_active == 1)) {
+ bond->params.all_slaves_active = all_slaves_active;
+ } else {
+ pr_info("%s: Ignoring invalid all_slaves_active value %d.\n",
+ bond->dev->name, all_slaves_active);
+ return -EINVAL;
+ }
+
+ bond_for_each_slave(bond, slave, iter) {
+ if (!bond_is_active_slave(slave)) {
+ if (all_slaves_active)
+ slave->inactive = 0;
+ else
+ slave->inactive = 1;
+ }
+ }
+
+ return 0;
+}
+
+int bond_option_min_links_set(struct bonding *bond, int min_links)
+{
+ pr_info("%s: Setting min links value to %u\n",
+ bond->dev->name, min_links);
+ bond->params.min_links = min_links;
+
+ return 0;
+}
+
+int bond_option_lp_interval_set(struct bonding *bond, int lp_interval)
+{
+ if (lp_interval <= 0) {
+ pr_err("%s: lp_interval must be between 1 and %d\n",
+ bond->dev->name, INT_MAX);
+ return -EINVAL;
+ }
+
+ bond->params.lp_interval = lp_interval;
+
+ return 0;
+}
+
+int bond_option_packets_per_slave_set(struct bonding *bond,
+ int packets_per_slave)
+{
+ if (packets_per_slave < 0 || packets_per_slave > USHRT_MAX) {
+ pr_err("%s: packets_per_slave must be between 0 and %u\n",
+ bond->dev->name, USHRT_MAX);
+ return -EINVAL;
+ }
+
+ if (bond->params.mode != BOND_MODE_ROUNDROBIN)
+ pr_warn("%s: Warning: packets_per_slave has effect only in balance-rr mode\n",
+ bond->dev->name);
+
+ if (packets_per_slave > 1)
+ bond->params.packets_per_slave =
+ reciprocal_value(packets_per_slave);
+ else
+ bond->params.packets_per_slave = packets_per_slave;
+
+ return 0;
+}
+
+int bond_option_lacp_rate_set(struct bonding *bond, int lacp_rate)
+{
+ if (bond_parm_tbl_lookup(lacp_rate, bond_lacp_tbl) < 0) {
+ pr_err("%s: Ignoring invalid LACP rate value %d.\n",
+ bond->dev->name, lacp_rate);
+ return -EINVAL;
+ }
+
+ if (bond->dev->flags & IFF_UP) {
+ pr_err("%s: Unable to update LACP rate because interface is up.\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+
+ if (bond->params.mode != BOND_MODE_8023AD) {
+ pr_err("%s: Unable to update LACP rate because bond is not in 802.3ad mode.\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+
+ bond->params.lacp_fast = lacp_rate;
+ bond_3ad_update_lacp_rate(bond);
+ pr_info("%s: Setting LACP rate to %s (%d).\n",
+ bond->dev->name, bond_lacp_tbl[lacp_rate].modename,
+ lacp_rate);
+
+ return 0;
+}
+
+int bond_option_ad_select_set(struct bonding *bond, int ad_select)
+{
+ if (bond_parm_tbl_lookup(ad_select, ad_select_tbl) < 0) {
+ pr_err("%s: Ignoring invalid ad_select value %d.\n",
+ bond->dev->name, ad_select);
+ return -EINVAL;
+ }
+
+ if (bond->dev->flags & IFF_UP) {
+ pr_err("%s: Unable to update ad_select because interface is up.\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+
+ bond->params.ad_select = ad_select;
+ pr_info("%s: Setting ad_select to %s (%d).\n",
+ bond->dev->name, ad_select_tbl[ad_select].modename,
+ ad_select);
+
+ return 0;
+}
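
Every setter added to bond_options.c above follows the same contract: validate first (via bond_parm_tbl_lookup() or a range check), distinguish malformed input (-EINVAL) from a valid value in the wrong state (-EPERM), log, then commit to bond->params. A condensed template with hypothetical names (demo_tbl and params.demo are not driver symbols):

    int bond_option_demo_set(struct bonding *bond, int val)
    {
            if (bond_parm_tbl_lookup(val, demo_tbl) < 0) {
                    pr_err("%s: Ignoring invalid demo value %d.\n",
                           bond->dev->name, val);
                    return -EINVAL;         /* malformed input */
            }

            if (bond->dev->flags & IFF_UP) {
                    pr_err("%s: Unable to update demo while interface is up.\n",
                           bond->dev->name);
                    return -EPERM;          /* valid value, wrong state */
            }

            bond->params.demo = val;        /* hypothetical field */
            pr_info("%s: Setting demo to %d.\n", bond->dev->name, val);
            return 0;
    }
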
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 0ec2a7e8c8a9..011f163c2c67 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -12,8 +12,7 @@
* for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
@@ -319,7 +318,7 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
+ int new_value, ret;
struct bonding *bond = to_bond(d);
new_value = bond_parse_parm(buf, xmit_hashtype_tbl);
@@ -327,14 +326,17 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
pr_err("%s: Ignoring invalid xmit hash policy value %.*s.\n",
bond->dev->name,
(int)strlen(buf) - 1, buf);
- ret = -EINVAL;
- } else {
- bond->params.xmit_policy = new_value;
- pr_info("%s: setting xmit hash policy to %s (%d).\n",
- bond->dev->name,
- xmit_hashtype_tbl[new_value].modename, new_value);
+ return -EINVAL;
}
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = bond_option_xmit_hash_policy_set(bond, new_value);
+ if (!ret)
+ ret = count;
+
+ rtnl_unlock();
return ret;
}
static DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR,
@@ -359,35 +361,21 @@ static ssize_t bonding_store_arp_validate(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int new_value, ret = count;
+ int new_value, ret;
- if (!rtnl_trylock())
- return restart_syscall();
new_value = bond_parse_parm(buf, arp_validate_tbl);
if (new_value < 0) {
pr_err("%s: Ignoring invalid arp_validate value %s\n",
bond->dev->name, buf);
- ret = -EINVAL;
- goto out;
- }
- if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
- pr_err("%s: arp_validate only supported in active-backup mode.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- pr_info("%s: setting arp_validate to %s (%d).\n",
- bond->dev->name, arp_validate_tbl[new_value].modename,
- new_value);
-
- if (bond->dev->flags & IFF_UP) {
- if (!new_value)
- bond->recv_probe = NULL;
- else if (bond->params.arp_interval)
- bond->recv_probe = bond_arp_rcv;
+ return -EINVAL;
}
- bond->params.arp_validate = new_value;
-out:
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = bond_option_arp_validate_set(bond, new_value);
+ if (!ret)
+ ret = count;
+
rtnl_unlock();
return ret;
@@ -414,7 +402,7 @@ static ssize_t bonding_store_arp_all_targets(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int new_value;
+ int new_value, ret;
new_value = bond_parse_parm(buf, arp_all_targets_tbl);
if (new_value < 0) {
@@ -422,13 +410,17 @@ static ssize_t bonding_store_arp_all_targets(struct device *d,
bond->dev->name, buf);
return -EINVAL;
}
- pr_info("%s: setting arp_all_targets to %s (%d).\n",
- bond->dev->name, arp_all_targets_tbl[new_value].modename,
- new_value);
- bond->params.arp_all_targets = new_value;
+ if (!rtnl_trylock())
+ return restart_syscall();
- return count;
+ ret = bond_option_arp_all_targets_set(bond, new_value);
+ if (!ret)
+ ret = count;
+
+ rtnl_unlock();
+
+ return ret;
}
static DEVICE_ATTR(arp_all_targets, S_IRUGO | S_IWUSR,
@@ -453,33 +445,23 @@ static ssize_t bonding_store_fail_over_mac(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
+ int new_value, ret;
struct bonding *bond = to_bond(d);
- if (!rtnl_trylock())
- return restart_syscall();
-
- if (bond_has_slaves(bond)) {
- pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
-
new_value = bond_parse_parm(buf, fail_over_mac_tbl);
if (new_value < 0) {
pr_err("%s: Ignoring invalid fail_over_mac value %s.\n",
bond->dev->name, buf);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
- bond->params.fail_over_mac = new_value;
- pr_info("%s: Setting fail_over_mac to %s (%d).\n",
- bond->dev->name, fail_over_mac_tbl[new_value].modename,
- new_value);
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = bond_option_fail_over_mac_set(bond, new_value);
+ if (!ret)
+ ret = count;
-out:
rtnl_unlock();
return ret;
}
@@ -507,62 +489,21 @@ static ssize_t bonding_store_arp_interval(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int new_value, ret = count;
+ int new_value, ret;
- if (!rtnl_trylock())
- return restart_syscall();
if (sscanf(buf, "%d", &new_value) != 1) {
pr_err("%s: no arp_interval value specified.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- if (new_value < 0) {
- pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n",
- bond->dev->name, new_value, INT_MAX);
- ret = -EINVAL;
- goto out;
- }
- if (bond->params.mode == BOND_MODE_ALB ||
- bond->params.mode == BOND_MODE_TLB ||
- bond->params.mode == BOND_MODE_8023AD) {
- pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n",
- bond->dev->name, bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- pr_info("%s: Setting ARP monitoring interval to %d.\n",
- bond->dev->name, new_value);
- bond->params.arp_interval = new_value;
- if (new_value) {
- if (bond->params.miimon) {
- pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
- bond->dev->name, bond->dev->name);
- bond->params.miimon = 0;
- }
- if (!bond->params.arp_targets[0])
- pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
- bond->dev->name);
- }
- if (bond->dev->flags & IFF_UP) {
- /* If the interface is up, we may need to fire off
- * the ARP timer. If the interface is down, the
- * timer will get fired off when the open function
- * is called.
- */
- if (!new_value) {
- if (bond->params.arp_validate)
- bond->recv_probe = NULL;
- cancel_delayed_work_sync(&bond->arp_work);
- } else {
- /* arp_validate can be set only in active-backup mode */
- if (bond->params.arp_validate)
- bond->recv_probe = bond_arp_rcv;
- cancel_delayed_work_sync(&bond->mii_work);
- queue_delayed_work(bond->wq, &bond->arp_work, 0);
- }
+ bond->dev->name);
+ return -EINVAL;
}
-out:
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = bond_option_arp_interval_set(bond, new_value);
+ if (!ret)
+ ret = count;
+
rtnl_unlock();
return ret;
}
@@ -594,81 +535,29 @@ static ssize_t bonding_store_arp_targets(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- struct list_head *iter;
- struct slave *slave;
- __be32 newtarget, *targets;
- unsigned long *targets_rx;
- int ind, i, j, ret = -EINVAL;
-
- if (!rtnl_trylock())
- return restart_syscall();
+ __be32 target;
+ int ret = -EPERM;
- targets = bond->params.arp_targets;
- if (!in4_pton(buf + 1, -1, (u8 *)&newtarget, -1, NULL) ||
- IS_IP_TARGET_UNUSABLE_ADDRESS(newtarget)) {
- pr_err("%s: invalid ARP target %pI4 specified for addition\n",
- bond->dev->name, &newtarget);
- goto out;
+ if (!in4_pton(buf + 1, -1, (u8 *)&target, -1, NULL)) {
+ pr_err("%s: invalid ARP target %pI4 specified\n",
+ bond->dev->name, &target);
+ return -EPERM;
}
- /* look for adds */
- if (buf[0] == '+') {
- if (bond_get_targets_ip(targets, newtarget) != -1) { /* dup */
- pr_err("%s: ARP target %pI4 is already present\n",
- bond->dev->name, &newtarget);
- goto out;
- }
-
- ind = bond_get_targets_ip(targets, 0); /* first free slot */
- if (ind == -1) {
- pr_err("%s: ARP target table is full!\n",
- bond->dev->name);
- goto out;
- }
-
- pr_info("%s: adding ARP target %pI4.\n", bond->dev->name,
- &newtarget);
- /* not to race with bond_arp_rcv */
- write_lock_bh(&bond->lock);
- bond_for_each_slave(bond, slave, iter)
- slave->target_last_arp_rx[ind] = jiffies;
- targets[ind] = newtarget;
- write_unlock_bh(&bond->lock);
- } else if (buf[0] == '-') {
- ind = bond_get_targets_ip(targets, newtarget);
- if (ind == -1) {
- pr_err("%s: unable to remove nonexistent ARP target %pI4.\n",
- bond->dev->name, &newtarget);
- goto out;
- }
-
- if (ind == 0 && !targets[1] && bond->params.arp_interval)
- pr_warn("%s: removing last arp target with arp_interval on\n",
- bond->dev->name);
- pr_info("%s: removing ARP target %pI4.\n", bond->dev->name,
- &newtarget);
+ if (!rtnl_trylock())
+ return restart_syscall();
- write_lock_bh(&bond->lock);
- bond_for_each_slave(bond, slave, iter) {
- targets_rx = slave->target_last_arp_rx;
- j = ind;
- for (; (j < BOND_MAX_ARP_TARGETS-1) && targets[j+1]; j++)
- targets_rx[j] = targets_rx[j+1];
- targets_rx[j] = 0;
- }
- for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++)
- targets[i] = targets[i+1];
- targets[i] = 0;
- write_unlock_bh(&bond->lock);
- } else {
+ if (buf[0] == '+')
+ ret = bond_option_arp_ip_target_add(bond, target);
+ else if (buf[0] == '-')
+ ret = bond_option_arp_ip_target_rem(bond, target);
+ else
pr_err("no command found in arp_ip_targets file for bond %s. Use +<addr> or -<addr>.\n",
bond->dev->name);
- ret = -EPERM;
- goto out;
- }
- ret = count;
-out:
+ if (!ret)
+ ret = count;
+
rtnl_unlock();
return ret;
}
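
The +/- command syntax of the arp_ip_target file is unchanged; only the list manipulation moves into bond_option_arp_ip_target_add()/_rem(). From userspace the interface still looks like (bond0 assumed):

    echo +192.168.0.1 > /sys/class/net/bond0/bonding/arp_ip_target
    echo -192.168.0.1 > /sys/class/net/bond0/bonding/arp_ip_target
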
@@ -692,44 +581,21 @@ static ssize_t bonding_store_downdelay(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
+ int new_value, ret;
struct bonding *bond = to_bond(d);
- if (!rtnl_trylock())
- return restart_syscall();
- if (!(bond->params.miimon)) {
- pr_err("%s: Unable to set down delay as MII monitoring is disabled\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
-
if (sscanf(buf, "%d", &new_value) != 1) {
pr_err("%s: no down delay value specified.\n", bond->dev->name);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
- if (new_value < 0) {
- pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
- bond->dev->name, new_value, 0, INT_MAX);
- ret = -EINVAL;
- goto out;
- } else {
- if ((new_value % bond->params.miimon) != 0) {
- pr_warning("%s: Warning: down delay (%d) is not a multiple of miimon (%d), delay rounded to %d ms\n",
- bond->dev->name, new_value,
- bond->params.miimon,
- (new_value / bond->params.miimon) *
- bond->params.miimon);
- }
- bond->params.downdelay = new_value / bond->params.miimon;
- pr_info("%s: Setting down delay to %d.\n",
- bond->dev->name,
- bond->params.downdelay * bond->params.miimon);
- }
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = bond_option_downdelay_set(bond, new_value);
+ if (!ret)
+ ret = count;
-out:
rtnl_unlock();
return ret;
}
@@ -750,44 +616,22 @@ static ssize_t bonding_store_updelay(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
+ int new_value, ret;
struct bonding *bond = to_bond(d);
- if (!rtnl_trylock())
- return restart_syscall();
- if (!(bond->params.miimon)) {
- pr_err("%s: Unable to set up delay as MII monitoring is disabled\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
-
if (sscanf(buf, "%d", &new_value) != 1) {
pr_err("%s: no up delay value specified.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- if (new_value < 0) {
- pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n",
- bond->dev->name, new_value, 0, INT_MAX);
- ret = -EINVAL;
- goto out;
- } else {
- if ((new_value % bond->params.miimon) != 0) {
- pr_warning("%s: Warning: up delay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
- bond->dev->name, new_value,
- bond->params.miimon,
- (new_value / bond->params.miimon) *
- bond->params.miimon);
- }
- bond->params.updelay = new_value / bond->params.miimon;
- pr_info("%s: Setting up delay to %d.\n",
- bond->dev->name,
- bond->params.updelay * bond->params.miimon);
+ bond->dev->name);
+ return -EINVAL;
}
-out:
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = bond_option_updelay_set(bond, new_value);
+ if (!ret)
+ ret = count;
+
rtnl_unlock();
return ret;
}
@@ -814,41 +658,23 @@ static ssize_t bonding_store_lacp(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int new_value, ret = count;
-
- if (!rtnl_trylock())
- return restart_syscall();
-
- if (bond->dev->flags & IFF_UP) {
- pr_err("%s: Unable to update LACP rate because interface is up.\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
-
- if (bond->params.mode != BOND_MODE_8023AD) {
- pr_err("%s: Unable to update LACP rate because bond is not in 802.3ad mode.\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
+ int new_value, ret;
new_value = bond_parse_parm(buf, bond_lacp_tbl);
-
- if ((new_value == 1) || (new_value == 0)) {
- bond->params.lacp_fast = new_value;
- bond_3ad_update_lacp_rate(bond);
- pr_info("%s: Setting LACP rate to %s (%d).\n",
- bond->dev->name, bond_lacp_tbl[new_value].modename,
- new_value);
- } else {
+ if (new_value < 0) {
pr_err("%s: Ignoring invalid LACP rate value %.*s.\n",
bond->dev->name, (int)strlen(buf) - 1, buf);
- ret = -EINVAL;
+ return -EINVAL;
}
-out:
- rtnl_unlock();
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = bond_option_lacp_rate_set(bond, new_value);
+ if (!ret)
+ ret = count;
+
+ rtnl_unlock();
return ret;
}
static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR,
@@ -878,10 +704,15 @@ static ssize_t bonding_store_min_links(struct device *d,
return ret;
}
- pr_info("%s: Setting min links value to %u\n",
- bond->dev->name, new_value);
- bond->params.min_links = new_value;
- return count;
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = bond_option_min_links_set(bond, new_value);
+ if (!ret)
+ ret = count;
+
+ rtnl_unlock();
+ return ret;
}
static DEVICE_ATTR(min_links, S_IRUGO | S_IWUSR,
bonding_show_min_links, bonding_store_min_links);
@@ -902,29 +733,24 @@ static ssize_t bonding_store_ad_select(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
+ int new_value, ret;
struct bonding *bond = to_bond(d);
- if (bond->dev->flags & IFF_UP) {
- pr_err("%s: Unable to update ad_select because interface is up.\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
-
new_value = bond_parse_parm(buf, ad_select_tbl);
-
- if (new_value != -1) {
- bond->params.ad_select = new_value;
- pr_info("%s: Setting ad_select to %s (%d).\n",
- bond->dev->name, ad_select_tbl[new_value].modename,
- new_value);
- } else {
+ if (new_value < 0) {
pr_err("%s: Ignoring invalid ad_select value %.*s.\n",
bond->dev->name, (int)strlen(buf) - 1, buf);
- ret = -EINVAL;
+ return -EINVAL;
}
-out:
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = bond_option_ad_select_set(bond, new_value);
+ if (!ret)
+ ret = count;
+
+ rtnl_unlock();
return ret;
}
static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR,
@@ -946,8 +772,25 @@ static ssize_t bonding_store_num_peer_notif(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int err = kstrtou8(buf, 10, &bond->params.num_peer_notif);
- return err ? err : count;
+ u8 new_value;
+ int ret;
+
+ ret = kstrtou8(buf, 10, &new_value);
+ if (ret) {
+ pr_err("%s: invalid value %s specified.\n",
+ bond->dev->name, buf);
+ return ret;
+ }
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = bond_option_num_peer_notif_set(bond, new_value);
+ if (!ret)
+ ret = count;
+
+ rtnl_unlock();
+ return ret;
}
static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR,
bonding_show_num_peer_notif, bonding_store_num_peer_notif);
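
num_peer_notif previously parsed straight into bond->params with no locking; it now parses into a local via kstrtou8() and goes through the rtnl-protected setter. Unlike sscanf("%d"), kstrtou8() is strict — roughly:

    u8 val;
    int err;

    err = kstrtou8("200\n", 10, &val); /* 0, val == 200; one trailing \n is tolerated */
    err = kstrtou8("300", 10, &val);   /* -ERANGE: exceeds u8 */
    err = kstrtou8("20x", 10, &val);   /* -EINVAL: trailing garbage rejected */
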
@@ -973,55 +816,22 @@ static ssize_t bonding_store_miimon(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
+ int new_value, ret;
struct bonding *bond = to_bond(d);
- if (!rtnl_trylock())
- return restart_syscall();
if (sscanf(buf, "%d", &new_value) != 1) {
pr_err("%s: no miimon value specified.\n",
bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- if (new_value < 0) {
- pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n",
- bond->dev->name, new_value, 0, INT_MAX);
- ret = -EINVAL;
- goto out;
- }
- pr_info("%s: Setting MII monitoring interval to %d.\n",
- bond->dev->name, new_value);
- bond->params.miimon = new_value;
- if (bond->params.updelay)
- pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
- bond->dev->name,
- bond->params.updelay * bond->params.miimon);
- if (bond->params.downdelay)
- pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
- bond->dev->name,
- bond->params.downdelay * bond->params.miimon);
- if (new_value && bond->params.arp_interval) {
- pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
- bond->dev->name);
- bond->params.arp_interval = 0;
- if (bond->params.arp_validate)
- bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
- }
- if (bond->dev->flags & IFF_UP) {
- /* If the interface is up, we may need to fire off
- * the MII timer. If the interface is down, the
- * timer will get fired off when the open function
- * is called.
- */
- if (!new_value) {
- cancel_delayed_work_sync(&bond->mii_work);
- } else {
- cancel_delayed_work_sync(&bond->arp_work);
- queue_delayed_work(bond->wq, &bond->mii_work, 0);
- }
+ return -EINVAL;
}
-out:
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = bond_option_miimon_set(bond, new_value);
+ if (!ret)
+ ret = count;
+
rtnl_unlock();
return ret;
}
@@ -1053,58 +863,22 @@ static ssize_t bonding_store_primary(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- struct list_head *iter;
char ifname[IFNAMSIZ];
- struct slave *slave;
-
- if (!rtnl_trylock())
- return restart_syscall();
- block_netpoll_tx();
- read_lock(&bond->lock);
- write_lock_bh(&bond->curr_slave_lock);
-
- if (!USES_PRIMARY(bond->params.mode)) {
- pr_info("%s: Unable to set primary slave; %s is in mode %d\n",
- bond->dev->name, bond->dev->name, bond->params.mode);
- goto out;
- }
+ int ret;
sscanf(buf, "%15s", ifname); /* IFNAMSIZ */
+ if (ifname[0] == '\n')
+ ifname[0] = '\0';
- /* check to see if we are clearing primary */
- if (!strlen(ifname) || buf[0] == '\n') {
- pr_info("%s: Setting primary slave to None.\n",
- bond->dev->name);
- bond->primary_slave = NULL;
- memset(bond->params.primary, 0, sizeof(bond->params.primary));
- bond_select_active_slave(bond);
- goto out;
- }
-
- bond_for_each_slave(bond, slave, iter) {
- if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
- pr_info("%s: Setting %s as primary slave.\n",
- bond->dev->name, slave->dev->name);
- bond->primary_slave = slave;
- strcpy(bond->params.primary, slave->dev->name);
- bond_select_active_slave(bond);
- goto out;
- }
- }
+ if (!rtnl_trylock())
+ return restart_syscall();
- strncpy(bond->params.primary, ifname, IFNAMSIZ);
- bond->params.primary[IFNAMSIZ - 1] = 0;
+ ret = bond_option_primary_set(bond, ifname);
+ if (!ret)
+ ret = count;
- pr_info("%s: Recording %s as primary, "
- "but it has not been enslaved to %s yet.\n",
- bond->dev->name, ifname, bond->dev->name);
-out:
- write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
- unblock_netpoll_tx();
rtnl_unlock();
-
- return count;
+ return ret;
}
static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR,
bonding_show_primary, bonding_store_primary);
@@ -1127,34 +901,24 @@ static ssize_t bonding_store_primary_reselect(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
+ int new_value, ret;
struct bonding *bond = to_bond(d);
- if (!rtnl_trylock())
- return restart_syscall();
-
new_value = bond_parse_parm(buf, pri_reselect_tbl);
if (new_value < 0) {
pr_err("%s: Ignoring invalid primary_reselect value %.*s.\n",
bond->dev->name,
(int) strlen(buf) - 1, buf);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
- bond->params.primary_reselect = new_value;
- pr_info("%s: setting primary_reselect to %s (%d).\n",
- bond->dev->name, pri_reselect_tbl[new_value].modename,
- new_value);
-
- block_netpoll_tx();
- read_lock(&bond->lock);
- write_lock_bh(&bond->curr_slave_lock);
- bond_select_active_slave(bond);
- write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
- unblock_netpoll_tx();
-out:
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = bond_option_primary_reselect_set(bond, new_value);
+ if (!ret)
+ ret = count;
+
rtnl_unlock();
return ret;
}
@@ -1178,25 +942,23 @@ static ssize_t bonding_store_carrier(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
+ int new_value, ret;
struct bonding *bond = to_bond(d);
-
if (sscanf(buf, "%d", &new_value) != 1) {
pr_err("%s: no use_carrier value specified.\n",
bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- if ((new_value == 0) || (new_value == 1)) {
- bond->params.use_carrier = new_value;
- pr_info("%s: Setting use_carrier to %d.\n",
- bond->dev->name, new_value);
- } else {
- pr_info("%s: Ignoring invalid use_carrier value %d.\n",
- bond->dev->name, new_value);
+ return -EINVAL;
}
-out:
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = bond_option_use_carrier_set(bond, new_value);
+ if (!ret)
+ ret = count;
+
+ rtnl_unlock();
return ret;
}
static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
@@ -1510,41 +1272,21 @@ static ssize_t bonding_store_slaves_active(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int new_value, ret = count;
- struct list_head *iter;
- struct slave *slave;
-
- if (!rtnl_trylock())
- return restart_syscall();
+ int new_value, ret;
if (sscanf(buf, "%d", &new_value) != 1) {
pr_err("%s: no all_slaves_active value specified.\n",
bond->dev->name);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
- if (new_value == bond->params.all_slaves_active)
- goto out;
+ if (!rtnl_trylock())
+ return restart_syscall();
- if ((new_value == 0) || (new_value == 1)) {
- bond->params.all_slaves_active = new_value;
- } else {
- pr_info("%s: Ignoring invalid all_slaves_active value %d.\n",
- bond->dev->name, new_value);
- ret = -EINVAL;
- goto out;
- }
+ ret = bond_option_all_slaves_active_set(bond, new_value);
+ if (!ret)
+ ret = count;
- bond_for_each_slave(bond, slave, iter) {
- if (!bond_is_active_slave(slave)) {
- if (new_value)
- slave->inactive = 0;
- else
- slave->inactive = 1;
- }
- }
-out:
rtnl_unlock();
return ret;
}
@@ -1573,21 +1315,17 @@ static ssize_t bonding_store_resend_igmp(struct device *d,
if (sscanf(buf, "%d", &new_value) != 1) {
pr_err("%s: no resend_igmp value specified.\n",
bond->dev->name);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
- if (new_value < 0 || new_value > 255) {
- pr_err("%s: Invalid resend_igmp value %d not in range 0-255; rejected.\n",
- bond->dev->name, new_value);
- ret = -EINVAL;
- goto out;
- }
+ if (!rtnl_trylock())
+ return restart_syscall();
- pr_info("%s: Setting resend_igmp to %d.\n",
- bond->dev->name, new_value);
- bond->params.resend_igmp = new_value;
-out:
+ ret = bond_option_resend_igmp_set(bond, new_value);
+ if (!ret)
+ ret = count;
+
+ rtnl_unlock();
return ret;
}
@@ -1608,24 +1346,22 @@ static ssize_t bonding_store_lp_interval(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int new_value, ret = count;
+ int new_value, ret;
if (sscanf(buf, "%d", &new_value) != 1) {
pr_err("%s: no lp interval value specified.\n",
bond->dev->name);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
- if (new_value <= 0) {
- pr_err ("%s: lp_interval must be between 1 and %d\n",
- bond->dev->name, INT_MAX);
- ret = -EINVAL;
- goto out;
- }
+ if (!rtnl_trylock())
+ return restart_syscall();
- bond->params.lp_interval = new_value;
-out:
+ ret = bond_option_lp_interval_set(bond, new_value);
+ if (!ret)
+ ret = count;
+
+ rtnl_unlock();
return ret;
}
@@ -1637,12 +1373,12 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
- int packets_per_slave = bond->params.packets_per_slave;
+ unsigned int packets_per_slave = bond->params.packets_per_slave;
if (packets_per_slave > 1)
packets_per_slave = reciprocal_value(packets_per_slave);
- return sprintf(buf, "%d\n", packets_per_slave);
+ return sprintf(buf, "%u\n", packets_per_slave);
}
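
The %d → %u change matters because params.packets_per_slave stores a precomputed multiplicative reciprocal rather than the raw count, so the round-robin transmit path can divide by multiply-and-shift instead of a hardware divide. On this kernel's reciprocal_div API the transform is (approximately) its own inverse, which is why the show handler simply applies reciprocal_value() again to recover the user-visible number. Roughly:

    #include <linux/reciprocal_div.h>

    u32 n = 5;
    u32 r = reciprocal_value(n);     /* ~ceil(2^32 / n), precomputed once */
    u32 q = reciprocal_divide(x, r); /* == x / n over the supported range */
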
static ssize_t bonding_store_packets_per_slave(struct device *d,
@@ -1650,28 +1386,22 @@ static ssize_t bonding_store_packets_per_slave(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int new_value, ret = count;
+ int new_value, ret;
if (sscanf(buf, "%d", &new_value) != 1) {
pr_err("%s: no packets_per_slave value specified.\n",
bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- if (new_value < 0 || new_value > USHRT_MAX) {
- pr_err("%s: packets_per_slave must be between 0 and %u\n",
- bond->dev->name, USHRT_MAX);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
- if (bond->params.mode != BOND_MODE_ROUNDROBIN)
- pr_warn("%s: Warning: packets_per_slave has effect only in balance-rr mode\n",
- bond->dev->name);
- if (new_value > 1)
- bond->params.packets_per_slave = reciprocal_value(new_value);
- else
- bond->params.packets_per_slave = new_value;
-out:
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = bond_option_packets_per_slave_set(bond, new_value);
+ if (!ret)
+ ret = count;
+
+ rtnl_unlock();
return ret;
}
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index ca31286aa028..955dc4839f1d 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -35,6 +35,8 @@
#define BOND_MAX_ARP_TARGETS 16
+#define BOND_DEFAULT_MIIMON 100
+
#define IS_UP(dev) \
((((dev)->flags & IFF_UP) == IFF_UP) && \
netif_running(dev) && \
@@ -55,6 +57,11 @@
((mode) == BOND_MODE_TLB) || \
((mode) == BOND_MODE_ALB))
+#define BOND_NO_USES_ARP(mode) \
+ (((mode) == BOND_MODE_8023AD) || \
+ ((mode) == BOND_MODE_TLB) || \
+ ((mode) == BOND_MODE_ALB))
+
#define TX_QUEUE_OVERRIDE(mode) \
(((mode) == BOND_MODE_ACTIVEBACKUP) || \
((mode) == BOND_MODE_ROUNDROBIN))
@@ -94,6 +101,10 @@
netdev_adjacent_get_private(bond_slave_list(bond)->prev) : \
NULL)
+/* Caller must have rcu_read_lock */
+#define bond_first_slave_rcu(bond) \
+ netdev_lower_get_first_private_rcu(bond->dev)
+
#define bond_is_first_slave(bond, pos) (pos == bond_first_slave(bond))
#define bond_is_last_slave(bond, pos) (pos == bond_last_slave(bond))
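
bond_first_slave_rcu() is the RCU counterpart of bond_first_slave(): it may only be called inside an RCU read-side critical section, and the returned slave must not be dereferenced outside it. Expected usage, sketched:

    struct slave *slave;

    rcu_read_lock();
    slave = bond_first_slave_rcu(bond);
    if (slave)
            use(slave);     /* only valid inside the read-side section */
    rcu_read_unlock();
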
@@ -387,8 +398,8 @@ static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be3
in_dev = __in_dev_get_rcu(dev);
if (in_dev)
- addr = inet_confirm_addr(in_dev, dst, local, RT_SCOPE_HOST);
-
+ addr = inet_confirm_addr(dev_net(dev), in_dev, dst, local,
+ RT_SCOPE_HOST);
rcu_read_unlock();
return addr;
}
@@ -405,19 +416,16 @@ static inline bool slave_can_tx(struct slave *slave)
struct bond_net;
int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
-int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
-void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id);
+void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
int bond_create(struct net *net, const char *name);
int bond_create_sysfs(struct bond_net *net);
void bond_destroy_sysfs(struct bond_net *net);
void bond_prepare_sysfs_group(struct bonding *bond);
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
-void bond_mii_monitor(struct work_struct *);
-void bond_loadbalance_arp_mon(struct work_struct *);
-void bond_activebackup_arp_mon(struct work_struct *);
int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count);
int bond_parse_parm(const char *mode_arg, const struct bond_parm_tbl *tbl);
+int bond_parm_tbl_lookup(int mode, const struct bond_parm_tbl *tbl);
void bond_select_active_slave(struct bonding *bond);
void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
void bond_create_debugfs(void);
@@ -432,6 +440,33 @@ int bond_netlink_init(void);
void bond_netlink_fini(void);
int bond_option_mode_set(struct bonding *bond, int mode);
int bond_option_active_slave_set(struct bonding *bond, struct net_device *slave_dev);
+int bond_option_miimon_set(struct bonding *bond, int miimon);
+int bond_option_updelay_set(struct bonding *bond, int updelay);
+int bond_option_downdelay_set(struct bonding *bond, int downdelay);
+int bond_option_use_carrier_set(struct bonding *bond, int use_carrier);
+int bond_option_arp_interval_set(struct bonding *bond, int arp_interval);
+int bond_option_arp_ip_targets_set(struct bonding *bond, __be32 *targets,
+ int count);
+int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target);
+int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target);
+int bond_option_arp_validate_set(struct bonding *bond, int arp_validate);
+int bond_option_arp_all_targets_set(struct bonding *bond, int arp_all_targets);
+int bond_option_primary_set(struct bonding *bond, const char *primary);
+int bond_option_primary_reselect_set(struct bonding *bond,
+ int primary_reselect);
+int bond_option_fail_over_mac_set(struct bonding *bond, int fail_over_mac);
+int bond_option_xmit_hash_policy_set(struct bonding *bond,
+ int xmit_hash_policy);
+int bond_option_resend_igmp_set(struct bonding *bond, int resend_igmp);
+int bond_option_num_peer_notif_set(struct bonding *bond, int num_peer_notif);
+int bond_option_all_slaves_active_set(struct bonding *bond,
+ int all_slaves_active);
+int bond_option_min_links_set(struct bonding *bond, int min_links);
+int bond_option_lp_interval_set(struct bonding *bond, int lp_interval);
+int bond_option_packets_per_slave_set(struct bonding *bond,
+ int packets_per_slave);
+int bond_option_lacp_rate_set(struct bonding *bond, int lacp_rate);
+int bond_option_ad_select_set(struct bonding *bond, int ad_select);
struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
struct net_device *bond_option_active_slave_get(struct bonding *bond);
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 3c069472eb8b..9e7d95dae2c7 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -71,7 +71,7 @@ config CAN_AT91
and AT91SAM9X5 processors.
config CAN_TI_HECC
- depends on ARCH_OMAP3
+ depends on ARM
tristate "TI High End CAN Controller"
---help---
Driver for TI HECC (High End CAN Controller) module found on many
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index e3fc07cf2f62..951bfede8f3d 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -712,22 +712,31 @@ static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
return 0;
}
-static int c_can_get_berr_counter(const struct net_device *dev,
- struct can_berr_counter *bec)
+static int __c_can_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
{
unsigned int reg_err_counter;
struct c_can_priv *priv = netdev_priv(dev);
- c_can_pm_runtime_get_sync(priv);
-
reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
ERR_CNT_REC_SHIFT;
bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
+ return 0;
+}
+
+static int c_can_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+ int err;
+
+ c_can_pm_runtime_get_sync(priv);
+ err = __c_can_get_berr_counter(dev, bec);
c_can_pm_runtime_put_sync(priv);
- return 0;
+ return err;
}
/*
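
The counter read is split so that callers which already run with the device powered (the state-change handler below executes in the interrupt path) can use the raw __c_can_get_berr_counter(), while the net_device op keeps the runtime-PM get_sync/put_sync bracket. The general shape of this common kernel split, with generic stand-in names:

    static int __read_counters(struct foo *f, struct counters *c); /* caller holds ctx */

    static int read_counters(struct foo *f, struct counters *c)
    {
            int err;

            foo_pm_get_sync(f);
            err = __read_counters(f, c);
            foo_pm_put_sync(f);
            return err;
    }
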
@@ -754,6 +763,7 @@ static void c_can_do_tx(struct net_device *dev)
if (!(val & (1 << (msg_obj_no - 1)))) {
can_get_echo_skb(dev,
msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
+ c_can_object_get(dev, 0, msg_obj_no, IF_COMM_ALL);
stats->tx_bytes += priv->read_reg(priv,
C_CAN_IFACE(MSGCTRL_REG, 0))
& IF_MCONT_DLC_MASK;
@@ -798,17 +808,19 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
u32 num_rx_pkts = 0;
unsigned int msg_obj, msg_ctrl_save;
struct c_can_priv *priv = netdev_priv(dev);
- u32 val = c_can_read_reg32(priv, C_CAN_INTPND1_REG);
+ u16 val;
+
+ /*
+ * It is faster to read only one 16bit register. This is only possible
+ * for a maximum number of 16 objects.
+ */
+ BUILD_BUG_ON_MSG(C_CAN_MSG_OBJ_RX_LAST > 16,
+ "Implementation does not support more message objects than 16");
+
+ while (quota > 0 && (val = priv->read_reg(priv, C_CAN_INTPND1_REG))) {
+ while ((msg_obj = ffs(val)) && quota > 0) {
+ val &= ~BIT(msg_obj - 1);
- for (msg_obj = C_CAN_MSG_OBJ_RX_FIRST;
- msg_obj <= C_CAN_MSG_OBJ_RX_LAST && quota > 0;
- val = c_can_read_reg32(priv, C_CAN_INTPND1_REG),
- msg_obj++) {
- /*
- * as interrupt pending register's bit n-1 corresponds to
- * message object n, we need to handle the same properly.
- */
- if (val & (1 << (msg_obj - 1))) {
c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL &
~IF_COMM_TXRQST);
msg_ctrl_save = priv->read_reg(priv,
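
Instead of probing all 16 RX message objects on every pass, the poll loop now snapshots the 16-bit INTPND1 register and visits only the set bits with ffs(); the BUILD_BUG_ON_MSG() makes the "all RX objects fit in one register" assumption fail at compile time if the object layout ever changes. The bit-walking idiom in isolation:

    u16 val = pending;                 /* snapshot of pending bits */
    unsigned int msg_obj;

    while ((msg_obj = ffs(val))) {     /* 1-based index of lowest set bit, 0 if none */
            val &= ~BIT(msg_obj - 1);  /* clear it so the walk terminates */
            handle(msg_obj);           /* bit n-1 flags message object n */
    }
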
@@ -872,7 +884,7 @@ static int c_can_handle_state_change(struct net_device *dev,
if (unlikely(!skb))
return 0;
- c_can_get_berr_counter(dev, &bec);
+ __c_can_get_berr_counter(dev, &bec);
reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
ERR_CNT_RP_SHIFT;
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index bda1888cae9a..13a909822e25 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index ae08cf129ebb..aaed97bee471 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1020,13 +1020,13 @@ static int flexcan_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "no ipg clock defined\n");
return PTR_ERR(clk_ipg);
}
- clock_freq = clk_get_rate(clk_ipg);
clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(clk_per)) {
dev_err(&pdev->dev, "no per clock defined\n");
return PTR_ERR(clk_per);
}
+ clock_freq = clk_get_rate(clk_per);
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 08ac401e0214..cdb9808d12db 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -28,8 +28,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*
*
@@ -59,6 +58,7 @@
#include <linux/can/dev.h>
#include <linux/can/led.h>
#include <linux/can/platform/mcp251x.h>
+#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -69,6 +69,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
@@ -264,6 +266,7 @@ struct mcp251x_priv {
int restart_tx;
struct regulator *power;
struct regulator *transceiver;
+ struct clk *clk;
};
#define MCP251X_IS(_model) \
@@ -995,22 +998,65 @@ static const struct net_device_ops mcp251x_netdev_ops = {
.ndo_start_xmit = mcp251x_hard_start_xmit,
};
+static const struct of_device_id mcp251x_of_match[] = {
+ {
+ .compatible = "microchip,mcp2510",
+ .data = (void *)CAN_MCP251X_MCP2510,
+ },
+ {
+ .compatible = "microchip,mcp2515",
+ .data = (void *)CAN_MCP251X_MCP2515,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mcp251x_of_match);
+
+static const struct spi_device_id mcp251x_id_table[] = {
+ {
+ .name = "mcp2510",
+ .driver_data = (kernel_ulong_t)CAN_MCP251X_MCP2510,
+ },
+ {
+ .name = "mcp2515",
+ .driver_data = (kernel_ulong_t)CAN_MCP251X_MCP2515,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
+
static int mcp251x_can_probe(struct spi_device *spi)
{
+ const struct of_device_id *of_id = of_match_device(mcp251x_of_match,
+ &spi->dev);
+ struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
struct net_device *net;
struct mcp251x_priv *priv;
- struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
- int ret = -ENODEV;
+ int freq, ret = -ENODEV;
+ struct clk *clk;
+
+ clk = devm_clk_get(&spi->dev, NULL);
+ if (IS_ERR(clk)) {
+ if (pdata)
+ freq = pdata->oscillator_frequency;
+ else
+ return PTR_ERR(clk);
+ } else {
+ freq = clk_get_rate(clk);
+ }
- if (!pdata)
- /* Platform data is required for osc freq */
- goto error_out;
+ /* Sanity check */
+ if (freq < 1000000 || freq > 25000000)
+ return -ERANGE;
/* Allocate can/net device */
net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX);
- if (!net) {
- ret = -ENOMEM;
- goto error_alloc;
+ if (!net)
+ return -ENOMEM;
+
+ if (!IS_ERR(clk)) {
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto out_free;
}
net->netdev_ops = &mcp251x_netdev_ops;
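
Note that devm_clk_get() only manages the clock reference (get/put); the prepare/enable pairing stays with the driver, which is why probe guards clk_prepare_enable() with IS_ERR() and both the error path and remove call clk_disable_unprepare() under the same check. The frequency selection with the platform-data fallback condenses to:

    clk = devm_clk_get(&spi->dev, NULL);
    if (IS_ERR(clk)) {
            if (!pdata)
                    return PTR_ERR(clk);    /* no DT clock and no pdata */
            freq = pdata->oscillator_frequency;
    } else {
            freq = clk_get_rate(clk);
    }
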
@@ -1019,23 +1065,27 @@ static int mcp251x_can_probe(struct spi_device *spi)
priv = netdev_priv(net);
priv->can.bittiming_const = &mcp251x_bittiming_const;
priv->can.do_set_mode = mcp251x_do_set_mode;
- priv->can.clock.freq = pdata->oscillator_frequency / 2;
+ priv->can.clock.freq = freq / 2;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
- priv->model = spi_get_device_id(spi)->driver_data;
+ if (of_id)
+ priv->model = (enum mcp251x_model)of_id->data;
+ else
+ priv->model = spi_get_device_id(spi)->driver_data;
priv->net = net;
+ priv->clk = clk;
priv->power = devm_regulator_get(&spi->dev, "vdd");
priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
(PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
ret = -EPROBE_DEFER;
- goto error_power;
+ goto out_clk;
}
ret = mcp251x_power_enable(priv->power, 1);
if (ret)
- goto error_power;
+ goto out_clk;
spi_set_drvdata(spi, priv);
@@ -1067,15 +1117,17 @@ static int mcp251x_can_probe(struct spi_device *spi)
/* Allocate non-DMA buffers */
if (!mcp251x_enable_dma) {
- priv->spi_tx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL);
+ priv->spi_tx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN,
+ GFP_KERNEL);
if (!priv->spi_tx_buf) {
ret = -ENOMEM;
- goto error_tx_buf;
+ goto error_probe;
}
- priv->spi_rx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL);
+ priv->spi_rx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN,
+ GFP_KERNEL);
if (!priv->spi_rx_buf) {
ret = -ENOMEM;
- goto error_rx_buf;
+ goto error_probe;
}
}
@@ -1108,21 +1160,18 @@ static int mcp251x_can_probe(struct spi_device *spi)
return ret;
error_probe:
- if (!mcp251x_enable_dma)
- kfree(priv->spi_rx_buf);
-error_rx_buf:
- if (!mcp251x_enable_dma)
- kfree(priv->spi_tx_buf);
-error_tx_buf:
if (mcp251x_enable_dma)
dma_free_coherent(&spi->dev, PAGE_SIZE,
priv->spi_tx_buf, priv->spi_tx_dma);
mcp251x_power_enable(priv->power, 0);
-error_power:
+
+out_clk:
+ if (!IS_ERR(clk))
+ clk_disable_unprepare(clk);
+
+out_free:
free_candev(net);
-error_alloc:
- dev_err(&spi->dev, "probe failed\n");
-error_out:
+
return ret;
}
@@ -1136,13 +1185,13 @@ static int mcp251x_can_remove(struct spi_device *spi)
if (mcp251x_enable_dma) {
dma_free_coherent(&spi->dev, PAGE_SIZE,
priv->spi_tx_buf, priv->spi_tx_dma);
- } else {
- kfree(priv->spi_tx_buf);
- kfree(priv->spi_rx_buf);
}
mcp251x_power_enable(priv->power, 0);
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+
free_candev(net);
return 0;
@@ -1205,21 +1254,13 @@ static int mcp251x_can_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(mcp251x_can_pm_ops, mcp251x_can_suspend,
mcp251x_can_resume);
-static const struct spi_device_id mcp251x_id_table[] = {
- { "mcp2510", CAN_MCP251X_MCP2510 },
- { "mcp2515", CAN_MCP251X_MCP2515 },
- { },
-};
-
-MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
-
static struct spi_driver mcp251x_can_driver = {
.driver = {
.name = DEVICE_NAME,
.owner = THIS_MODULE,
+ .of_match_table = mcp251x_of_match,
.pm = &mcp251x_can_pm_ops,
},
-
.id_table = mcp251x_id_table,
.probe = mcp251x_can_probe,
.remove = mcp251x_can_remove,
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index e59b3a392af6..035e235e3118 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -16,8 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index a955ec8c4b97..b9f3faabb0f3 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -16,8 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index e98abb97a050..ad8e08f9c496 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MSCAN_H__
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 5f0e9b3bfa7b..79e8699fd35a 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/interrupt.h>
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 835921388e7b..d790b874ca79 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 087b13bd300e..c96eb14699d5 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -26,8 +26,7 @@
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index f9b4f81cd86a..5df7f9848823 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -16,8 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 7164a999f50f..f17c3018b7c7 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -494,20 +494,20 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
uint8_t isrc, status;
int n = 0;
- /* Shared interrupts and IRQ off? */
- if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF)
- return IRQ_NONE;
-
if (priv->pre_irq)
priv->pre_irq(priv);
+ /* Shared interrupts and IRQ off? */
+ if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF)
+ goto out;
+
while ((isrc = priv->read_reg(priv, SJA1000_IR)) &&
(n < SJA1000_MAX_IRQ)) {
- n++;
+
status = priv->read_reg(priv, SJA1000_SR);
/* check for absent controller due to hw unplug */
if (status == 0xFF && sja1000_is_absent(priv))
- return IRQ_NONE;
+ goto out;
if (isrc & IRQ_WUI)
netdev_warn(dev, "wakeup interrupt\n");
@@ -535,7 +535,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
status = priv->read_reg(priv, SJA1000_SR);
/* check for absent controller */
if (status == 0xFF && sja1000_is_absent(priv))
- return IRQ_NONE;
+ goto out;
}
}
if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) {
@@ -543,8 +543,9 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
if (sja1000_err(dev, isrc, status))
break;
}
+ n++;
}
-
+out:
if (priv->post_irq)
priv->post_irq(priv);
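
The reordering matters for controllers whose pre_irq()/post_irq() hooks bracket register access (bus enable, window select): pre_irq() now runs before the IER read, and the two early returns become goto out so that post_irq() is executed on every exit path and the hooks stay balanced. Moving n++ to the loop tail also makes the counter track completed passes rather than entries.
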
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index 06a282397fff..df136a2516c4 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -11,8 +11,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 047accd4ede5..2f6e24534231 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/* This is a generic driver for SJA1000 chips on the OpenFirmware platform
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 29f9b6321187..943df645b459 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 25377e547f9b..3fcdae266377 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -18,9 +18,7 @@
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307. You can also get it
- * at http://www.gnu.org/licenses/gpl.html
+ * with this program; if not, see http://www.gnu.org/licenses/gpl.html
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c
index 498605f833dd..cdc0c7433a4b 100644
--- a/drivers/net/can/softing/softing_cs.c
+++ b/drivers/net/can/softing/softing_cs.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
index b595d3422b9f..52fe50725d74 100644
--- a/drivers/net/can/softing/softing_fw.c
+++ b/drivers/net/can/softing/softing_fw.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/firmware.h>
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 6cd5c01b624d..1b8212783640 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 5f9a7ad9b964..8aeec0b4601a 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -625,6 +625,7 @@ static int ems_usb_start(struct ems_usb *dev)
usb_unanchor_urb(urb);
usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
urb->transfer_dma);
+ usb_free_urb(urb);
break;
}
@@ -798,8 +799,8 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
* allowed (MAX_TX_URBS).
*/
if (!context) {
- usb_unanchor_urb(urb);
usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
+ usb_free_urb(urb);
netdev_warn(netdev, "couldn't find free context\n");
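
Both ems_usb hunks plug the same leak: on these failure paths the driver still holds the reference taken by usb_alloc_urb(), so releasing the coherent buffer alone leaves the URB itself pinned. The canonical shape of the error path (a hedged sketch, not the driver's exact code):

    urb = usb_alloc_urb(0, GFP_ATOMIC);        /* driver owns one reference */
    ...
    err = usb_submit_urb(urb, GFP_ATOMIC);
    if (err) {
            usb_unanchor_urb(urb);
            usb_free_coherent(udev, size, buf, urb->transfer_dma);
            usb_free_urb(urb);                 /* drop the allocation ref */
    }
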
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 8ee9d1556e6e..263dd921edc4 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -927,6 +927,9 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
/* set LED in default state (end of init phase) */
pcan_usb_pro_set_led(dev, 0, 1);
+ kfree(bi);
+ kfree(fi);
+
return 0;
err_out:
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index ede8daa68275..9142b473175c 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -252,8 +252,7 @@ static int el3_isa_id_sequence(__be16 *phys_addr)
for (i = 0; i < el3_cards; i++) {
struct el3_private *lp = netdev_priv(el3_devs[i]);
if (lp->type == EL3_PNP &&
- !memcmp(phys_addr, el3_devs[i]->dev_addr,
- ETH_ALEN)) {
+ ether_addr_equal(phys_addr, el3_devs[i]->dev_addr)) {
if (el3_debug > 3)
pr_debug("3c509 with address %02x %02x %02x %02x %02x %02x was found by ISAPnP\n",
phys_addr[0] & 0xff, phys_addr[0] >> 8,
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index ad5272b348f0..0f4241c6e97e 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -693,7 +693,7 @@ DEFINE_WINDOW_IO(16)
DEFINE_WINDOW_IO(32)
#ifdef CONFIG_PCI
-#define DEVICE_PCI(dev) (((dev)->bus == &pci_bus_type) ? to_pci_dev((dev)) : NULL)
+#define DEVICE_PCI(dev) ((dev_is_pci(dev)) ? to_pci_dev((dev)) : NULL)
#else
#define DEVICE_PCI(dev) NULL
#endif
@@ -2079,10 +2079,12 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
iowrite16(len, ioaddr + Wn7_MasterLen);
spin_unlock_irq(&vp->window_lock);
vp->tx_skb = skb;
+ skb_tx_timestamp(skb);
iowrite16(StartDMADown, ioaddr + EL3_CMD);
/* netif_wake_queue() will be called at the DMADone interrupt. */
} else {
/* ... and the packet rounded to a doubleword. */
+ skb_tx_timestamp(skb);
iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
dev_kfree_skb (skb);
if (ioread16(ioaddr + TxFree) > 1536) {
@@ -2212,6 +2214,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
#endif
}
+ skb_tx_timestamp(skb);
iowrite16(DownUnstall, ioaddr + EL3_CMD);
spin_unlock_irqrestore(&vp->lock, flags);
return NETDEV_TX_OK;
@@ -2986,6 +2989,7 @@ static const struct ethtool_ops vortex_ethtool_ops = {
.nway_reset = vortex_nway_reset,
.get_wol = vortex_get_wol,
.set_wol = vortex_set_wol,
+ .get_ts_info = ethtool_op_get_ts_info,
};
#ifdef CONFIG_PCI
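
The three skb_tx_timestamp() calls give 3c59x software transmit timestamping; the convention is to call it at the last point before the packet is handed to the hardware (and before any path that might free the skb), while ethtool_op_get_ts_info advertises the generic software-timestamp capabilities to ethtool -T. In a transmit handler this typically looks like (foo_start_xmit/kick_dma are illustrative names):

    static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            ...
            skb_tx_timestamp(skb);  /* last touch before the hardware owns it */
            kick_dma(dev, skb);
            return NETDEV_TX_OK;
    }
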
diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h
index 2923c51bb351..3e2f2c2e7b58 100644
--- a/drivers/net/ethernet/8390/8390.h
+++ b/drivers/net/ethernet/8390/8390.h
@@ -21,12 +21,6 @@ struct e8390_pkt_hdr {
unsigned short count; /* header + packet length in bytes */
};
-#ifdef notdef
-extern int ei_debug;
-#else
-#define ei_debug 1
-#endif
-
#ifdef CONFIG_NET_POLL_CONTROLLER
void ei_poll(struct net_device *dev);
void eip_poll(struct net_device *dev);
@@ -99,6 +93,7 @@ struct ei_device {
u32 *reg_offset; /* Register mapping table */
spinlock_t page_lock; /* Page register locks */
unsigned long priv; /* Private field to store bus IDs etc. */
+ u32 msg_enable; /* debug message level */
#ifdef AX88796_PLATFORM
unsigned char rxcr_base; /* default value for RXCR */
#endif
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index 912ed7a5f33a..811fa5d5c697 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -116,9 +116,15 @@ static const char version[] =
static int apne_owned; /* signal if card already owned */
+static u32 apne_msg_enable;
+module_param_named(msg_enable, apne_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
+
struct net_device * __init apne_probe(int unit)
{
struct net_device *dev;
+ struct ei_device *ei_local;
+
#ifndef MANUAL_CONFIG
char tuple[8];
#endif
@@ -133,11 +139,11 @@ struct net_device * __init apne_probe(int unit)
if ( !(AMIGAHW_PRESENT(PCMCIA)) )
return ERR_PTR(-ENODEV);
- printk("Looking for PCMCIA ethernet card : ");
+ pr_info("Looking for PCMCIA ethernet card : ");
/* check if a card is inserted */
if (!(PCMCIA_INSERTED)) {
- printk("NO PCMCIA card inserted\n");
+ pr_cont("NO PCMCIA card inserted\n");
return ERR_PTR(-ENODEV);
}
@@ -148,6 +154,8 @@ struct net_device * __init apne_probe(int unit)
sprintf(dev->name, "eth%d", unit);
netdev_boot_setup_check(dev);
}
+ ei_local = netdev_priv(dev);
+ ei_local->msg_enable = apne_msg_enable;
/* disable pcmcia irq for readtuple */
pcmcia_disable_irq();
@@ -155,14 +163,14 @@ struct net_device * __init apne_probe(int unit)
#ifndef MANUAL_CONFIG
if ((pcmcia_copy_tuple(CISTPL_FUNCID, tuple, 8) < 3) ||
(tuple[2] != CISTPL_FUNCID_NETWORK)) {
- printk("not an ethernet card\n");
+ pr_cont("not an ethernet card\n");
/* XXX: shouldn't we re-enable irq here? */
free_netdev(dev);
return ERR_PTR(-ENODEV);
}
#endif
- printk("ethernet PCMCIA card inserted\n");
+ pr_cont("ethernet PCMCIA card inserted\n");
if (!init_pcmcia()) {
/* XXX: shouldn't we re-enable irq here? */
@@ -204,11 +212,12 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
int neX000, ctron;
#endif
static unsigned version_printed;
+ struct ei_device *ei_local = netdev_priv(dev);
- if (ei_debug && version_printed++ == 0)
- printk(version);
+ if ((apne_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+ netdev_info(dev, version);
- printk("PCMCIA NE*000 ethercard probe");
+ netdev_info(dev, "PCMCIA NE*000 ethercard probe");
/* Reset card. Who knows what dain-bramaged state it was left in. */
{ unsigned long reset_start_time = jiffies;
@@ -217,7 +226,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
while ((inb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0)
if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
- printk(" not found (no reset ack).\n");
+ pr_cont(" not found (no reset ack).\n");
return -ENODEV;
}
@@ -288,7 +297,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
start_page = 0x01;
stop_page = (wordlength == 2) ? 0x40 : 0x20;
} else {
- printk(" not found.\n");
+ pr_cont(" not found.\n");
return -ENXIO;
}
@@ -320,9 +329,9 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = SA_prom[i];
- printk(" %pM\n", dev->dev_addr);
+ pr_cont(" %pM\n", dev->dev_addr);
- printk("%s: %s found.\n", dev->name, name);
+ netdev_info(dev, "%s found.\n", name);
ei_status.name = name;
ei_status.tx_start_page = start_page;
@@ -352,10 +361,11 @@ static void
apne_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
+ struct ei_device *ei_local = netdev_priv(dev);
init_pcmcia();
- if (ei_debug > 1) printk("resetting the 8390 t=%ld...", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies);
outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
@@ -365,8 +375,8 @@ apne_reset_8390(struct net_device *dev)
/* This check _should_not_ be necessary, omit eventually. */
while ((inb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0)
if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
- printk("%s: ne_reset_8390() did not complete.\n", dev->name);
- break;
+ netdev_err(dev, "ne_reset_8390() did not complete.\n");
+ break;
}
outb(ENISR_RESET, NE_BASE + NE_EN0_ISR); /* Ack intr. */
}
@@ -386,9 +396,9 @@ apne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_pa
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk("%s: DMAing conflict in ne_get_8390_hdr "
- "[DMAstat:%d][irqlock:%d][intr:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
+ netdev_err(dev, "DMAing conflict in ne_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+ ei_status.dmaing, ei_status.irqlock, dev->irq);
return;
}
@@ -433,9 +443,9 @@ apne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int rin
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk("%s: DMAing conflict in ne_block_input "
- "[DMAstat:%d][irqlock:%d][intr:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
+ netdev_err(dev, "DMAing conflict in ne_block_input "
+ "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+ ei_status.dmaing, ei_status.irqlock, dev->irq);
return;
}
ei_status.dmaing |= 0x01;
@@ -481,9 +491,9 @@ apne_block_output(struct net_device *dev, int count,
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk("%s: DMAing conflict in ne_block_output."
- "[DMAstat:%d][irqlock:%d][intr:%d]\n",
- dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
+ netdev_err(dev, "DMAing conflict in ne_block_output."
+ "[DMAstat:%d][irqlock:%d][intr:%d]\n",
+ ei_status.dmaing, ei_status.irqlock, dev->irq);
return;
}
ei_status.dmaing |= 0x01;
@@ -513,7 +523,7 @@ apne_block_output(struct net_device *dev, int count,
while ((inb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
- printk("%s: timeout waiting for Tx RDC.\n", dev->name);
+ netdev_warn(dev, "timeout waiting for Tx RDC.\n");
apne_reset_8390(dev);
NS8390_init(dev,1);
break;
@@ -536,8 +546,8 @@ static irqreturn_t apne_interrupt(int irq, void *dev_id)
pcmcia_ack_int(pcmcia_intreq);
return IRQ_NONE;
}
- if (ei_debug > 3)
- printk("pcmcia intreq = %x\n", pcmcia_intreq);
+ if (apne_msg_enable & NETIF_MSG_INTR)
+ pr_debug("pcmcia intreq = %x\n", pcmcia_intreq);
pcmcia_disable_irq(); /* to get rid of the sti() within ei_interrupt */
ei_interrupt(irq, dev_id);
pcmcia_ack_int(pcmcia_get_intreq());
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 36fa577970bb..8ed5b34d7553 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -78,6 +78,8 @@ static unsigned char version[] = "ax88796.c: Copyright 2005,2007 Simtec Electron
#define AX_GPOC_PPDSET BIT(6)
+static u32 ax_msg_enable;
+
/* device private data */
struct ax_device {
@@ -147,8 +149,7 @@ static void ax_reset_8390(struct net_device *dev)
unsigned long reset_start_time = jiffies;
void __iomem *addr = (void __iomem *)dev->base_addr;
- if (ei_debug > 1)
- netdev_dbg(dev, "resetting the 8390 t=%ld\n", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies);
ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
@@ -496,12 +497,28 @@ static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return phy_ethtool_sset(phy_dev, cmd);
}
+static u32 ax_get_msglevel(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ return ei_local->msg_enable;
+}
+
+static void ax_set_msglevel(struct net_device *dev, u32 v)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ ei_local->msg_enable = v;
+}
+
static const struct ethtool_ops ax_ethtool_ops = {
.get_drvinfo = ax_get_drvinfo,
.get_settings = ax_get_settings,
.set_settings = ax_set_settings,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_msglevel = ax_get_msglevel,
+ .set_msglevel = ax_set_msglevel,
};
#ifdef CONFIG_AX88796_93CX6
@@ -763,6 +780,7 @@ static int ax_init_dev(struct net_device *dev)
ei_local->block_output = &ax_block_output;
ei_local->get_8390_hdr = &ax_get_8390_hdr;
ei_local->priv = 0;
+ ei_local->msg_enable = ax_msg_enable;
dev->netdev_ops = &ax_netdev_ops;
dev->ethtool_ops = &ax_ethtool_ops;
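
The ax88796 get/set_msglevel pair exposes the new per-device ei_local->msg_enable through the standard ethtool knob, so the netif_dbg()/netif_msg_*() sites introduced across the 8390 drivers in this series can be toggled at runtime — e.g. something like `ethtool -s eth0 msglvl 0x2000` (NETIF_MSG_HW, per the bitmap in linux/netdevice.h) — instead of rebuilding with a different compile-time ei_debug.
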
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index d801c1410fb0..5698a4c85d8e 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -105,6 +105,7 @@ static void AX88190_init(struct net_device *dev, int startp);
static int ax_open(struct net_device *dev);
static int ax_close(struct net_device *dev);
static irqreturn_t ax_interrupt(int irq, void *dev_id);
+static u32 axnet_msg_enable;
/*====================================================================*/
@@ -152,6 +153,7 @@ static int axnet_probe(struct pcmcia_device *link)
return -ENOMEM;
ei_local = netdev_priv(dev);
+ ei_local->msg_enable = axnet_msg_enable;
spin_lock_init(&ei_local->page_lock);
info = PRIV(dev);
@@ -650,11 +652,12 @@ static void block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset)
{
unsigned int nic_base = dev->base_addr;
+ struct ei_device *ei_local = netdev_priv(dev);
int xfer_count = count;
char *buf = skb->data;
- if ((ei_debug > 4) && (count != 4))
- pr_debug("%s: [bi=%d]\n", dev->name, count+4);
+ if ((netif_msg_rx_status(ei_local)) && (count != 4))
+ netdev_dbg(dev, "[bi=%d]\n", count+4);
outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);
@@ -810,11 +813,6 @@ module_pcmcia_driver(axnet_cs_driver);
#define ei_block_input (ei_local->block_input)
#define ei_get_8390_hdr (ei_local->get_8390_hdr)
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef ei_debug
-int ei_debug = 1;
-#endif
-
/* Index to functions. */
static void ei_tx_intr(struct net_device *dev);
static void ei_tx_err(struct net_device *dev);
@@ -925,11 +923,10 @@ static void axnet_tx_timeout(struct net_device *dev)
isr = inb(e8390_base+EN0_ISR);
spin_unlock_irqrestore(&ei_local->page_lock, flags);
- netdev_printk(KERN_DEBUG, dev,
- "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
- (txsr & ENTSR_ABT) ? "excess collisions." :
- (isr) ? "lost interrupt?" : "cable problem?",
- txsr, isr, tickssofar);
+ netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
+ (txsr & ENTSR_ABT) ? "excess collisions." :
+ (isr) ? "lost interrupt?" : "cable problem?",
+ txsr, isr, tickssofar);
if (!isr && !dev->stats.tx_packets)
{
@@ -998,29 +995,30 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
{
output_page = ei_local->tx_start_page;
ei_local->tx1 = send_length;
- if (ei_debug && ei_local->tx2 > 0)
- netdev_printk(KERN_DEBUG, dev,
- "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
- ei_local->tx2, ei_local->lasttx,
- ei_local->txing);
+ if ((netif_msg_tx_queued(ei_local)) &&
+ ei_local->tx2 > 0)
+ netdev_dbg(dev,
+ "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
+ ei_local->tx2, ei_local->lasttx,
+ ei_local->txing);
}
else if (ei_local->tx2 == 0)
{
output_page = ei_local->tx_start_page + TX_PAGES/2;
ei_local->tx2 = send_length;
- if (ei_debug && ei_local->tx1 > 0)
- netdev_printk(KERN_DEBUG, dev,
- "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
- ei_local->tx1, ei_local->lasttx,
- ei_local->txing);
+ if ((netif_msg_tx_queued(ei_local)) &&
+ ei_local->tx1 > 0)
+ netdev_dbg(dev,
+ "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
+ ei_local->tx1, ei_local->lasttx,
+ ei_local->txing);
}
else
{ /* We should never get here. */
- if (ei_debug)
- netdev_printk(KERN_DEBUG, dev,
- "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
- ei_local->tx1, ei_local->tx2,
- ei_local->lasttx);
+ netif_dbg(ei_local, tx_err, dev,
+ "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
+ ei_local->tx1, ei_local->tx2,
+ ei_local->lasttx);
ei_local->irqlock = 0;
netif_stop_queue(dev);
outb_p(ENISR_ALL, e8390_base + EN0_IMR);
@@ -1124,10 +1122,9 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&ei_local->page_lock, flags);
return IRQ_NONE;
}
-
- if (ei_debug > 3)
- netdev_printk(KERN_DEBUG, dev, "interrupt(isr=%#2.2x)\n",
- inb_p(e8390_base + EN0_ISR));
+
+ netif_dbg(ei_local, intr, dev, "interrupt(isr=%#2.2x)\n",
+ inb_p(e8390_base + EN0_ISR));
outb_p(0x00, e8390_base + EN0_ISR);
ei_local->irqlock = 1;
@@ -1137,9 +1134,8 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
++nr_serviced < MAX_SERVICE)
{
if (!netif_running(dev) || (interrupts == 0xff)) {
- if (ei_debug > 1)
- netdev_warn(dev,
- "interrupt from stopped card\n");
+ netif_warn(ei_local, intr, dev,
+ "interrupt from stopped card\n");
outb_p(interrupts, e8390_base + EN0_ISR);
interrupts = 0;
break;
@@ -1175,14 +1171,15 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
}
}
- if (interrupts && ei_debug > 3)
+ if (interrupts && (netif_msg_intr(ei_local)))
{
handled = 1;
if (nr_serviced >= MAX_SERVICE)
{
/* 0xFF is valid for a card removal */
- if(interrupts!=0xFF)
- netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
+ if (interrupts != 0xFF)
+ netdev_warn(dev,
+ "Too much work at interrupt, status %#2.2x\n",
interrupts);
outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
} else {
@@ -1221,8 +1218,7 @@ static void ei_tx_err(struct net_device *dev)
unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
#ifdef VERBOSE_ERROR_DUMP
- netdev_printk(KERN_DEBUG, dev,
- "transmitter error (%#2x):", txsr);
+ netdev_dbg(dev, "transmitter error (%#2x):", txsr);
if (txsr & ENTSR_ABT)
pr_cont(" excess-collisions");
if (txsr & ENTSR_ND)
@@ -1287,9 +1283,9 @@ static void ei_tx_intr(struct net_device *dev)
else if (ei_local->tx2 < 0)
{
if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
- netdev_info(dev, "%s: bogus last_tx_buffer %d, tx2=%d\n",
- ei_local->name, ei_local->lasttx,
- ei_local->tx2);
+ netdev_err(dev, "%s: bogus last_tx_buffer %d, tx2=%d\n",
+ ei_local->name, ei_local->lasttx,
+ ei_local->tx2);
ei_local->tx2 = 0;
if (ei_local->tx1 > 0)
{
@@ -1366,9 +1362,11 @@ static void ei_receive(struct net_device *dev)
Keep quiet if it looks like a card removal. One problem here
is that some clones crash in roughly the same way.
*/
- if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF))
- netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
- this_frame, ei_local->current_page);
+ if ((netif_msg_rx_err(ei_local)) &&
+ this_frame != ei_local->current_page &&
+ (this_frame != 0x0 || rxing_page != 0xFF))
+ netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
+ this_frame, ei_local->current_page);
if (this_frame == rxing_page) /* Read all the frames? */
break; /* Done for now */
@@ -1383,11 +1381,10 @@ static void ei_receive(struct net_device *dev)
if (pkt_len < 60 || pkt_len > 1518)
{
- if (ei_debug)
- netdev_printk(KERN_DEBUG, dev,
- "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
- rx_frame.count, rx_frame.status,
- rx_frame.next);
+ netif_err(ei_local, rx_err, dev,
+ "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
+ rx_frame.count, rx_frame.status,
+ rx_frame.next);
dev->stats.rx_errors++;
dev->stats.rx_length_errors++;
}
@@ -1398,10 +1395,9 @@ static void ei_receive(struct net_device *dev)
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL)
{
- if (ei_debug > 1)
- netdev_printk(KERN_DEBUG, dev,
- "Couldn't allocate a sk_buff of size %d\n",
- pkt_len);
+ netif_err(ei_local, rx_err, dev,
+ "Couldn't allocate a sk_buff of size %d\n",
+ pkt_len);
dev->stats.rx_dropped++;
break;
}
@@ -1420,11 +1416,10 @@ static void ei_receive(struct net_device *dev)
}
else
{
- if (ei_debug)
- netdev_printk(KERN_DEBUG, dev,
- "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
- rx_frame.status, rx_frame.next,
- rx_frame.count);
+ netif_err(ei_local, rx_err, dev,
+ "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+ rx_frame.status, rx_frame.next,
+ rx_frame.count);
dev->stats.rx_errors++;
/* NB: The NIC counts CRC, frame and missed errors. */
if (pkt_stat & ENRSR_FO)
@@ -1461,6 +1456,7 @@ static void ei_rx_overrun(struct net_device *dev)
axnet_dev_t *info = PRIV(dev);
long e8390_base = dev->base_addr;
unsigned char was_txing, must_resend = 0;
+ struct ei_device *ei_local = netdev_priv(dev);
/*
* Record whether a Tx was in progress and then issue the
@@ -1468,9 +1464,8 @@ static void ei_rx_overrun(struct net_device *dev)
*/
was_txing = inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
-
- if (ei_debug > 1)
- netdev_printk(KERN_DEBUG, dev, "Receiver overrun\n");
+
+ netif_dbg(ei_local, rx_err, dev, "Receiver overrun\n");
dev->stats.rx_over_errors++;
/*
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index 78c6fb4b1143..b36ee9e0d220 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -56,18 +56,15 @@
#define ei_inb_p(_p) readb((void __iomem *)_p)
#define ei_outb_p(_v,_p) writeb(_v,(void __iomem *)_p)
-#define NET_DEBUG 0
-#define DEBUG_INIT 2
-
#define DRV_NAME "etherh"
#define DRV_VERSION "1.11"
-static char version[] __initdata =
+static char version[] =
"EtherH/EtherM Driver (c) 2002-2004 Russell King " DRV_VERSION "\n";
#include "lib8390.c"
-static unsigned int net_debug = NET_DEBUG;
+static u32 etherh_msg_enable;
struct etherh_priv {
void __iomem *ioc_fast;
@@ -317,9 +314,9 @@ etherh_block_output (struct net_device *dev, int count, const unsigned char *buf
void __iomem *dma_base, *addr;
if (ei_local->dmaing) {
- printk(KERN_ERR "%s: DMAing conflict in etherh_block_input: "
- " DMAstat %d irqlock %d\n", dev->name,
- ei_local->dmaing, ei_local->irqlock);
+ netdev_err(dev, "DMAing conflict in etherh_block_input: "
+ " DMAstat %d irqlock %d\n",
+ ei_local->dmaing, ei_local->irqlock);
return;
}
@@ -361,8 +358,7 @@ etherh_block_output (struct net_device *dev, int count, const unsigned char *buf
while ((readb (addr + EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
- printk(KERN_ERR "%s: timeout waiting for TX RDC\n",
- dev->name);
+ netdev_warn(dev, "timeout waiting for TX RDC\n");
etherh_reset (dev);
__NS8390_init (dev, 1);
break;
@@ -383,9 +379,9 @@ etherh_block_input (struct net_device *dev, int count, struct sk_buff *skb, int
void __iomem *dma_base, *addr;
if (ei_local->dmaing) {
- printk(KERN_ERR "%s: DMAing conflict in etherh_block_input: "
- " DMAstat %d irqlock %d\n", dev->name,
- ei_local->dmaing, ei_local->irqlock);
+ netdev_err(dev, "DMAing conflict in etherh_block_input: "
+ " DMAstat %d irqlock %d\n",
+ ei_local->dmaing, ei_local->irqlock);
return;
}
@@ -423,9 +419,9 @@ etherh_get_header (struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_p
void __iomem *dma_base, *addr;
if (ei_local->dmaing) {
- printk(KERN_ERR "%s: DMAing conflict in etherh_get_header: "
- " DMAstat %d irqlock %d\n", dev->name,
- ei_local->dmaing, ei_local->irqlock);
+ netdev_err(dev, "DMAing conflict in etherh_get_header: "
+ " DMAstat %d irqlock %d\n",
+ ei_local->dmaing, ei_local->irqlock);
return;
}
@@ -513,8 +509,8 @@ static void __init etherh_banner(void)
{
static int version_printed;
- if (net_debug && version_printed++ == 0)
- printk(KERN_INFO "%s", version);
+ if ((etherh_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+ pr_info("%s", version);
}
/*
@@ -625,11 +621,27 @@ static int etherh_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return 0;
}
+static u32 etherh_get_msglevel(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ return ei_local->msg_enable;
+}
+
+static void etherh_set_msglevel(struct net_device *dev, u32 v)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ ei_local->msg_enable = v;
+}
+
static const struct ethtool_ops etherh_ethtool_ops = {
.get_settings = etherh_get_settings,
.set_settings = etherh_set_settings,
.get_drvinfo = etherh_get_drvinfo,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_msglevel = etherh_get_msglevel,
+ .set_msglevel = etherh_set_msglevel,
};
static const struct net_device_ops etherh_netdev_ops = {
@@ -746,6 +758,7 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
ei_local->block_output = etherh_block_output;
ei_local->get_8390_hdr = etherh_get_header;
ei_local->interface_num = 0;
+ ei_local->msg_enable = etherh_msg_enable;
etherh_reset(dev);
__NS8390_init(dev, 0);
@@ -754,8 +767,8 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
if (ret)
goto free;
- printk(KERN_INFO "%s: %s in slot %d, %pM\n",
- dev->name, data->name, ec->slot_no, dev->dev_addr);
+ netdev_info(dev, "%s in slot %d, %pM\n",
+ data->name, ec->slot_no, dev->dev_addr);
ecard_set_drvdata(ec, dev);
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index fb3dd4399cf3..d8b86c83dd15 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -66,6 +66,7 @@ static void hydra_block_input(struct net_device *dev, int count,
static void hydra_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
static void hydra_remove_one(struct zorro_dev *z);
+static u32 hydra_msg_enable;
static struct zorro_device_id hydra_zorro_tbl[] = {
{ ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET },
@@ -119,6 +120,7 @@ static int hydra_init(struct zorro_dev *z)
int start_page, stop_page;
int j;
int err;
+ struct ei_device *ei_local;
static u32 hydra_offsets[16] = {
0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
@@ -137,6 +139,8 @@ static int hydra_init(struct zorro_dev *z)
start_page = NESM_START_PG;
stop_page = NESM_STOP_PG;
+ ei_local = netdev_priv(dev);
+ ei_local->msg_enable = hydra_msg_enable;
dev->base_addr = ioaddr;
dev->irq = IRQ_AMIGA_PORTS;
@@ -187,15 +191,16 @@ static int hydra_open(struct net_device *dev)
static int hydra_close(struct net_device *dev)
{
- if (ei_debug > 1)
- printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard.\n");
__ei_close(dev);
return 0;
}
static void hydra_reset_8390(struct net_device *dev)
{
- printk(KERN_INFO "Hydra hw reset not there\n");
+ netdev_info(dev, "Hydra hw reset not there\n");
}
static void hydra_get_8390_hdr(struct net_device *dev,
diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index b329f5c0d62b..d2cd80444ade 100644
--- a/drivers/net/ethernet/8390/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -99,11 +99,6 @@
#define ei_block_input (ei_local->block_input)
#define ei_get_8390_hdr (ei_local->get_8390_hdr)
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef ei_debug
-int ei_debug = 1;
-#endif
-
/* Index to functions. */
static void ei_tx_intr(struct net_device *dev);
static void ei_tx_err(struct net_device *dev);
@@ -116,6 +111,11 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
static void do_set_multicast_list(struct net_device *dev);
static void __NS8390_init(struct net_device *dev, int startp);
+static unsigned version_printed;
+static u32 msg_enable;
+module_param(msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
+
/*
* SMP and the 8390 setup.
*
@@ -345,19 +345,23 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
if (ei_local->tx1 == 0) {
output_page = ei_local->tx_start_page;
ei_local->tx1 = send_length;
- if (ei_debug && ei_local->tx2 > 0)
- netdev_dbg(dev, "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
+ if ((netif_msg_tx_queued(ei_local)) &&
+ ei_local->tx2 > 0)
+ netdev_dbg(dev,
+ "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
ei_local->tx2, ei_local->lasttx, ei_local->txing);
} else if (ei_local->tx2 == 0) {
output_page = ei_local->tx_start_page + TX_PAGES/2;
ei_local->tx2 = send_length;
- if (ei_debug && ei_local->tx1 > 0)
- netdev_dbg(dev, "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
+ if ((netif_msg_tx_queued(ei_local)) &&
+ ei_local->tx1 > 0)
+ netdev_dbg(dev,
+ "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
ei_local->tx1, ei_local->lasttx, ei_local->txing);
} else { /* We should never get here. */
- if (ei_debug)
- netdev_dbg(dev, "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
- ei_local->tx1, ei_local->tx2, ei_local->lasttx);
+ netif_dbg(ei_local, tx_err, dev,
+ "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
+ ei_local->tx1, ei_local->tx2, ei_local->lasttx);
ei_local->irqlock = 0;
netif_stop_queue(dev);
ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
@@ -388,7 +392,7 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
} else
ei_local->txqueue++;
- if (ei_local->tx1 && ei_local->tx2)
+ if (ei_local->tx1 && ei_local->tx2)
netif_stop_queue(dev);
else
netif_start_queue(dev);
@@ -445,9 +449,8 @@ static irqreturn_t __ei_interrupt(int irq, void *dev_id)
/* Change to page 0 and read the intr status reg. */
ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
- if (ei_debug > 3)
- netdev_dbg(dev, "interrupt(isr=%#2.2x)\n",
- ei_inb_p(e8390_base + EN0_ISR));
+ netif_dbg(ei_local, intr, dev, "interrupt(isr=%#2.2x)\n",
+ ei_inb_p(e8390_base + EN0_ISR));
/* !!Assumption!! -- we stay in page 0. Don't break this. */
while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 &&
@@ -485,7 +488,7 @@ static irqreturn_t __ei_interrupt(int irq, void *dev_id)
ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
}
- if (interrupts && ei_debug) {
+ if (interrupts && (netif_msg_intr(ei_local))) {
ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
if (nr_serviced >= MAX_SERVICE) {
/* 0xFF is valid for a card removal */
@@ -676,10 +679,11 @@ static void ei_receive(struct net_device *dev)
Keep quiet if it looks like a card removal. One problem here
is that some clones crash in roughly the same way.
*/
- if (ei_debug > 0 &&
+ if ((netif_msg_rx_status(ei_local)) &&
this_frame != ei_local->current_page &&
(this_frame != 0x0 || rxing_page != 0xFF))
- netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
+ netdev_err(dev,
+ "mismatched read page pointers %2x vs %2x\n",
this_frame, ei_local->current_page);
if (this_frame == rxing_page) /* Read all the frames? */
@@ -707,10 +711,10 @@ static void ei_receive(struct net_device *dev)
}
if (pkt_len < 60 || pkt_len > 1518) {
- if (ei_debug)
- netdev_dbg(dev, "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
- rx_frame.count, rx_frame.status,
- rx_frame.next);
+ netif_dbg(ei_local, rx_status, dev,
+ "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
+ rx_frame.count, rx_frame.status,
+ rx_frame.next);
dev->stats.rx_errors++;
dev->stats.rx_length_errors++;
} else if ((pkt_stat & 0x0F) == ENRSR_RXOK) {
@@ -718,9 +722,9 @@ static void ei_receive(struct net_device *dev)
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
- if (ei_debug > 1)
- netdev_dbg(dev, "Couldn't allocate a sk_buff of size %d\n",
- pkt_len);
+ netif_err(ei_local, rx_err, dev,
+ "Couldn't allocate a sk_buff of size %d\n",
+ pkt_len);
dev->stats.rx_dropped++;
break;
} else {
@@ -736,10 +740,10 @@ static void ei_receive(struct net_device *dev)
dev->stats.multicast++;
}
} else {
- if (ei_debug)
- netdev_dbg(dev, "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
- rx_frame.status, rx_frame.next,
- rx_frame.count);
+ netif_err(ei_local, rx_err, dev,
+ "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+ rx_frame.status, rx_frame.next,
+ rx_frame.count);
dev->stats.rx_errors++;
/* NB: The NIC counts CRC, frame and missed errors. */
if (pkt_stat & ENRSR_FO)
@@ -789,8 +793,7 @@ static void ei_rx_overrun(struct net_device *dev)
was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
- if (ei_debug > 1)
- netdev_dbg(dev, "Receiver overrun\n");
+ netif_dbg(ei_local, rx_err, dev, "Receiver overrun\n");
dev->stats.rx_over_errors++;
/*
@@ -965,8 +968,9 @@ static void __ei_set_multicast_list(struct net_device *dev)
static void ethdev_setup(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
- if (ei_debug > 1)
- printk(version);
+
+ if ((msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+ pr_info("%s", version);
ether_setup(dev);
@@ -1035,9 +1039,10 @@ static void __NS8390_init(struct net_device *dev, int startp)
ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
for (i = 0; i < 6; i++) {
ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
- if (ei_debug > 1 &&
+ if ((netif_msg_probe(ei_local)) &&
ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i])
- netdev_err(dev, "Hw. address read/write mismap %d\n", i);
+ netdev_err(dev,
+ "Hw. address read/write mismap %d\n", i);
}
ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
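
The netif_dbg()/netif_err() calls introduced throughout lib8390.c bundle the bitmap test and the print into a single macro. Reduced to plain C so the expansion is visible; the real macros live in include/linux/netdevice.h, dev is really a struct net_device pointer there, and printf stands in for netdev_dbg():

    #include <stdio.h>

    #define NETIF_MSG_INTR  0x0200

    struct ei_device { unsigned int msg_enable; };

    #define netif_msg_intr(p)   ((p)->msg_enable & NETIF_MSG_INTR)
    #define netif_dbg(priv, type, dev, fmt, ...)                \
        do {                                                    \
            if (netif_msg_##type(priv))                         \
                printf("%s: " fmt, (dev), ##__VA_ARGS__);       \
        } while (0)

    int main(void)
    {
        struct ei_device ei = { .msg_enable = NETIF_MSG_INTR };

        /* prints: "eth0: interrupt(isr=0x42)" */
        netif_dbg(&ei, intr, "eth0", "interrupt(isr=%#2.2x)\n", 0x42);
        return 0;
    }

Because the type token is pasted into netif_msg_##type, each call site names its message class (intr, tx_err, rx_status, ...) right where the message is issued.
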
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index 88ccc8b14f0a..90e825e8abfe 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -167,6 +167,7 @@ static void slow_sane_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
static void word_memcpy_tocard(unsigned long tp, const void *fp, int count);
static void word_memcpy_fromcard(void *tp, unsigned long fp, int count);
+static u32 mac8390_msg_enable;
static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
{
@@ -402,6 +403,7 @@ struct net_device * __init mac8390_probe(int unit)
struct net_device *dev;
struct nubus_dev *ndev = NULL;
int err = -ENODEV;
+ struct ei_device *ei_local;
static unsigned int slots;
@@ -440,6 +442,10 @@ struct net_device * __init mac8390_probe(int unit)
if (!ndev)
goto out;
+
+ ei_local = netdev_priv(dev);
+ ei_local->msg_enable = mac8390_msg_enable;
+
err = register_netdev(dev);
if (err)
goto out;
@@ -660,19 +666,22 @@ static int mac8390_close(struct net_device *dev)
static void mac8390_no_reset(struct net_device *dev)
{
+ struct ei_device *ei_local = netdev_priv(dev);
+
ei_status.txing = 0;
- if (ei_debug > 1)
- pr_info("reset not supported\n");
+ netif_info(ei_local, hw, dev, "reset not supported\n");
}
static void interlan_reset(struct net_device *dev)
{
unsigned char *target = nubus_slot_addr(IRQ2SLOT(dev->irq));
- if (ei_debug > 1)
- pr_info("Need to reset the NS8390 t=%lu...", jiffies);
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ netif_info(ei_local, hw, dev, "Need to reset the NS8390 t=%lu...",
+ jiffies);
ei_status.txing = 0;
target[0xC0000] = 0;
- if (ei_debug > 1)
+ if (netif_msg_hw(ei_local))
pr_cont("reset complete\n");
}
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index 230efd6fa5d5..df0ffca91c1c 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -39,6 +39,7 @@ static const char version[] =
#define NESM_START_PG 0x40 /* First page of TX buffer */
#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+static u32 mcf8390_msg_enable;
#ifdef NE2000_ODDOFFSET
/*
@@ -153,9 +154,9 @@ static void mcf8390_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
u32 addr = dev->base_addr;
+ struct ei_device *ei_local = netdev_priv(dev);
- if (ei_debug > 1)
- netdev_dbg(dev, "resetting the 8390 t=%ld...\n", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies);
ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
@@ -288,7 +289,7 @@ static void mcf8390_block_output(struct net_device *dev, int count,
dma_start = jiffies;
while ((ei_inb(addr + NE_EN0_ISR) & ENISR_RDC) == 0) {
if (time_after(jiffies, dma_start + 2 * HZ / 100)) { /* 20ms */
- netdev_err(dev, "timeout waiting for Tx RDC\n");
+ netdev_warn(dev, "timeout waiting for Tx RDC\n");
mcf8390_reset_8390(dev);
__NS8390_init(dev, 1);
break;
@@ -437,6 +438,7 @@ static int mcf8390_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
platform_set_drvdata(pdev, dev);
ei_local = netdev_priv(dev);
+ ei_local->msg_enable = mcf8390_msg_enable;
dev->irq = irq->start;
dev->base_addr = mem->start;
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index b2e840513735..58eaa8f34942 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -71,14 +71,17 @@ static struct platform_device *pdev_ne[MAX_NE_CARDS];
static int io[MAX_NE_CARDS];
static int irq[MAX_NE_CARDS];
static int bad[MAX_NE_CARDS];
+static u32 ne_msg_enable;
#ifdef MODULE
module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(bad, int, NULL, 0);
+module_param_named(msg_enable, ne_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
MODULE_PARM_DESC(io, "I/O base address(es),required");
MODULE_PARM_DESC(irq, "IRQ number(s)");
MODULE_PARM_DESC(bad, "Accept card(s) with bad signatures");
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
MODULE_DESCRIPTION("NE1000/NE2000 ISA/PnP Ethernet driver");
MODULE_LICENSE("GPL");
#endif /* MODULE */
@@ -214,8 +217,8 @@ static int __init do_ne_probe(struct net_device *dev)
if (base_addr > 0x1ff) { /* Check a single specified location. */
int ret = ne_probe1(dev, base_addr);
if (ret)
- printk(KERN_WARNING "ne.c: No NE*000 card found at "
- "i/o = %#lx\n", base_addr);
+ netdev_warn(dev, "ne.c: No NE*000 card found at "
+ "i/o = %#lx\n", base_addr);
return ret;
}
else if (base_addr != 0) /* Don't probe at all. */
@@ -264,11 +267,14 @@ static int __init ne_probe_isapnp(struct net_device *dev)
/* found it */
dev->base_addr = pnp_port_start(idev, 0);
dev->irq = pnp_irq(idev, 0);
- printk(KERN_INFO "ne.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
- (char *) isapnp_clone_list[i].driver_data,
- dev->base_addr, dev->irq);
+ netdev_info(dev,
+ "ne.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
+ (char *) isapnp_clone_list[i].driver_data,
+ dev->base_addr, dev->irq);
if (ne_probe1(dev, dev->base_addr) != 0) { /* Shouldn't happen. */
- printk(KERN_ERR "ne.c: Probe of ISAPnP card at %#lx failed.\n", dev->base_addr);
+ netdev_err(dev,
+ "ne.c: Probe of ISAPnP card at %#lx failed.\n",
+ dev->base_addr);
pnp_device_detach(idev);
return -ENXIO;
}
@@ -293,6 +299,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
int neX000, ctron, copam, bad_card;
int reg0, ret;
static unsigned version_printed;
+ struct ei_device *ei_local = netdev_priv(dev);
if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME))
return -EBUSY;
@@ -319,10 +326,10 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
}
}
- if (ei_debug && version_printed++ == 0)
- printk(KERN_INFO "%s%s", version1, version2);
+ if ((ne_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+ netdev_info(dev, "%s%s", version1, version2);
- printk(KERN_INFO "NE*000 ethercard probe at %#3lx:", ioaddr);
+ netdev_info(dev, "NE*000 ethercard probe at %#3lx:", ioaddr);
/* A user with a poor card that fails to ack the reset, or that
does not have a valid 0x57,0x57 signature can still use this
@@ -343,10 +350,10 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
while ((inb_p(ioaddr + EN0_ISR) & ENISR_RESET) == 0)
if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
if (bad_card) {
- printk(" (warning: no reset ack)");
+ pr_cont(" (warning: no reset ack)");
break;
} else {
- printk(" not found (no reset ack).\n");
+ pr_cont(" not found (no reset ack).\n");
ret = -ENODEV;
goto err_out;
}
@@ -454,13 +461,13 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
}
if (bad_clone_list[i].name8 == NULL)
{
- printk(" not found (invalid signature %2.2x %2.2x).\n",
+ pr_cont(" not found (invalid signature %2.2x %2.2x).\n",
SA_prom[14], SA_prom[15]);
ret = -ENXIO;
goto err_out;
}
#else
- printk(" not found.\n");
+ pr_cont(" not found.\n");
ret = -ENXIO;
goto err_out;
#endif
@@ -476,15 +483,15 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
mdelay(10); /* wait 10ms for interrupt to propagate */
outb_p(0x00, ioaddr + EN0_IMR); /* Mask it again. */
dev->irq = probe_irq_off(cookie);
- if (ei_debug > 2)
- printk(" autoirq is %d\n", dev->irq);
+ if (netif_msg_probe(ei_local))
+ pr_cont(" autoirq is %d", dev->irq);
} else if (dev->irq == 2)
/* Fixup for users that don't know that IRQ 2 is really IRQ 9,
or don't know which one to set. */
dev->irq = 9;
if (! dev->irq) {
- printk(" failed to detect IRQ line.\n");
+ pr_cont(" failed to detect IRQ line.\n");
ret = -EAGAIN;
goto err_out;
}
@@ -493,7 +500,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
share and the board will usually be enabled. */
ret = request_irq(dev->irq, eip_interrupt, 0, name, dev);
if (ret) {
- printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
+ pr_cont(" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
goto err_out;
}
@@ -512,7 +519,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
}
#endif
- printk("%pM\n", dev->dev_addr);
+ pr_cont("%pM\n", dev->dev_addr);
ei_status.name = name;
ei_status.tx_start_page = start_page;
@@ -536,11 +543,12 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
dev->netdev_ops = &eip_netdev_ops;
NS8390p_init(dev, 0);
+ ei_local->msg_enable = ne_msg_enable;
ret = register_netdev(dev);
if (ret)
goto out_irq;
- printk(KERN_INFO "%s: %s found at %#lx, using IRQ %d.\n",
- dev->name, name, ioaddr, dev->irq);
+ netdev_info(dev, "%s found at %#lx, using IRQ %d.\n",
+ name, ioaddr, dev->irq);
return 0;
out_irq:
@@ -556,9 +564,9 @@ err_out:
static void ne_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
+ struct ei_device *ei_local = netdev_priv(dev);
- if (ei_debug > 1)
- printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies);
/* DON'T change these to inb_p/outb_p or reset will fail on clones. */
outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
@@ -569,7 +577,7 @@ static void ne_reset_8390(struct net_device *dev)
/* This check _should_not_ be necessary, omit eventually. */
while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
- printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name);
+ netdev_err(dev, "ne_reset_8390() did not complete.\n");
break;
}
outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
@@ -587,9 +595,9 @@ static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, i
if (ei_status.dmaing)
{
- printk(KERN_EMERG "%s: DMAing conflict in ne_get_8390_hdr "
- "[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in ne_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d].\n",
+ ei_status.dmaing, ei_status.irqlock);
return;
}
@@ -621,6 +629,7 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
{
#ifdef NE_SANITY_CHECK
int xfer_count = count;
+ struct ei_device *ei_local = netdev_priv(dev);
#endif
int nic_base = dev->base_addr;
char *buf = skb->data;
@@ -628,9 +637,9 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing)
{
- printk(KERN_EMERG "%s: DMAing conflict in ne_block_input "
- "[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in ne_block_input "
+ "[DMAstat:%d][irqlock:%d].\n",
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -660,7 +669,7 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
this message you either 1) have a slightly incompatible clone
or 2) have noise/speed problems with your bus. */
- if (ei_debug > 1)
+ if (netif_msg_rx_status(ei_local))
{
/* DMA termination address check... */
int addr, tries = 20;
@@ -674,9 +683,9 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
break;
} while (--tries > 0);
if (tries <= 0)
- printk(KERN_WARNING "%s: RX transfer address mismatch,"
- "%#4.4x (expected) vs. %#4.4x (actual).\n",
- dev->name, ring_offset + xfer_count, addr);
+ netdev_warn(dev, "RX transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ ring_offset + xfer_count, addr);
}
#endif
outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
@@ -690,6 +699,7 @@ static void ne_block_output(struct net_device *dev, int count,
unsigned long dma_start;
#ifdef NE_SANITY_CHECK
int retries = 0;
+ struct ei_device *ei_local = netdev_priv(dev);
#endif
/* Round the count up for word writes. Do we need to do this?
@@ -702,9 +712,9 @@ static void ne_block_output(struct net_device *dev, int count,
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing)
{
- printk(KERN_EMERG "%s: DMAing conflict in ne_block_output."
- "[DMAstat:%d][irqlock:%d]\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in ne_block_output."
+ "[DMAstat:%d][irqlock:%d]\n",
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -751,7 +761,7 @@ retry:
/* This was for the ALPHA version only, but enough people have
been encountering problems so it is still here. */
- if (ei_debug > 1)
+ if (netif_msg_tx_queued(ei_local))
{
/* DMA termination address check... */
int addr, tries = 20;
@@ -765,9 +775,9 @@ retry:
if (tries <= 0)
{
- printk(KERN_WARNING "%s: Tx packet transfer address mismatch,"
- "%#4.4x (expected) vs. %#4.4x (actual).\n",
- dev->name, (start_page << 8) + count, addr);
+ netdev_warn(dev, "Tx packet transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ (start_page << 8) + count, addr);
if (retries++ == 0)
goto retry;
}
@@ -776,7 +786,7 @@ retry:
while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
- printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
+ netdev_warn(dev, "timeout waiting for Tx RDC.\n");
ne_reset_8390(dev);
NS8390p_init(dev, 1);
break;
@@ -936,8 +946,8 @@ int __init init_module(void)
retval = platform_driver_probe(&ne_driver, ne_drv_probe);
if (retval) {
if (io[0] == 0)
- printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\""
- " value(s) for ISA cards.\n");
+ pr_notice("ne.c: You must supply \"io=0xNNN\""
+ " value(s) for ISA cards.\n");
ne_loop_rm_unreg(1);
return retval;
}
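
ne.c also gains a module parameter for the default bitmap. The (S_IRUSR|S_IRGRP|S_IROTH) mask passed to module_param_named() controls only the sysfs view: the value can be given at load time (msg_enable=0x...) but afterwards appears read-only under /sys/module/ne/parameters/msg_enable, with runtime changes going through ethtool instead. A two-line check of what that mask amounts to, as ordinary C:

    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
        /* S_IRUSR|S_IRGRP|S_IROTH: read bits only, no write bits */
        printf("%o\n", S_IRUSR | S_IRGRP | S_IROTH);  /* prints 444 */
        return 0;
    }
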
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index fc14a85e4d5f..f395c967262e 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -33,8 +33,6 @@
/* The user-configurable values.
These may be modified when a driver module is loaded.*/
-static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
-
#define MAX_UNITS 8 /* More are supported, limit only on options */
/* Used to pass the full-duplex flag, etc. */
static int full_duplex[MAX_UNITS];
@@ -60,6 +58,8 @@ static int options[MAX_UNITS];
#include "8390.h"
+static u32 ne2k_msg_enable;
+
/* These identify the driver base version and may not be removed. */
static const char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
@@ -76,10 +76,10 @@ MODULE_AUTHOR("Donald Becker / Paul Gortmaker");
MODULE_DESCRIPTION("PCI NE2000 clone driver");
MODULE_LICENSE("GPL");
-module_param(debug, int, 0);
+module_param_named(msg_enable, ne2k_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
-MODULE_PARM_DESC(debug, "debug level (1-2)");
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
MODULE_PARM_DESC(options, "Bit 5: full duplex");
MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
@@ -226,6 +226,7 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
static unsigned int fnd_cnt;
long ioaddr;
int flags = pci_clone_list[chip_idx].flags;
+ struct ei_device *ei_local;
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
@@ -280,6 +281,8 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
goto err_out_free_res;
}
dev->netdev_ops = &ne2k_netdev_ops;
+ ei_local = netdev_priv(dev);
+ ei_local->msg_enable = ne2k_msg_enable;
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -379,9 +382,9 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
if (i)
goto err_out_free_netdev;
- printk("%s: %s found at %#lx, IRQ %d, %pM.\n",
- dev->name, pci_clone_list[chip_idx].name, ioaddr, dev->irq,
- dev->dev_addr);
+ netdev_info(dev, "%s found at %#lx, IRQ %d, %pM.\n",
+ pci_clone_list[chip_idx].name, ioaddr, dev->irq,
+ dev->dev_addr);
return 0;
@@ -450,9 +453,10 @@ static int ne2k_pci_close(struct net_device *dev)
static void ne2k_pci_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
+ struct ei_device *ei_local = netdev_priv(dev);
- if (debug > 1) printk("%s: Resetting the 8390 t=%ld...",
- dev->name, jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n",
+ jiffies);
outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
@@ -462,7 +466,7 @@ static void ne2k_pci_reset_8390(struct net_device *dev)
/* This check _should_not_ be necessary, omit eventually. */
while ((inb(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
if (jiffies - reset_start_time > 2) {
- printk("%s: ne2k_pci_reset_8390() did not complete.\n", dev->name);
+ netdev_err(dev, "ne2k_pci_reset_8390() did not complete.\n");
break;
}
outb(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
@@ -479,9 +483,9 @@ static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk("%s: DMAing conflict in ne2k_pci_get_8390_hdr "
+ netdev_err(dev, "DMAing conflict in ne2k_pci_get_8390_hdr "
"[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ ei_status.dmaing, ei_status.irqlock);
return;
}
@@ -517,9 +521,9 @@ static void ne2k_pci_block_input(struct net_device *dev, int count,
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk("%s: DMAing conflict in ne2k_pci_block_input "
+ netdev_err(dev, "DMAing conflict in ne2k_pci_block_input "
"[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -572,9 +576,9 @@ static void ne2k_pci_block_output(struct net_device *dev, int count,
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk("%s: DMAing conflict in ne2k_pci_block_output."
+ netdev_err(dev, "DMAing conflict in ne2k_pci_block_output."
"[DMAstat:%d][irqlock:%d]\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -619,7 +623,7 @@ static void ne2k_pci_block_output(struct net_device *dev, int count,
while ((inb(nic_base + EN0_ISR) & ENISR_RDC) == 0)
if (jiffies - dma_start > 2) { /* Avoid clock roll-over. */
- printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
+ netdev_warn(dev, "timeout waiting for Tx RDC.\n");
ne2k_pci_reset_8390(dev);
NS8390_init(dev,1);
break;
@@ -640,8 +644,24 @@ static void ne2k_pci_get_drvinfo(struct net_device *dev,
strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
+static u32 ne2k_pci_get_msglevel(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ return ei_local->msg_enable;
+}
+
+static void ne2k_pci_set_msglevel(struct net_device *dev, u32 v)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ ei_local->msg_enable = v;
+}
+
static const struct ethtool_ops ne2k_pci_ethtool_ops = {
.get_drvinfo = ne2k_pci_get_drvinfo,
+ .get_msglevel = ne2k_pci_get_msglevel,
+ .set_msglevel = ne2k_pci_set_msglevel,
};
static void ne2k_pci_remove_one(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 46c5aadaca8e..eea33d667fb0 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -67,7 +67,7 @@
#define PCNET_RDC_TIMEOUT (2*HZ/100) /* Max wait in jiffies for Tx RDC */
static const char *if_names[] = { "auto", "10baseT", "10base2"};
-
+static u32 pcnet_msg_enable;
/*====================================================================*/
@@ -558,6 +558,7 @@ static int pcnet_config(struct pcmcia_device *link)
int start_pg, stop_pg, cm_offset;
int has_shmem = 0;
hw_info_t *local_hw_info;
+ struct ei_device *ei_local;
dev_dbg(&link->dev, "pcnet_config\n");
@@ -607,6 +608,8 @@ static int pcnet_config(struct pcmcia_device *link)
mii_phy_probe(dev);
SET_NETDEV_DEV(dev, &link->dev);
+ ei_local = netdev_priv(dev);
+ ei_local->msg_enable = pcnet_msg_enable;
if (register_netdev(dev) != 0) {
pr_notice("register_netdev() failed\n");
@@ -616,7 +619,7 @@ static int pcnet_config(struct pcmcia_device *link)
if (info->flags & (IS_DL10019|IS_DL10022)) {
u_char id = inb(dev->base_addr + 0x1a);
netdev_info(dev, "NE2000 (DL100%d rev %02x): ",
- (info->flags & IS_DL10022) ? 22 : 19, id);
+ (info->flags & IS_DL10022) ? 22 : 19, id);
if (info->pna_phy)
pr_cont("PNA, ");
} else {
@@ -1063,9 +1066,9 @@ static void ei_watchdog(u_long arg)
if (info->phy_id == info->eth_phy) {
if (p)
netdev_info(dev, "autonegotiation complete: "
- "%sbaseT-%cD selected\n",
- ((p & 0x0180) ? "100" : "10"),
- ((p & 0x0140) ? 'F' : 'H'));
+ "%sbaseT-%cD selected\n",
+ ((p & 0x0180) ? "100" : "10"),
+ ((p & 0x0140) ? 'F' : 'H'));
else
netdev_info(dev, "link partner did not autonegotiate\n");
}
@@ -1081,7 +1084,7 @@ static void ei_watchdog(u_long arg)
mdio_write(mii_addr, info->phy_id, 0, 0x0400);
info->phy_id ^= info->pna_phy ^ info->eth_phy;
netdev_info(dev, "switched to %s transceiver\n",
- (info->phy_id == info->eth_phy) ? "ethernet" : "PNA");
+ (info->phy_id == info->eth_phy) ? "ethernet" : "PNA");
mdio_write(mii_addr, info->phy_id, 0,
(info->phy_id == info->eth_phy) ? 0x1000 : 0);
info->link_status = 0;
@@ -1128,9 +1131,9 @@ static void dma_get_8390_hdr(struct net_device *dev,
unsigned int nic_base = dev->base_addr;
if (ei_status.dmaing) {
- netdev_notice(dev, "DMAing conflict in dma_block_input."
- "[DMAstat:%1x][irqlock:%1x]\n",
- ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in dma_block_input."
+ "[DMAstat:%1x][irqlock:%1x]\n",
+ ei_status.dmaing, ei_status.irqlock);
return;
}
@@ -1159,13 +1162,14 @@ static void dma_block_input(struct net_device *dev, int count,
unsigned int nic_base = dev->base_addr;
int xfer_count = count;
char *buf = skb->data;
+ struct ei_device *ei_local = netdev_priv(dev);
- if ((ei_debug > 4) && (count != 4))
+ if ((netif_msg_rx_status(ei_local)) && (count != 4))
netdev_dbg(dev, "[bi=%d]\n", count+4);
if (ei_status.dmaing) {
- netdev_notice(dev, "DMAing conflict in dma_block_input."
- "[DMAstat:%1x][irqlock:%1x]\n",
- ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in dma_block_input."
+ "[DMAstat:%1x][irqlock:%1x]\n",
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -1183,7 +1187,8 @@ static void dma_block_input(struct net_device *dev, int count,
/* This was for the ALPHA version only, but enough people have been
encountering problems that it is still here. */
#ifdef PCMCIA_DEBUG
- if (ei_debug > 4) { /* DMA termination address check... */
+ /* DMA termination address check... */
+ if (netif_msg_rx_status(ei_local)) {
int addr, tries = 20;
do {
/* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
@@ -1196,8 +1201,8 @@ static void dma_block_input(struct net_device *dev, int count,
} while (--tries > 0);
if (tries <= 0)
netdev_notice(dev, "RX transfer address mismatch,"
- "%#4.4x (expected) vs. %#4.4x (actual).\n",
- ring_offset + xfer_count, addr);
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ ring_offset + xfer_count, addr);
}
#endif
outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
@@ -1213,12 +1218,12 @@ static void dma_block_output(struct net_device *dev, int count,
pcnet_dev_t *info = PRIV(dev);
#ifdef PCMCIA_DEBUG
int retries = 0;
+ struct ei_device *ei_local = netdev_priv(dev);
#endif
u_long dma_start;
#ifdef PCMCIA_DEBUG
- if (ei_debug > 4)
- netdev_dbg(dev, "[bo=%d]\n", count);
+ netif_dbg(ei_local, tx_queued, dev, "[bo=%d]\n", count);
#endif
/* Round the count up for word writes. Do we need to do this?
@@ -1227,9 +1232,9 @@ static void dma_block_output(struct net_device *dev, int count,
if (count & 0x01)
count++;
if (ei_status.dmaing) {
- netdev_notice(dev, "DMAing conflict in dma_block_output."
- "[DMAstat:%1x][irqlock:%1x]\n",
- ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in dma_block_output."
+ "[DMAstat:%1x][irqlock:%1x]\n",
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -1256,7 +1261,8 @@ static void dma_block_output(struct net_device *dev, int count,
#ifdef PCMCIA_DEBUG
/* This was for the ALPHA version only, but enough people have been
encountering problems that it is still here. */
- if (ei_debug > 4) { /* DMA termination address check... */
+ /* DMA termination address check... */
+ if (netif_msg_tx_queued(ei_local)) {
int addr, tries = 20;
do {
int high = inb_p(nic_base + EN0_RSARHI);
@@ -1267,8 +1273,8 @@ static void dma_block_output(struct net_device *dev, int count,
} while (--tries > 0);
if (tries <= 0) {
netdev_notice(dev, "Tx packet transfer address mismatch,"
- "%#4.4x (expected) vs. %#4.4x (actual).\n",
- (start_page << 8) + count, addr);
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ (start_page << 8) + count, addr);
if (retries++ == 0)
goto retry;
}
@@ -1277,10 +1283,10 @@ static void dma_block_output(struct net_device *dev, int count,
while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + PCNET_RDC_TIMEOUT)) {
- netdev_notice(dev, "timeout waiting for Tx RDC.\n");
- pcnet_reset_8390(dev);
- NS8390_init(dev, 1);
- break;
+ netdev_warn(dev, "timeout waiting for Tx RDC.\n");
+ pcnet_reset_8390(dev);
+ NS8390_init(dev, 1);
+ break;
}
outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
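
Worth noting for hunks like dma_block_output() above: netif_dbg() ultimately resolves to netdev_dbg(), which is compiled away entirely unless DEBUG or dynamic debug is in play, so in a production build these sites cost at most the bitmap test. A stripped-down model of that behaviour, ignoring CONFIG_DYNAMIC_DEBUG and using printf as a stand-in:

    #include <stdio.h>

    #ifdef DEBUG
    #define netdev_dbg(fmt, ...)    printf(fmt, ##__VA_ARGS__)
    #else
    #define netdev_dbg(fmt, ...)    do { } while (0)  /* compiled out */
    #endif

    int main(void)
    {
        netdev_dbg("[bo=%d]\n", 64);  /* prints only when built with -DDEBUG */
        return 0;
    }

The netif_err()/netif_warn()/netif_info() variants used elsewhere in the series are not compiled out; only their emission is gated by the bitmap.
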
diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c
index b0fbce39661a..139385dcdaa7 100644
--- a/drivers/net/ethernet/8390/smc-ultra.c
+++ b/drivers/net/ethernet/8390/smc-ultra.c
@@ -111,6 +111,7 @@ static struct isapnp_device_id ultra_device_ids[] __initdata = {
MODULE_DEVICE_TABLE(isapnp, ultra_device_ids);
#endif
+static u32 ultra_msg_enable;
#define START_PG 0x00 /* First page of TX buffer */
@@ -211,6 +212,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
unsigned char num_pages, irqreg, addr, piomode;
unsigned char idreg = inb(ioaddr + 7);
unsigned char reg4 = inb(ioaddr + 4) & 0x7f;
+ struct ei_device *ei_local = netdev_priv(dev);
if (!request_region(ioaddr, ULTRA_IO_EXTENT, DRV_NAME))
return -EBUSY;
@@ -232,16 +234,16 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
goto out;
}
- if (ei_debug && version_printed++ == 0)
- printk(version);
+ if ((ultra_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+ netdev_info(dev, version);
model_name = (idreg & 0xF0) == 0x20 ? "SMC Ultra" : "SMC EtherEZ";
for (i = 0; i < 6; i++)
dev->dev_addr[i] = inb(ioaddr + 8 + i);
- printk("%s: %s at %#3x, %pM", dev->name, model_name,
- ioaddr, dev->dev_addr);
+ netdev_info(dev, "%s at %#3x, %pM", model_name,
+ ioaddr, dev->dev_addr);
/* Switch from the station address to the alternate register set and
read the useful registers there. */
@@ -265,7 +267,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
irq = irqmap[((irqreg & 0x40) >> 4) + ((irqreg & 0x0c) >> 2)];
if (irq == 0) {
- printk(", failed to detect IRQ line.\n");
+ pr_cont(", failed to detect IRQ line.\n");
retval = -EAGAIN;
goto out;
}
@@ -296,7 +298,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
ei_status.mem = ioremap(dev->mem_start, (ei_status.stop_page - START_PG)*256);
if (!ei_status.mem) {
- printk(", failed to ioremap.\n");
+ pr_cont(", failed to ioremap.\n");
retval = -ENOMEM;
goto out;
}
@@ -304,14 +306,15 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
dev->mem_end = dev->mem_start + (ei_status.stop_page - START_PG)*256;
if (piomode) {
- printk(",%s IRQ %d programmed-I/O mode.\n",
- eeprom_irq ? "EEPROM" : "assigned ", dev->irq);
+ pr_cont(", %s IRQ %d programmed-I/O mode.\n",
+ eeprom_irq ? "EEPROM" : "assigned ", dev->irq);
ei_status.block_input = &ultra_pio_input;
ei_status.block_output = &ultra_pio_output;
ei_status.get_8390_hdr = &ultra_pio_get_hdr;
} else {
- printk(",%s IRQ %d memory %#lx-%#lx.\n", eeprom_irq ? "" : "assigned ",
- dev->irq, dev->mem_start, dev->mem_end-1);
+ pr_cont(", %s IRQ %d memory %#lx-%#lx.\n",
+ eeprom_irq ? "" : "assigned ", dev->irq, dev->mem_start,
+ dev->mem_end-1);
ei_status.block_input = &ultra_block_input;
ei_status.block_output = &ultra_block_output;
ei_status.get_8390_hdr = &ultra_get_8390_hdr;
@@ -320,6 +323,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
dev->netdev_ops = &ultra_netdev_ops;
NS8390_init(dev, 0);
+ ei_local->msg_enable = ultra_msg_enable;
retval = register_netdev(dev);
if (retval)
@@ -356,12 +360,15 @@ static int __init ultra_probe_isapnp(struct net_device *dev)
/* found it */
dev->base_addr = pnp_port_start(idev, 0);
dev->irq = pnp_irq(idev, 0);
- printk(KERN_INFO "smc-ultra.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
- (char *) ultra_device_ids[i].driver_data,
- dev->base_addr, dev->irq);
+ netdev_info(dev,
+ "smc-ultra.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
+ (char *) ultra_device_ids[i].driver_data,
+ dev->base_addr, dev->irq);
if (ultra_probe1(dev, dev->base_addr) != 0) { /* Shouldn't happen. */
- printk(KERN_ERR "smc-ultra.c: Probe of ISAPnP card at %#lx failed.\n", dev->base_addr);
- pnp_device_detach(idev);
+ netdev_err(dev,
+ "smc-ultra.c: Probe of ISAPnP card at %#lx failed.\n",
+ dev->base_addr);
+ pnp_device_detach(idev);
return -ENXIO;
}
ei_status.priv = (unsigned long)idev;
@@ -412,9 +419,10 @@ static void
ultra_reset_8390(struct net_device *dev)
{
int cmd_port = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC base addr */
+ struct ei_device *ei_local = netdev_priv(dev);
outb(ULTRA_RESET, cmd_port);
- if (ei_debug > 1) printk("resetting Ultra, t=%ld...", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting Ultra, t=%ld...\n", jiffies);
ei_status.txing = 0;
outb(0x00, cmd_port); /* Disable shared memory for safety. */
@@ -424,7 +432,7 @@ ultra_reset_8390(struct net_device *dev)
else
outb(0x01, cmd_port + 6); /* Enable interrupts and memory. */
- if (ei_debug > 1) printk("reset done\n");
+ netif_dbg(ei_local, hw, dev, "reset done\n");
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
@@ -530,11 +538,11 @@ static int
ultra_close_card(struct net_device *dev)
{
int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* CMDREG */
+ struct ei_device *ei_local = netdev_priv(dev);
netif_stop_queue(dev);
- if (ei_debug > 1)
- printk("%s: Shutting down ethercard.\n", dev->name);
+ netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard.\n");
outb(0x00, ioaddr + 6); /* Disable interrupts. */
free_irq(dev->irq, dev);
@@ -556,8 +564,10 @@ static int irq[MAX_ULTRA_CARDS];
module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
+module_param_named(msg_enable, ultra_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
MODULE_PARM_DESC(io, "I/O base address(es)");
MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
MODULE_DESCRIPTION("SMC Ultra/EtherEZ ISA/PnP Ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c
index 8df4c4157230..aca957d4e121 100644
--- a/drivers/net/ethernet/8390/stnic.c
+++ b/drivers/net/ethernet/8390/stnic.c
@@ -69,6 +69,11 @@ static void stnic_block_output (struct net_device *dev, int count,
static void stnic_init (struct net_device *dev);
+static u32 stnic_msg_enable;
+
+module_param_named(msg_enable, stnic_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
+
/* SH7750 specific read/write io. */
static inline void
STNIC_DELAY (void)
@@ -100,6 +105,7 @@ static int __init stnic_probe(void)
{
struct net_device *dev;
int i, err;
+ struct ei_device *ei_local;
/* If we are not running on a SolutionEngine, give up now */
if (! MACH_SE)
@@ -125,10 +131,10 @@ static int __init stnic_probe(void)
share and the board will usually be enabled. */
err = request_irq (dev->irq, ei_interrupt, 0, DRV_NAME, dev);
if (err) {
- printk (KERN_EMERG " unable to get IRQ %d.\n", dev->irq);
- free_netdev(dev);
- return err;
- }
+ netdev_emerg(dev, " unable to get IRQ %d.\n", dev->irq);
+ free_netdev(dev);
+ return err;
+ }
ei_status.name = dev->name;
ei_status.word16 = 1;
@@ -147,6 +153,8 @@ static int __init stnic_probe(void)
ei_status.block_output = &stnic_block_output;
stnic_init (dev);
+ ei_local = netdev_priv(dev);
+ ei_local->msg_enable = stnic_msg_enable;
err = register_netdev(dev);
if (err) {
@@ -156,7 +164,7 @@ static int __init stnic_probe(void)
}
stnic_dev = dev;
- printk (KERN_INFO "NS ST-NIC 83902A\n");
+ netdev_info(dev, "NS ST-NIC 83902A\n");
return 0;
}
@@ -164,10 +172,11 @@ static int __init stnic_probe(void)
static void
stnic_reset (struct net_device *dev)
{
+ struct ei_device *ei_local = netdev_priv(dev);
+
*(vhalf *) PA_83902_RST = 0;
udelay (5);
- if (ei_debug > 1)
- printk (KERN_WARNING "8390 reset done (%ld).\n", jiffies);
+ netif_warn(ei_local, hw, dev, "8390 reset done (%ld).\n", jiffies);
*(vhalf *) PA_83902_RST = ~0;
udelay (5);
}
@@ -176,6 +185,8 @@ static void
stnic_get_hdr (struct net_device *dev, struct e8390_pkt_hdr *hdr,
int ring_page)
{
+ struct ei_device *ei_local = netdev_priv(dev);
+
half buf[2];
STNIC_WRITE (PG0_RSAR0, 0);
@@ -196,8 +207,7 @@ stnic_get_hdr (struct net_device *dev, struct e8390_pkt_hdr *hdr,
hdr->count = ((buf[1] >> 8) & 0xff) | (buf[1] << 8);
#endif
- if (ei_debug > 1)
- printk (KERN_DEBUG "ring %x status %02x next %02x count %04x.\n",
+ netif_dbg(ei_local, probe, dev, "ring %x status %02x next %02x count %04x.\n",
ring_page, hdr->status, hdr->next, hdr->count);
STNIC_WRITE (STNIC_CR, CR_RDMA | CR_PG0 | CR_STA);
diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c
index 03eb3eed49fa..dd7d816bde52 100644
--- a/drivers/net/ethernet/8390/wd.c
+++ b/drivers/net/ethernet/8390/wd.c
@@ -60,6 +60,7 @@ static void wd_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
static int wd_close(struct net_device *dev);
+static u32 wd_msg_enable;
#define WD_START_PG 0x00 /* First page of TX buffer */
#define WD03_STOP_PG 0x20 /* Last page +1 of RX ring */
@@ -170,6 +171,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
int word16 = 0; /* 0 = 8 bit, 1 = 16 bit */
const char *model_name;
static unsigned version_printed;
+ struct ei_device *ei_local = netdev_priv(dev);
for (i = 0; i < 8; i++)
checksum += inb(ioaddr + 8 + i);
@@ -180,19 +182,19 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
/* Check for semi-valid mem_start/end values if supplied. */
if ((dev->mem_start % 0x2000) || (dev->mem_end % 0x2000)) {
- printk(KERN_WARNING "wd.c: user supplied mem_start or mem_end not on 8kB boundary - ignored.\n");
+ netdev_warn(dev,
+ "wd.c: user supplied mem_start or mem_end not on 8kB boundary - ignored.\n");
dev->mem_start = 0;
dev->mem_end = 0;
}
- if (ei_debug && version_printed++ == 0)
- printk(version);
+ if ((wd_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+ netdev_info(dev, version);
for (i = 0; i < 6; i++)
dev->dev_addr[i] = inb(ioaddr + 8 + i);
- printk("%s: WD80x3 at %#3x, %pM",
- dev->name, ioaddr, dev->dev_addr);
+ netdev_info(dev, "WD80x3 at %#3x, %pM", ioaddr, dev->dev_addr);
/* The following PureData probe code was contributed by
Mike Jagdis <jaggy@purplet.demon.co.uk>. Puredata does software
@@ -244,8 +246,9 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
}
#ifndef final_version
if ( !ancient && (inb(ioaddr+1) & 0x01) != (word16 & 0x01))
- printk("\nWD80?3: Bus width conflict, %d (probe) != %d (reg report).",
- word16 ? 16 : 8, (inb(ioaddr+1) & 0x01) ? 16 : 8);
+ pr_cont("\nWD80?3: Bus width conflict, %d (probe) != %d (reg report).",
+ word16 ? 16 : 8,
+ (inb(ioaddr+1) & 0x01) ? 16 : 8);
#endif
}
@@ -259,7 +262,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
if (reg0 == 0xff || reg0 == 0) {
/* Future plan: this could check a few likely locations first. */
dev->mem_start = 0xd0000;
- printk(" assigning address %#lx", dev->mem_start);
+ pr_cont(" assigning address %#lx", dev->mem_start);
} else {
int high_addr_bits = inb(ioaddr+WD_CMDREG5) & 0x1f;
/* Some boards don't have the register 5 -- it returns 0xff. */
@@ -297,8 +300,8 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
outb_p(0x00, nic_addr+EN0_IMR); /* Mask all intrs. again. */
- if (ei_debug > 2)
- printk(" autoirq is %d", dev->irq);
+ if (netif_msg_drv(ei_local))
+ pr_cont(" autoirq is %d", dev->irq);
if (dev->irq < 2)
dev->irq = word16 ? 10 : 5;
} else
@@ -310,7 +313,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
share and the board will usually be enabled. */
i = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev);
if (i) {
- printk (" unable to get IRQ %d.\n", dev->irq);
+ pr_cont(" unable to get IRQ %d.\n", dev->irq);
return i;
}
@@ -338,8 +341,8 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
return -ENOMEM;
}
- printk(" %s, IRQ %d, shared memory at %#lx-%#lx.\n",
- model_name, dev->irq, dev->mem_start, dev->mem_end-1);
+ pr_cont(" %s, IRQ %d, shared memory at %#lx-%#lx.\n",
+ model_name, dev->irq, dev->mem_start, dev->mem_end-1);
ei_status.reset_8390 = wd_reset_8390;
ei_status.block_input = wd_block_input;
@@ -348,6 +351,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
dev->netdev_ops = &wd_netdev_ops;
NS8390_init(dev, 0);
+ ei_local->msg_enable = wd_msg_enable;
#if 1
/* Enable interrupt generation on softconfig cards -- M.U */
@@ -385,9 +389,11 @@ static void
wd_reset_8390(struct net_device *dev)
{
int wd_cmd_port = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+ struct ei_device *ei_local = netdev_priv(dev);
outb(WD_RESET, wd_cmd_port);
- if (ei_debug > 1) printk("resetting the WD80x3 t=%lu...", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting the WD80x3 t=%lu...\n",
+ jiffies);
ei_status.txing = 0;
/* Set up the ASIC registers, just in case something changed them. */
@@ -395,7 +401,7 @@ wd_reset_8390(struct net_device *dev)
if (ei_status.word16)
outb(NIC16 | ((dev->mem_start>>19) & 0x1f), wd_cmd_port+WD_CMDREG5);
- if (ei_debug > 1) printk("reset done\n");
+ netif_dbg(ei_local, hw, dev, "reset done\n");
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
@@ -474,9 +480,9 @@ static int
wd_close(struct net_device *dev)
{
int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+ struct ei_device *ei_local = netdev_priv(dev);
- if (ei_debug > 1)
- printk("%s: Shutting down ethercard.\n", dev->name);
+ netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard.\n");
ei_close(dev);
/* Change from 16-bit to 8-bit shared memory so reboot works. */
@@ -502,10 +508,12 @@ module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(mem, int, NULL, 0);
module_param_array(mem_end, int, NULL, 0);
+module_param_named(msg_enable, wd_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
MODULE_PARM_DESC(io, "I/O base address(es)");
MODULE_PARM_DESC(irq, "IRQ number(s) (ignored for PureData boards)");
MODULE_PARM_DESC(mem, "memory base address(es)(ignored for PureData boards)");
MODULE_PARM_DESC(mem_end, "memory end address(es)");
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
MODULE_DESCRIPTION("ISA Western Digital wd8003/wd8013 ; SMC Elite, Elite16 ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index 85ec4c2d2645..7b373e65f377 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -44,6 +44,8 @@
static const char version[] =
"8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+static u32 zorro8390_msg_enable;
+
#include "lib8390.c"
#define DRV_NAME "zorro8390"
@@ -86,9 +88,9 @@ static struct card_info {
static void zorro8390_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
+ struct ei_device *ei_local = netdev_priv(dev);
- if (ei_debug > 1)
- netdev_dbg(dev, "resetting - t=%ld...\n", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting - t=%ld...\n", jiffies);
z_writeb(z_readb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
@@ -119,8 +121,9 @@ static void zorro8390_get_8390_hdr(struct net_device *dev,
* If it does, it's the last thing you'll see
*/
if (ei_status.dmaing) {
- netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
- __func__, ei_status.dmaing, ei_status.irqlock);
+ netdev_warn(dev,
+ "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
+ __func__, ei_status.dmaing, ei_status.irqlock);
return;
}
@@ -230,7 +233,7 @@ static void zorro8390_block_output(struct net_device *dev, int count,
while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + 2 * HZ / 100)) {
/* 20ms */
- netdev_err(dev, "timeout waiting for Tx RDC\n");
+ netdev_warn(dev, "timeout waiting for Tx RDC\n");
zorro8390_reset_8390(dev);
__NS8390_init(dev, 1);
break;
@@ -248,8 +251,9 @@ static int zorro8390_open(struct net_device *dev)
static int zorro8390_close(struct net_device *dev)
{
- if (ei_debug > 1)
- netdev_dbg(dev, "Shutting down ethercard\n");
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard\n");
__ei_close(dev);
return 0;
}
@@ -293,6 +297,7 @@ static int zorro8390_init(struct net_device *dev, unsigned long board,
int err;
unsigned char SA_prom[32];
int start_page, stop_page;
+ struct ei_device *ei_local = netdev_priv(dev);
static u32 zorro8390_offsets[16] = {
0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
@@ -383,6 +388,9 @@ static int zorro8390_init(struct net_device *dev, unsigned long board,
dev->netdev_ops = &zorro8390_netdev_ops;
__NS8390_init(dev, 0);
+
+ ei_local->msg_enable = zorro8390_msg_enable;
+
err = register_netdev(dev);
if (err) {
free_irq(IRQ_AMIGA_PORTS, dev);
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index 75fb1d20d6fd..c0f68dcd1dc1 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -667,8 +667,8 @@ static u32 bfin_select_phc_clock(u32 input_clk, unsigned int *shift_result)
return 1000000000UL / ppn;
}
-static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd)
+static int bfin_mac_hwtstamp_set(struct net_device *netdev,
+ struct ifreq *ifr)
{
struct hwtstamp_config config;
struct bfin_mac_local *lp = netdev_priv(netdev);
@@ -824,6 +824,16 @@ static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
-EFAULT : 0;
}
+static int bfin_mac_hwtstamp_get(struct net_device *netdev,
+ struct ifreq *ifr)
+{
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+
+ return copy_to_user(ifr->ifr_data, &lp->stamp_cfg,
+ sizeof(lp->stamp_cfg)) ?
+ -EFAULT : 0;
+}
+
static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
{
struct bfin_mac_local *lp = netdev_priv(netdev);
@@ -1062,7 +1072,8 @@ static void bfin_phc_release(struct bfin_mac_local *lp)
#else
# define bfin_mac_hwtstamp_is_none(cfg) 0
# define bfin_mac_hwtstamp_init(dev)
-# define bfin_mac_hwtstamp_ioctl(dev, ifr, cmd) (-EOPNOTSUPP)
+# define bfin_mac_hwtstamp_set(dev, ifr) (-EOPNOTSUPP)
+# define bfin_mac_hwtstamp_get(dev, ifr) (-EOPNOTSUPP)
# define bfin_rx_hwtstamp(dev, skb)
# define bfin_tx_hwtstamp(dev, skb)
# define bfin_phc_init(netdev, dev) 0
@@ -1496,7 +1507,9 @@ static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCSHWTSTAMP:
- return bfin_mac_hwtstamp_ioctl(netdev, ifr, cmd);
+ return bfin_mac_hwtstamp_set(netdev, ifr);
+ case SIOCGHWTSTAMP:
+ return bfin_mac_hwtstamp_get(netdev, ifr);
default:
if (lp->phydev)
return phy_mii_ioctl(lp->phydev, ifr, cmd);
@@ -1544,7 +1557,6 @@ static int bfin_mac_open(struct net_device *dev)
return ret;
phy_start(lp->phydev);
- phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
setup_system_regs(dev);
setup_mac_addr(dev->dev_addr);
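The bfin_mac change above splits the old SIOCSHWTSTAMP-only handler into
separate set and get paths, so userspace can read the current hardware
timestamping configuration back without rewriting it. A minimal query sketch,
assuming a kernel that carries SIOCGHWTSTAMP; "eth0" is a placeholder
interface name:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&cfg, 0, sizeof(cfg));
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder */
	ifr.ifr_data = (void *)&cfg;

	/* read back lp->stamp_cfg without modifying it */
	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) == 0)
		printf("flags=%d tx_type=%d rx_filter=%d\n",
		       cfg.flags, cfg.tx_type, cfg.rx_filter);
	else
		perror("SIOCGHWTSTAMP");
	close(fd);
	return 0;
}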
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index e06694555144..b20cbf0323e0 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1361,7 +1361,7 @@ static int greth_mdio_init(struct greth_private *greth)
timeout = jiffies + 6*HZ;
while (!phy_aneg_done(greth->phy) && time_before(jiffies, timeout)) {
}
- genphy_read_status(greth->phy);
+ phy_read_status(greth->phy);
greth_link_change(greth->netdev);
}
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 50b853a79d77..46dfb1378c17 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -717,8 +717,7 @@ static int emac_open(struct net_device *dev)
if (netif_msg_ifup(db))
dev_dbg(db->dev, "enabling %s\n", dev->name);
- if (devm_request_irq(db->dev, dev->irq, &emac_interrupt,
- 0, dev->name, dev))
+ if (request_irq(dev->irq, &emac_interrupt, 0, dev->name, dev))
return -EAGAIN;
/* Initialize EMAC board */
@@ -774,6 +773,8 @@ static int emac_stop(struct net_device *ndev)
emac_shutdown(ndev);
+ free_irq(ndev->irq, ndev);
+
return 0;
}
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 65926a956575..72cfff7d23a5 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -42,9 +42,9 @@
#include "7990.h"
-#define WRITERAP(lp,x) out_be16(lp->base + LANCE_RAP, (x))
-#define WRITERDP(lp,x) out_be16(lp->base + LANCE_RDP, (x))
-#define READRDP(lp) in_be16(lp->base + LANCE_RDP)
+#define WRITERAP(lp, x) out_be16(lp->base + LANCE_RAP, (x))
+#define WRITERDP(lp, x) out_be16(lp->base + LANCE_RDP, (x))
+#define READRDP(lp) in_be16(lp->base + LANCE_RDP)
#if defined(CONFIG_HPLANCE) || defined(CONFIG_HPLANCE_MODULE)
#include "hplance.h"
@@ -56,9 +56,9 @@
#if defined(CONFIG_MVME147_NET) || defined(CONFIG_MVME147_NET_MODULE)
/* Lossage Factor Nine, Mr Sulu. */
-#define WRITERAP(lp,x) (lp->writerap(lp,x))
-#define WRITERDP(lp,x) (lp->writerdp(lp,x))
-#define READRDP(lp) (lp->readrdp(lp))
+#define WRITERAP(lp, x) (lp->writerap(lp, x))
+#define WRITERDP(lp, x) (lp->writerdp(lp, x))
+#define READRDP(lp) (lp->readrdp(lp))
#else
@@ -94,428 +94,436 @@ static inline __u16 READRDP(struct lance_private *lp)
#ifdef UNDEF
#define PRINT_RINGS() \
do { \
- int t; \
- for (t=0; t < RX_RING_SIZE; t++) { \
- printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n",\
- t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0,\
- ib->brx_ring[t].length,\
- ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits);\
- }\
- for (t=0; t < TX_RING_SIZE; t++) { \
- printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n",\
- t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0,\
- ib->btx_ring[t].length,\
- ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits);\
- }\
+ int t; \
+ for (t = 0; t < RX_RING_SIZE; t++) { \
+ printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n", \
+ t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0, \
+ ib->brx_ring[t].length, \
+ ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits); \
+ } \
+ for (t = 0; t < TX_RING_SIZE; t++) { \
+ printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n", \
+ t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0, \
+ ib->btx_ring[t].length, \
+ ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits); \
+ } \
} while (0)
#else
#define PRINT_RINGS()
#endif
/* Load the CSR registers. The LANCE has to be STOPped when we do this! */
-static void load_csrs (struct lance_private *lp)
+static void load_csrs(struct lance_private *lp)
{
- volatile struct lance_init_block *aib = lp->lance_init_block;
- int leptr;
+ volatile struct lance_init_block *aib = lp->lance_init_block;
+ int leptr;
- leptr = LANCE_ADDR (aib);
+ leptr = LANCE_ADDR(aib);
- WRITERAP(lp, LE_CSR1); /* load address of init block */
- WRITERDP(lp, leptr & 0xFFFF);
- WRITERAP(lp, LE_CSR2);
- WRITERDP(lp, leptr >> 16);
- WRITERAP(lp, LE_CSR3);
- WRITERDP(lp, lp->busmaster_regval); /* set byteswap/ALEctrl/byte ctrl */
+ WRITERAP(lp, LE_CSR1); /* load address of init block */
+ WRITERDP(lp, leptr & 0xFFFF);
+ WRITERAP(lp, LE_CSR2);
+ WRITERDP(lp, leptr >> 16);
+ WRITERAP(lp, LE_CSR3);
+ WRITERDP(lp, lp->busmaster_regval); /* set byteswap/ALEctrl/byte ctrl */
- /* Point back to csr0 */
- WRITERAP(lp, LE_CSR0);
+ /* Point back to csr0 */
+ WRITERAP(lp, LE_CSR0);
}
/* #define to 0 or 1 appropriately */
#define DEBUG_IRING 0
/* Set up the Lance Rx and Tx rings and the init block */
-static void lance_init_ring (struct net_device *dev)
+static void lance_init_ring(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- volatile struct lance_init_block *ib = lp->init_block;
- volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */
- int leptr;
- int i;
-
- aib = lp->lance_init_block;
-
- lp->rx_new = lp->tx_new = 0;
- lp->rx_old = lp->tx_old = 0;
-
- ib->mode = LE_MO_PROM; /* normal, enable Tx & Rx */
-
- /* Copy the ethernet address to the lance init block
- * Notice that we do a byteswap if we're big endian.
- * [I think this is the right criterion; at least, sunlance,
- * a2065 and atarilance do the byteswap and lance.c (PC) doesn't.
- * However, the datasheet says that the BSWAP bit doesn't affect
- * the init block, so surely it should be low byte first for
- * everybody? Um.]
- * We could define the ib->physaddr as three 16bit values and
- * use (addr[1] << 8) | addr[0] & co, but this is more efficient.
- */
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */
+ int leptr;
+ int i;
+
+ aib = lp->lance_init_block;
+
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ ib->mode = LE_MO_PROM; /* normal, enable Tx & Rx */
+
+ /* Copy the ethernet address to the lance init block
+ * Notice that we do a byteswap if we're big endian.
+ * [I think this is the right criterion; at least, sunlance,
+ * a2065 and atarilance do the byteswap and lance.c (PC) doesn't.
+ * However, the datasheet says that the BSWAP bit doesn't affect
+ * the init block, so surely it should be low byte first for
+ * everybody? Um.]
+ * We could define the ib->physaddr as three 16bit values and
+ * use (addr[1] << 8) | addr[0] & co, but this is more efficient.
+ */
#ifdef __BIG_ENDIAN
- ib->phys_addr [0] = dev->dev_addr [1];
- ib->phys_addr [1] = dev->dev_addr [0];
- ib->phys_addr [2] = dev->dev_addr [3];
- ib->phys_addr [3] = dev->dev_addr [2];
- ib->phys_addr [4] = dev->dev_addr [5];
- ib->phys_addr [5] = dev->dev_addr [4];
+ ib->phys_addr[0] = dev->dev_addr[1];
+ ib->phys_addr[1] = dev->dev_addr[0];
+ ib->phys_addr[2] = dev->dev_addr[3];
+ ib->phys_addr[3] = dev->dev_addr[2];
+ ib->phys_addr[4] = dev->dev_addr[5];
+ ib->phys_addr[5] = dev->dev_addr[4];
#else
- for (i=0; i<6; i++)
- ib->phys_addr[i] = dev->dev_addr[i];
+ for (i = 0; i < 6; i++)
+ ib->phys_addr[i] = dev->dev_addr[i];
#endif
- if (DEBUG_IRING)
- printk ("TX rings:\n");
+ if (DEBUG_IRING)
+ printk("TX rings:\n");
lp->tx_full = 0;
- /* Setup the Tx ring entries */
- for (i = 0; i < (1<<lp->lance_log_tx_bufs); i++) {
- leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
- ib->btx_ring [i].tmd0 = leptr;
- ib->btx_ring [i].tmd1_hadr = leptr >> 16;
- ib->btx_ring [i].tmd1_bits = 0;
- ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */
- ib->btx_ring [i].misc = 0;
- if (DEBUG_IRING)
- printk ("%d: 0x%8.8x\n", i, leptr);
- }
-
- /* Setup the Rx ring entries */
- if (DEBUG_IRING)
- printk ("RX rings:\n");
- for (i = 0; i < (1<<lp->lance_log_rx_bufs); i++) {
- leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
-
- ib->brx_ring [i].rmd0 = leptr;
- ib->brx_ring [i].rmd1_hadr = leptr >> 16;
- ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
- /* 0xf000 == bits that must be one (reserved, presumably) */
- ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000;
- ib->brx_ring [i].mblength = 0;
- if (DEBUG_IRING)
- printk ("%d: 0x%8.8x\n", i, leptr);
- }
-
- /* Setup the initialization block */
-
- /* Setup rx descriptor pointer */
- leptr = LANCE_ADDR(&aib->brx_ring);
- ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
- ib->rx_ptr = leptr;
- if (DEBUG_IRING)
- printk ("RX ptr: %8.8x\n", leptr);
-
- /* Setup tx descriptor pointer */
- leptr = LANCE_ADDR(&aib->btx_ring);
- ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
- ib->tx_ptr = leptr;
- if (DEBUG_IRING)
- printk ("TX ptr: %8.8x\n", leptr);
-
- /* Clear the multicast filter */
- ib->filter [0] = 0;
- ib->filter [1] = 0;
- PRINT_RINGS();
+ /* Setup the Tx ring entries */
+ for (i = 0; i < (1 << lp->lance_log_tx_bufs); i++) {
+ leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
+ ib->btx_ring[i].tmd0 = leptr;
+ ib->btx_ring[i].tmd1_hadr = leptr >> 16;
+ ib->btx_ring[i].tmd1_bits = 0;
+ ib->btx_ring[i].length = 0xf000; /* The ones required by tmd2 */
+ ib->btx_ring[i].misc = 0;
+ if (DEBUG_IRING)
+ printk("%d: 0x%8.8x\n", i, leptr);
+ }
+
+ /* Setup the Rx ring entries */
+ if (DEBUG_IRING)
+ printk("RX rings:\n");
+ for (i = 0; i < (1 << lp->lance_log_rx_bufs); i++) {
+ leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
+
+ ib->brx_ring[i].rmd0 = leptr;
+ ib->brx_ring[i].rmd1_hadr = leptr >> 16;
+ ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
+ /* 0xf000 == bits that must be one (reserved, presumably) */
+ ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000;
+ ib->brx_ring[i].mblength = 0;
+ if (DEBUG_IRING)
+ printk("%d: 0x%8.8x\n", i, leptr);
+ }
+
+ /* Setup the initialization block */
+
+ /* Setup rx descriptor pointer */
+ leptr = LANCE_ADDR(&aib->brx_ring);
+ ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
+ ib->rx_ptr = leptr;
+ if (DEBUG_IRING)
+ printk("RX ptr: %8.8x\n", leptr);
+
+ /* Setup tx descriptor pointer */
+ leptr = LANCE_ADDR(&aib->btx_ring);
+ ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
+ ib->tx_ptr = leptr;
+ if (DEBUG_IRING)
+ printk("TX ptr: %8.8x\n", leptr);
+
+ /* Clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
+ PRINT_RINGS();
}
/* LANCE must be STOPped before we do this, too... */
-static int init_restart_lance (struct lance_private *lp)
+static int init_restart_lance(struct lance_private *lp)
{
- int i;
+ int i;
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_INIT);
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_INIT);
- /* Need a hook here for sunlance ledma stuff */
+ /* Need a hook here for sunlance ledma stuff */
- /* Wait for the lance to complete initialization */
- for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++)
- barrier();
- if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) {
- printk ("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp));
- return -1;
- }
+ /* Wait for the lance to complete initialization */
+ for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++)
+ barrier();
+ if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) {
+ printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp));
+ return -1;
+ }
- /* Clear IDON by writing a "1", enable interrupts and start lance */
- WRITERDP(lp, LE_C0_IDON);
- WRITERDP(lp, LE_C0_INEA | LE_C0_STRT);
+ /* Clear IDON by writing a "1", enable interrupts and start lance */
+ WRITERDP(lp, LE_C0_IDON);
+ WRITERDP(lp, LE_C0_INEA | LE_C0_STRT);
- return 0;
+ return 0;
}
-static int lance_reset (struct net_device *dev)
+static int lance_reset(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- int status;
+ struct lance_private *lp = netdev_priv(dev);
+ int status;
- /* Stop the lance */
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_STOP);
+ /* Stop the lance */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
- load_csrs (lp);
- lance_init_ring (dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
- status = init_restart_lance (lp);
+ load_csrs(lp);
+ lance_init_ring(dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ status = init_restart_lance(lp);
#ifdef DEBUG_DRIVER
- printk ("Lance restart=%d\n", status);
+ printk("Lance restart=%d\n", status);
#endif
- return status;
+ return status;
}
-static int lance_rx (struct net_device *dev)
+static int lance_rx(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- volatile struct lance_init_block *ib = lp->init_block;
- volatile struct lance_rx_desc *rd;
- unsigned char bits;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_rx_desc *rd;
+ unsigned char bits;
#ifdef TEST_HITS
- int i;
+ int i;
#endif
#ifdef TEST_HITS
- printk ("[");
- for (i = 0; i < RX_RING_SIZE; i++) {
- if (i == lp->rx_new)
- printk ("%s",
- ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "_" : "X");
- else
- printk ("%s",
- ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "." : "1");
- }
- printk ("]");
+ printk("[");
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (i == lp->rx_new)
+ printk("%s",
+ ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "_" : "X");
+ else
+ printk("%s",
+ ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "." : "1");
+ }
+ printk("]");
#endif
#ifdef CONFIG_HP300
blinken_leds(0x40, 0);
#endif
- WRITERDP(lp, LE_C0_RINT | LE_C0_INEA); /* ack Rx int, reenable ints */
- for (rd = &ib->brx_ring [lp->rx_new]; /* For each Rx ring we own... */
- !((bits = rd->rmd1_bits) & LE_R1_OWN);
- rd = &ib->brx_ring [lp->rx_new]) {
-
- /* We got an incomplete frame? */
- if ((bits & LE_R1_POK) != LE_R1_POK) {
- dev->stats.rx_over_errors++;
- dev->stats.rx_errors++;
- continue;
- } else if (bits & LE_R1_ERR) {
- /* Count only the end frame as a rx error,
- * not the beginning
- */
- if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++;
- if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++;
- if (bits & LE_R1_OFL) dev->stats.rx_over_errors++;
- if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++;
- if (bits & LE_R1_EOP) dev->stats.rx_errors++;
- } else {
+ WRITERDP(lp, LE_C0_RINT | LE_C0_INEA); /* ack Rx int, reenable ints */
+ for (rd = &ib->brx_ring[lp->rx_new]; /* For each Rx ring we own... */
+ !((bits = rd->rmd1_bits) & LE_R1_OWN);
+ rd = &ib->brx_ring[lp->rx_new]) {
+
+ /* We got an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_errors++;
+ continue;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a rx error,
+ * not the beginning
+ */
+ if (bits & LE_R1_BUF)
+ dev->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC)
+ dev->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL)
+ dev->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA)
+ dev->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP)
+ dev->stats.rx_errors++;
+ } else {
int len = (rd->mblength & 0xfff) - 4;
struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
- if (!skb) {
- dev->stats.rx_dropped++;
- rd->mblength = 0;
- rd->rmd1_bits = LE_R1_OWN;
- lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
- return 0;
- }
-
- skb_reserve (skb, 2); /* 16 byte align */
- skb_put (skb, len); /* make room */
- skb_copy_to_linear_data(skb,
- (unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
- len);
- skb->protocol = eth_type_trans (skb, dev);
- netif_rx (skb);
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+ return 0;
+ }
+
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, len); /* make room */
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)&(ib->rx_buf[lp->rx_new][0]),
+ len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
- }
-
- /* Return the packet to the pool */
- rd->mblength = 0;
- rd->rmd1_bits = LE_R1_OWN;
- lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
- }
- return 0;
+ }
+
+ /* Return the packet to the pool */
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+ }
+ return 0;
}
-static int lance_tx (struct net_device *dev)
+static int lance_tx(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- volatile struct lance_init_block *ib = lp->init_block;
- volatile struct lance_tx_desc *td;
- int i, j;
- int status;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_tx_desc *td;
+ int i, j;
+ int status;
#ifdef CONFIG_HP300
blinken_leds(0x80, 0);
#endif
- /* csr0 is 2f3 */
- WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
- /* csr0 is 73 */
-
- j = lp->tx_old;
- for (i = j; i != lp->tx_new; i = j) {
- td = &ib->btx_ring [i];
-
- /* If we hit a packet not owned by us, stop */
- if (td->tmd1_bits & LE_T1_OWN)
- break;
-
- if (td->tmd1_bits & LE_T1_ERR) {
- status = td->misc;
-
- dev->stats.tx_errors++;
- if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++;
- if (status & LE_T3_LCOL) dev->stats.tx_window_errors++;
-
- if (status & LE_T3_CLOS) {
- dev->stats.tx_carrier_errors++;
- if (lp->auto_select) {
- lp->tpe = 1 - lp->tpe;
- printk("%s: Carrier Lost, trying %s\n",
- dev->name, lp->tpe?"TPE":"AUI");
- /* Stop the lance */
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_STOP);
- lance_init_ring (dev);
- load_csrs (lp);
- init_restart_lance (lp);
- return 0;
- }
- }
-
- /* buffer errors and underflows turn off the transmitter */
- /* Restart the adapter */
- if (status & (LE_T3_BUF|LE_T3_UFL)) {
- dev->stats.tx_fifo_errors++;
-
- printk ("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
- dev->name);
- /* Stop the lance */
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_STOP);
- lance_init_ring (dev);
- load_csrs (lp);
- init_restart_lance (lp);
- return 0;
- }
- } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
- /*
- * So we don't count the packet more than once.
- */
- td->tmd1_bits &= ~(LE_T1_POK);
-
- /* One collision before packet was sent. */
- if (td->tmd1_bits & LE_T1_EONE)
- dev->stats.collisions++;
-
- /* More than one collision, be optimistic. */
- if (td->tmd1_bits & LE_T1_EMORE)
- dev->stats.collisions += 2;
-
- dev->stats.tx_packets++;
- }
-
- j = (j + 1) & lp->tx_ring_mod_mask;
- }
- lp->tx_old = j;
- WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
- return 0;
+ /* csr0 is 2f3 */
+ WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
+ /* csr0 is 73 */
+
+ j = lp->tx_old;
+ for (i = j; i != lp->tx_new; i = j) {
+ td = &ib->btx_ring[i];
+
+ /* If we hit a packet not owned by us, stop */
+ if (td->tmd1_bits & LE_T1_OWN)
+ break;
+
+ if (td->tmd1_bits & LE_T1_ERR) {
+ status = td->misc;
+
+ dev->stats.tx_errors++;
+ if (status & LE_T3_RTY)
+ dev->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL)
+ dev->stats.tx_window_errors++;
+
+ if (status & LE_T3_CLOS) {
+ dev->stats.tx_carrier_errors++;
+ if (lp->auto_select) {
+ lp->tpe = 1 - lp->tpe;
+ printk("%s: Carrier Lost, trying %s\n",
+ dev->name,
+ lp->tpe ? "TPE" : "AUI");
+ /* Stop the lance */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ return 0;
+ }
+ }
+
+ /* buffer errors and underflows turn off the transmitter */
+ /* Restart the adapter */
+ if (status & (LE_T3_BUF|LE_T3_UFL)) {
+ dev->stats.tx_fifo_errors++;
+
+ printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
+ dev->name);
+ /* Stop the lance */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ return 0;
+ }
+ } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
+ /*
+ * So we don't count the packet more than once.
+ */
+ td->tmd1_bits &= ~(LE_T1_POK);
+
+ /* One collision before packet was sent. */
+ if (td->tmd1_bits & LE_T1_EONE)
+ dev->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (td->tmd1_bits & LE_T1_EMORE)
+ dev->stats.collisions += 2;
+
+ dev->stats.tx_packets++;
+ }
+
+ j = (j + 1) & lp->tx_ring_mod_mask;
+ }
+ lp->tx_old = j;
+ WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
+ return 0;
}
static irqreturn_t
-lance_interrupt (int irq, void *dev_id)
+lance_interrupt(int irq, void *dev_id)
{
- struct net_device *dev = (struct net_device *)dev_id;
- struct lance_private *lp = netdev_priv(dev);
- int csr0;
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ int csr0;
- spin_lock (&lp->devlock);
+ spin_lock(&lp->devlock);
- WRITERAP(lp, LE_CSR0); /* LANCE Controller Status */
- csr0 = READRDP(lp);
+ WRITERAP(lp, LE_CSR0); /* LANCE Controller Status */
+ csr0 = READRDP(lp);
- PRINT_RINGS();
+ PRINT_RINGS();
- if (!(csr0 & LE_C0_INTR)) { /* Check if any interrupt has */
- spin_unlock (&lp->devlock);
- return IRQ_NONE; /* been generated by the Lance. */
+ if (!(csr0 & LE_C0_INTR)) { /* Check if any interrupt has */
+ spin_unlock(&lp->devlock);
+ return IRQ_NONE; /* been generated by the Lance. */
}
- /* Acknowledge all the interrupt sources ASAP */
- WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT));
-
- if ((csr0 & LE_C0_ERR)) {
- /* Clear the error condition */
- WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA);
- }
-
- if (csr0 & LE_C0_RINT)
- lance_rx (dev);
-
- if (csr0 & LE_C0_TINT)
- lance_tx (dev);
-
- /* Log misc errors. */
- if (csr0 & LE_C0_BABL)
- dev->stats.tx_errors++; /* Tx babble. */
- if (csr0 & LE_C0_MISS)
- dev->stats.rx_errors++; /* Missed a Rx frame. */
- if (csr0 & LE_C0_MERR) {
- printk("%s: Bus master arbitration failure, status %4.4x.\n",
- dev->name, csr0);
- /* Restart the chip. */
- WRITERDP(lp, LE_C0_STRT);
- }
-
- if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) {
+ /* Acknowledge all the interrupt sources ASAP */
+ WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT));
+
+ if ((csr0 & LE_C0_ERR)) {
+ /* Clear the error condition */
+ WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA);
+ }
+
+ if (csr0 & LE_C0_RINT)
+ lance_rx(dev);
+
+ if (csr0 & LE_C0_TINT)
+ lance_tx(dev);
+
+ /* Log misc errors. */
+ if (csr0 & LE_C0_BABL)
+ dev->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & LE_C0_MISS)
+ dev->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & LE_C0_MERR) {
+ printk("%s: Bus master arbitration failure, status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ WRITERDP(lp, LE_C0_STRT);
+ }
+
+ if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) {
lp->tx_full = 0;
- netif_wake_queue (dev);
- }
+ netif_wake_queue(dev);
+ }
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA);
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA);
- spin_unlock (&lp->devlock);
+ spin_unlock(&lp->devlock);
return IRQ_HANDLED;
}
-int lance_open (struct net_device *dev)
+int lance_open(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
+ struct lance_private *lp = netdev_priv(dev);
int res;
- /* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
- if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev))
- return -EAGAIN;
+ /* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
+ if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev))
+ return -EAGAIN;
- res = lance_reset(dev);
+ res = lance_reset(dev);
spin_lock_init(&lp->devlock);
- netif_start_queue (dev);
+ netif_start_queue(dev);
return res;
}
EXPORT_SYMBOL_GPL(lance_open);
-int lance_close (struct net_device *dev)
+int lance_close(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
+ struct lance_private *lp = netdev_priv(dev);
- netif_stop_queue (dev);
+ netif_stop_queue(dev);
- /* Stop the LANCE */
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_STOP);
+ /* Stop the LANCE */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
- free_irq(lp->irq, dev);
+ free_irq(lp->irq, dev);
- return 0;
+ return 0;
}
EXPORT_SYMBOL_GPL(lance_close);
@@ -524,122 +532,122 @@ void lance_tx_timeout(struct net_device *dev)
printk("lance_tx_timeout\n");
lance_reset(dev);
dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue (dev);
+ netif_wake_queue(dev);
}
EXPORT_SYMBOL_GPL(lance_tx_timeout);
-int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
+int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- volatile struct lance_init_block *ib = lp->init_block;
- int entry, skblen, len;
- static int outs;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ int entry, skblen, len;
+ static int outs;
unsigned long flags;
- if (!TX_BUFFS_AVAIL)
- return NETDEV_TX_LOCKED;
+ if (!TX_BUFFS_AVAIL)
+ return NETDEV_TX_LOCKED;
- netif_stop_queue (dev);
+ netif_stop_queue(dev);
- skblen = skb->len;
+ skblen = skb->len;
#ifdef DEBUG_DRIVER
- /* dump the packet */
- {
- int i;
-
- for (i = 0; i < 64; i++) {
- if ((i % 16) == 0)
- printk ("\n");
- printk ("%2.2x ", skb->data [i]);
- }
- }
+ /* dump the packet */
+ {
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ if ((i % 16) == 0)
+ printk("\n");
+ printk("%2.2x ", skb->data[i]);
+ }
+ }
#endif
- len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
- entry = lp->tx_new & lp->tx_ring_mod_mask;
- ib->btx_ring [entry].length = (-len) | 0xf000;
- ib->btx_ring [entry].misc = 0;
+ len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
+ entry = lp->tx_new & lp->tx_ring_mod_mask;
+ ib->btx_ring[entry].length = (-len) | 0xf000;
+ ib->btx_ring[entry].misc = 0;
if (skb->len < ETH_ZLEN)
memset((void *)&ib->tx_buf[entry][0], 0, ETH_ZLEN);
- skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);
+ skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);
- /* Now, give the packet to the lance */
- ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
- lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask;
+ /* Now, give the packet to the lance */
+ ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
+ lp->tx_new = (lp->tx_new + 1) & lp->tx_ring_mod_mask;
- outs++;
- /* Kick the lance: transmit now */
- WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
- dev_kfree_skb (skb);
+ outs++;
+ /* Kick the lance: transmit now */
+ WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
+ dev_kfree_skb(skb);
- spin_lock_irqsave (&lp->devlock, flags);
- if (TX_BUFFS_AVAIL)
- netif_start_queue (dev);
+ spin_lock_irqsave(&lp->devlock, flags);
+ if (TX_BUFFS_AVAIL)
+ netif_start_queue(dev);
else
lp->tx_full = 1;
- spin_unlock_irqrestore (&lp->devlock, flags);
+ spin_unlock_irqrestore(&lp->devlock, flags);
- return NETDEV_TX_OK;
+ return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(lance_start_xmit);
/* taken from the depca driver via a2065.c */
-static void lance_load_multicast (struct net_device *dev)
+static void lance_load_multicast(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- volatile struct lance_init_block *ib = lp->init_block;
- volatile u16 *mcast_table = (u16 *)&ib->filter;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile u16 *mcast_table = (u16 *)&ib->filter;
struct netdev_hw_addr *ha;
- u32 crc;
-
- /* set all multicast bits */
- if (dev->flags & IFF_ALLMULTI){
- ib->filter [0] = 0xffffffff;
- ib->filter [1] = 0xffffffff;
- return;
- }
- /* clear the multicast filter */
- ib->filter [0] = 0;
- ib->filter [1] = 0;
-
- /* Add addresses */
+ u32 crc;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI) {
+ ib->filter[0] = 0xffffffff;
+ ib->filter[1] = 0xffffffff;
+ return;
+ }
+ /* clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
+
+ /* Add addresses */
netdev_for_each_mc_addr(ha, dev) {
crc = ether_crc_le(6, ha->addr);
- crc = crc >> 26;
- mcast_table [crc >> 4] |= 1 << (crc & 0xf);
- }
+ crc = crc >> 26;
+ mcast_table[crc >> 4] |= 1 << (crc & 0xf);
+ }
}
-void lance_set_multicast (struct net_device *dev)
+void lance_set_multicast(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- volatile struct lance_init_block *ib = lp->init_block;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
int stopped;
stopped = netif_queue_stopped(dev);
if (!stopped)
- netif_stop_queue (dev);
-
- while (lp->tx_old != lp->tx_new)
- schedule();
+ netif_stop_queue(dev);
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_STOP);
- lance_init_ring (dev);
+ while (lp->tx_old != lp->tx_new)
+ schedule();
- if (dev->flags & IFF_PROMISC) {
- ib->mode |= LE_MO_PROM;
- } else {
- ib->mode &= ~LE_MO_PROM;
- lance_load_multicast (dev);
- }
- load_csrs (lp);
- init_restart_lance (lp);
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+ lance_init_ring(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ ib->mode |= LE_MO_PROM;
+ } else {
+ ib->mode &= ~LE_MO_PROM;
+ lance_load_multicast(dev);
+ }
+ load_csrs(lp);
+ init_restart_lance(lp);
if (!stopped)
- netif_start_queue (dev);
+ netif_start_queue(dev);
}
EXPORT_SYMBOL_GPL(lance_set_multicast);
@@ -648,10 +656,10 @@ void lance_poll(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
- spin_lock (&lp->devlock);
+ spin_lock(&lp->devlock);
WRITERAP(lp, LE_CSR0);
WRITERDP(lp, LE_C0_STRT);
- spin_unlock (&lp->devlock);
+ spin_unlock(&lp->devlock);
lance_interrupt(dev->irq, dev);
}
#endif
diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h
index ae33a99bf476..e9e0be313804 100644
--- a/drivers/net/ethernet/amd/7990.h
+++ b/drivers/net/ethernet/amd/7990.h
@@ -35,33 +35,32 @@
#define LANCE_LOG_RX_BUFFERS 3
#endif
-#define TX_RING_SIZE (1<<LANCE_LOG_TX_BUFFERS)
-#define RX_RING_SIZE (1<<LANCE_LOG_RX_BUFFERS)
-#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
-#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
-#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
-#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
-#define PKT_BUFF_SIZE (1544)
-#define RX_BUFF_SIZE PKT_BUFF_SIZE
-#define TX_BUFF_SIZE PKT_BUFF_SIZE
+#define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS)
+#define RX_RING_SIZE (1 << LANCE_LOG_RX_BUFFERS)
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
+#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
+#define PKT_BUFF_SIZE (1544)
+#define RX_BUFF_SIZE PKT_BUFF_SIZE
+#define TX_BUFF_SIZE PKT_BUFF_SIZE
/* Each receive buffer is described by a receive message descriptor (RMD) */
struct lance_rx_desc {
- volatile unsigned short rmd0; /* low address of packet */
- volatile unsigned char rmd1_bits; /* descriptor bits */
- volatile unsigned char rmd1_hadr; /* high address of packet */
- volatile short length; /* This length is 2s complement (negative)!
- * Buffer length
- */
- volatile unsigned short mblength; /* Actual number of bytes received */
+ volatile unsigned short rmd0; /* low address of packet */
+ volatile unsigned char rmd1_bits; /* descriptor bits */
+ volatile unsigned char rmd1_hadr; /* high address of packet */
+ volatile short length; /* This length is 2s complement (negative)!
+ * Buffer length */
+ volatile unsigned short mblength; /* Actual number of bytes received */
};
/* Ditto for TMD: */
struct lance_tx_desc {
- volatile unsigned short tmd0; /* low address of packet */
- volatile unsigned char tmd1_bits; /* descriptor bits */
- volatile unsigned char tmd1_hadr; /* high address of packet */
- volatile short length; /* Length is 2s complement (negative)! */
+ volatile unsigned short tmd0; /* low address of packet */
+ volatile unsigned char tmd1_bits; /* descriptor bits */
+ volatile unsigned char tmd1_hadr; /* high address of packet */
+ volatile short length; /* Length is 2s complement (negative)! */
volatile unsigned short misc;
};
@@ -71,181 +70,178 @@ struct lance_tx_desc {
* init block,the Tx and Rx rings and the buffers together in memory:
*/
struct lance_init_block {
- volatile unsigned short mode; /* Pre-set mode (reg. 15) */
- volatile unsigned char phys_addr[6]; /* Physical ethernet address */
- volatile unsigned filter[2]; /* Multicast filter (64 bits) */
-
- /* Receive and transmit ring base, along with extra bits. */
- volatile unsigned short rx_ptr; /* receive descriptor addr */
- volatile unsigned short rx_len; /* receive len and high addr */
- volatile unsigned short tx_ptr; /* transmit descriptor addr */
- volatile unsigned short tx_len; /* transmit len and high addr */
-
- /* The Tx and Rx ring entries must be aligned on 8-byte boundaries.
- * This will be true if this whole struct is 8-byte aligned.
- */
- volatile struct lance_tx_desc btx_ring[TX_RING_SIZE];
- volatile struct lance_rx_desc brx_ring[RX_RING_SIZE];
-
- volatile char tx_buf [TX_RING_SIZE][TX_BUFF_SIZE];
- volatile char rx_buf [RX_RING_SIZE][RX_BUFF_SIZE];
- /* we use this just to make the struct big enough that we can move its startaddr
- * in order to force alignment to an eight byte boundary.
- */
+ volatile unsigned short mode; /* Pre-set mode (reg. 15) */
+ volatile unsigned char phys_addr[6]; /* Physical ethernet address */
+ volatile unsigned filter[2]; /* Multicast filter (64 bits) */
+
+ /* Receive and transmit ring base, along with extra bits. */
+ volatile unsigned short rx_ptr; /* receive descriptor addr */
+ volatile unsigned short rx_len; /* receive len and high addr */
+ volatile unsigned short tx_ptr; /* transmit descriptor addr */
+ volatile unsigned short tx_len; /* transmit len and high addr */
+
+ /* The Tx and Rx ring entries must be aligned on 8-byte boundaries.
+ * This will be true if this whole struct is 8-byte aligned.
+ */
+ volatile struct lance_tx_desc btx_ring[TX_RING_SIZE];
+ volatile struct lance_rx_desc brx_ring[RX_RING_SIZE];
+
+ volatile char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
+ volatile char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE];
+ /* we use this just to make the struct big enough that we can move its startaddr
+ * in order to force alignment to an eight byte boundary.
+ */
};
/* This is where we keep all the stuff the driver needs to know about.
* I'm definitely unhappy about the mechanism for allowing specific
* drivers to add things...
*/
-struct lance_private
-{
- char *name;
+struct lance_private {
+ const char *name;
unsigned long base;
- volatile struct lance_init_block *init_block; /* CPU address of RAM */
- volatile struct lance_init_block *lance_init_block; /* LANCE address of RAM */
+ volatile struct lance_init_block *init_block; /* CPU address of RAM */
+ volatile struct lance_init_block *lance_init_block; /* LANCE address of RAM */
- int rx_new, tx_new;
- int rx_old, tx_old;
+ int rx_new, tx_new;
+ int rx_old, tx_old;
- int lance_log_rx_bufs, lance_log_tx_bufs;
- int rx_ring_mod_mask, tx_ring_mod_mask;
+ int lance_log_rx_bufs, lance_log_tx_bufs;
+ int rx_ring_mod_mask, tx_ring_mod_mask;
- int tpe; /* TPE is selected */
- int auto_select; /* cable-selection is by carrier */
- unsigned short busmaster_regval;
+ int tpe; /* TPE is selected */
+ int auto_select; /* cable-selection is by carrier */
+ unsigned short busmaster_regval;
- unsigned int irq; /* IRQ to register */
+ unsigned int irq; /* IRQ to register */
- /* This is because the HP LANCE is disgusting and you have to check
- * a DIO-specific register every time you read/write the LANCE regs :-<
- * [could we get away with making these some sort of macro?]
- */
- void (*writerap)(void *, unsigned short);
- void (*writerdp)(void *, unsigned short);
- unsigned short (*readrdp)(void *);
+ /* This is because the HP LANCE is disgusting and you have to check
+ * a DIO-specific register every time you read/write the LANCE regs :-<
+ * [could we get away with making these some sort of macro?]
+ */
+ void (*writerap)(void *, unsigned short);
+ void (*writerdp)(void *, unsigned short);
+ unsigned short (*readrdp)(void *);
spinlock_t devlock;
char tx_full;
};
/*
- * Am7990 Control and Status Registers
+ * Am7990 Control and Status Registers
*/
-#define LE_CSR0 0x0000 /* LANCE Controller Status */
-#define LE_CSR1 0x0001 /* IADR[15:0] (bit0==0 ie word aligned) */
-#define LE_CSR2 0x0002 /* IADR[23:16] (high bits reserved) */
-#define LE_CSR3 0x0003 /* Misc */
+#define LE_CSR0 0x0000 /* LANCE Controller Status */
+#define LE_CSR1 0x0001 /* IADR[15:0] (bit0==0 ie word aligned) */
+#define LE_CSR2 0x0002 /* IADR[23:16] (high bits reserved) */
+#define LE_CSR3 0x0003 /* Misc */
/*
* Bit definitions for CSR0 (LANCE Controller Status)
*/
-#define LE_C0_ERR 0x8000 /* Error = BABL | CERR | MISS | MERR */
-#define LE_C0_BABL 0x4000 /* Babble: Transmitted too many bits */
-#define LE_C0_CERR 0x2000 /* No Heartbeat (10BASE-T) */
-#define LE_C0_MISS 0x1000 /* Missed Frame (no rx buffer to put it in) */
-#define LE_C0_MERR 0x0800 /* Memory Error */
-#define LE_C0_RINT 0x0400 /* Receive Interrupt */
-#define LE_C0_TINT 0x0200 /* Transmit Interrupt */
-#define LE_C0_IDON 0x0100 /* Initialization Done */
-#define LE_C0_INTR 0x0080 /* Interrupt Flag
- = BABL | MISS | MERR | RINT | TINT | IDON */
-#define LE_C0_INEA 0x0040 /* Interrupt Enable */
-#define LE_C0_RXON 0x0020 /* Receive On */
-#define LE_C0_TXON 0x0010 /* Transmit On */
-#define LE_C0_TDMD 0x0008 /* Transmit Demand */
-#define LE_C0_STOP 0x0004 /* Stop */
-#define LE_C0_STRT 0x0002 /* Start */
-#define LE_C0_INIT 0x0001 /* Initialize */
+#define LE_C0_ERR 0x8000 /* Error = BABL | CERR | MISS | MERR */
+#define LE_C0_BABL 0x4000 /* Babble: Transmitted too many bits */
+#define LE_C0_CERR 0x2000 /* No Heartbeat (10BASE-T) */
+#define LE_C0_MISS 0x1000 /* Missed Frame (no rx buffer to put it in) */
+#define LE_C0_MERR 0x0800 /* Memory Error */
+#define LE_C0_RINT 0x0400 /* Receive Interrupt */
+#define LE_C0_TINT 0x0200 /* Transmit Interrupt */
+#define LE_C0_IDON 0x0100 /* Initialization Done */
+#define LE_C0_INTR 0x0080 /* Interrupt Flag
+ = BABL | MISS | MERR | RINT | TINT | IDON */
+#define LE_C0_INEA 0x0040 /* Interrupt Enable */
+#define LE_C0_RXON 0x0020 /* Receive On */
+#define LE_C0_TXON 0x0010 /* Transmit On */
+#define LE_C0_TDMD 0x0008 /* Transmit Demand */
+#define LE_C0_STOP 0x0004 /* Stop */
+#define LE_C0_STRT 0x0002 /* Start */
+#define LE_C0_INIT 0x0001 /* Initialize */
/*
* Bit definitions for CSR3
*/
-#define LE_C3_BSWP 0x0004 /* Byte Swap
- (on for big endian byte order) */
-#define LE_C3_ACON 0x0002 /* ALE Control
- (on for active low ALE) */
-#define LE_C3_BCON 0x0001 /* Byte Control */
+#define LE_C3_BSWP 0x0004 /* Byte Swap (on for big endian byte order) */
+#define LE_C3_ACON 0x0002 /* ALE Control (on for active low ALE) */
+#define LE_C3_BCON 0x0001 /* Byte Control */
/*
* Mode Flags
*/
-#define LE_MO_PROM 0x8000 /* Promiscuous Mode */
+#define LE_MO_PROM 0x8000 /* Promiscuous Mode */
/* these next ones 0x4000 -- 0x0080 are not available on the LANCE 7990,
* but they are in NetBSD's am7990.h, presumably for backwards-compatible chips
*/
-#define LE_MO_DRCVBC 0x4000 /* disable receive broadcast */
-#define LE_MO_DRCVPA 0x2000 /* disable physical address detection */
-#define LE_MO_DLNKTST 0x1000 /* disable link status */
-#define LE_MO_DAPC 0x0800 /* disable automatic polarity correction */
-#define LE_MO_MENDECL 0x0400 /* MENDEC loopback mode */
-#define LE_MO_LRTTSEL 0x0200 /* lower RX threshold / TX mode selection */
-#define LE_MO_PSEL1 0x0100 /* port selection bit1 */
-#define LE_MO_PSEL0 0x0080 /* port selection bit0 */
+#define LE_MO_DRCVBC 0x4000 /* disable receive broadcast */
+#define LE_MO_DRCVPA 0x2000 /* disable physical address detection */
+#define LE_MO_DLNKTST 0x1000 /* disable link status */
+#define LE_MO_DAPC 0x0800 /* disable automatic polarity correction */
+#define LE_MO_MENDECL 0x0400 /* MENDEC loopback mode */
+#define LE_MO_LRTTSEL 0x0200 /* lower RX threshold / TX mode selection */
+#define LE_MO_PSEL1 0x0100 /* port selection bit1 */
+#define LE_MO_PSEL0 0x0080 /* port selection bit0 */
/* and this one is from the C-LANCE data sheet... */
-#define LE_MO_EMBA 0x0080 /* Enable Modified Backoff Algorithm
- (C-LANCE, not original LANCE) */
-#define LE_MO_INTL 0x0040 /* Internal Loopback */
-#define LE_MO_DRTY 0x0020 /* Disable Retry */
-#define LE_MO_FCOLL 0x0010 /* Force Collision */
-#define LE_MO_DXMTFCS 0x0008 /* Disable Transmit CRC */
-#define LE_MO_LOOP 0x0004 /* Loopback Enable */
-#define LE_MO_DTX 0x0002 /* Disable Transmitter */
-#define LE_MO_DRX 0x0001 /* Disable Receiver */
+#define LE_MO_EMBA 0x0080 /* Enable Modified Backoff Algorithm
+ (C-LANCE, not original LANCE) */
+#define LE_MO_INTL 0x0040 /* Internal Loopback */
+#define LE_MO_DRTY 0x0020 /* Disable Retry */
+#define LE_MO_FCOLL 0x0010 /* Force Collision */
+#define LE_MO_DXMTFCS 0x0008 /* Disable Transmit CRC */
+#define LE_MO_LOOP 0x0004 /* Loopback Enable */
+#define LE_MO_DTX 0x0002 /* Disable Transmitter */
+#define LE_MO_DRX 0x0001 /* Disable Receiver */
/*
* Receive Flags
*/
-#define LE_R1_OWN 0x80 /* LANCE owns the descriptor */
-#define LE_R1_ERR 0x40 /* Error */
-#define LE_R1_FRA 0x20 /* Framing Error */
-#define LE_R1_OFL 0x10 /* Overflow Error */
-#define LE_R1_CRC 0x08 /* CRC Error */
-#define LE_R1_BUF 0x04 /* Buffer Error */
-#define LE_R1_SOP 0x02 /* Start of Packet */
-#define LE_R1_EOP 0x01 /* End of Packet */
-#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
+#define LE_R1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_R1_ERR 0x40 /* Error */
+#define LE_R1_FRA 0x20 /* Framing Error */
+#define LE_R1_OFL 0x10 /* Overflow Error */
+#define LE_R1_CRC 0x08 /* CRC Error */
+#define LE_R1_BUF 0x04 /* Buffer Error */
+#define LE_R1_SOP 0x02 /* Start of Packet */
+#define LE_R1_EOP 0x01 /* End of Packet */
+#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
/*
* Transmit Flags
*/
-#define LE_T1_OWN 0x80 /* LANCE owns the descriptor */
-#define LE_T1_ERR 0x40 /* Error */
-#define LE_T1_RES 0x20 /* Reserved, LANCE writes this with a zero */
-#define LE_T1_EMORE 0x10 /* More than one retry needed */
-#define LE_T1_EONE 0x08 /* One retry needed */
-#define LE_T1_EDEF 0x04 /* Deferred */
-#define LE_T1_SOP 0x02 /* Start of Packet */
-#define LE_T1_EOP 0x01 /* End of Packet */
-#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
+#define LE_T1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_T1_ERR 0x40 /* Error */
+#define LE_T1_RES 0x20 /* Reserved, LANCE writes this with a zero */
+#define LE_T1_EMORE 0x10 /* More than one retry needed */
+#define LE_T1_EONE 0x08 /* One retry needed */
+#define LE_T1_EDEF 0x04 /* Deferred */
+#define LE_T1_SOP 0x02 /* Start of Packet */
+#define LE_T1_EOP 0x01 /* End of Packet */
+#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
/*
* Error Flags
*/
-#define LE_T3_BUF 0x8000 /* Buffer Error */
-#define LE_T3_UFL 0x4000 /* Underflow Error */
-#define LE_T3_LCOL 0x1000 /* Late Collision */
-#define LE_T3_CLOS 0x0800 /* Loss of Carrier */
-#define LE_T3_RTY 0x0400 /* Retry Error */
-#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry */
+#define LE_T3_BUF 0x8000 /* Buffer Error */
+#define LE_T3_UFL 0x4000 /* Underflow Error */
+#define LE_T3_LCOL 0x1000 /* Late Collision */
+#define LE_T3_CLOS 0x0800 /* Loss of Carrier */
+#define LE_T3_RTY 0x0400 /* Retry Error */
+#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry */
/* Miscellaneous useful macros */
-#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
- lp->tx_old+lp->tx_ring_mod_mask-lp->tx_new:\
- lp->tx_old - lp->tx_new-1)
+#define TX_BUFFS_AVAIL ((lp->tx_old <= lp->tx_new) ? \
+ lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new : \
+ lp->tx_old - lp->tx_new - 1)
/* The LANCE only uses 24 bit addresses. This does the obvious thing. */
#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
/* Now the prototypes we export */
int lance_open(struct net_device *dev);
-int lance_close (struct net_device *dev);
-int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
-void lance_set_multicast (struct net_device *dev);
+int lance_close(struct net_device *dev);
+int lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void lance_set_multicast(struct net_device *dev);
void lance_tx_timeout(struct net_device *dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
void lance_poll(struct net_device *dev);
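The TX_BUFFS_AVAIL macro in 7990.h is only reflowed above, but its arithmetic
is easy to misread: with a power-of-two ring, one slot is kept in reserve so
that tx_old == tx_new always means an empty ring rather than a full one. A
self-contained model of the same expression (the ring size here is
illustrative):

#include <stdio.h>

#define RING_SIZE	8
#define RING_MOD_MASK	(RING_SIZE - 1)

/* Same expression as TX_BUFFS_AVAIL, with lp->tx_old/tx_new as args. */
static int bufs_avail(int tx_old, int tx_new)
{
	return (tx_old <= tx_new) ?
		tx_old + RING_MOD_MASK - tx_new :
		tx_old - tx_new - 1;
}

int main(void)
{
	printf("%d\n", bufs_avail(0, 0));	/* empty ring: 7 free */
	printf("%d\n", bufs_avail(3, 6));	/* wrapped: 4 free */
	printf("%d\n", bufs_avail(6, 3));	/* 2 free */
	return 0;
}

Running it prints 7, 4 and 2 free slots for the three cases, never the full
RING_SIZE.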
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index d042511bdc13..d6beba0bc01f 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -24,9 +24,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
Module Name:
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 8baa3527ba74..b1fae36360d2 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -13,9 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
Module Name:
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 427c148bb643..2eee5764805d 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -27,8 +27,7 @@
* for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
*
* ########################################################################
*
diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h
index 4b7f7ad62bb8..ca53024f017f 100644
--- a/drivers/net/ethernet/amd/au1000_eth.h
+++ b/drivers/net/ethernet/amd/au1000_eth.h
@@ -18,8 +18,7 @@
* for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
*
* ########################################################################
*
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 0c61fd50d882..47ce57c2c893 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -127,41 +127,41 @@ static void hplance_remove_one(struct dio_dev *d)
/* Initialise a single lance board at the given DIO device */
static void hplance_init(struct net_device *dev, struct dio_dev *d)
{
- unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
- struct hplance_private *lp;
- int i;
-
- /* reset the board */
- out_8(va+DIO_IDOFF, 0xff);
- udelay(100); /* ariba! ariba! udelay! udelay! */
-
- /* Fill the dev fields */
- dev->base_addr = va;
- dev->netdev_ops = &hplance_netdev_ops;
- dev->dma = 0;
-
- for (i=0; i<6; i++) {
- /* The NVRAM holds our ethernet address, one nibble per byte,
- * at bytes NVRAMOFF+1,3,5,7,9...
- */
- dev->dev_addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
- | (in_8(va + HPLANCE_NVRAMOFF + i*4 + 3) & 0xF);
- }
-
- lp = netdev_priv(dev);
- lp->lance.name = (char*)d->name; /* discards const, shut up gcc */
- lp->lance.base = va;
- lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */
- lp->lance.lance_init_block = NULL; /* LANCE addr of same RAM */
- lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */
- lp->lance.irq = d->ipl;
- lp->lance.writerap = hplance_writerap;
- lp->lance.writerdp = hplance_writerdp;
- lp->lance.readrdp = hplance_readrdp;
- lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
- lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
- lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK;
- lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK;
+ unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
+ struct hplance_private *lp;
+ int i;
+
+ /* reset the board */
+ out_8(va + DIO_IDOFF, 0xff);
+ udelay(100); /* ariba! ariba! udelay! udelay! */
+
+ /* Fill the dev fields */
+ dev->base_addr = va;
+ dev->netdev_ops = &hplance_netdev_ops;
+ dev->dma = 0;
+
+ for (i = 0; i < 6; i++) {
+ /* The NVRAM holds our ethernet address, one nibble per byte,
+ * at bytes NVRAMOFF+1,3,5,7,9...
+ */
+ dev->dev_addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
+ | (in_8(va + HPLANCE_NVRAMOFF + i*4 + 3) & 0xF);
+ }
+
+ lp = netdev_priv(dev);
+ lp->lance.name = d->name;
+ lp->lance.base = va;
+ lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */
+ lp->lance.lance_init_block = NULL; /* LANCE addr of same RAM */
+ lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */
+ lp->lance.irq = d->ipl;
+ lp->lance.writerap = hplance_writerap;
+ lp->lance.writerdp = hplance_writerdp;
+ lp->lance.readrdp = hplance_readrdp;
+ lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
+ lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
+ lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK;
+ lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK;
}
/* This is disgusting. We have to check the DIO status register for ack every
@@ -195,25 +195,25 @@ static unsigned short hplance_readrdp(void *priv)
static int hplance_open(struct net_device *dev)
{
- int status;
- struct lance_private *lp = netdev_priv(dev);
+ int status;
+ struct lance_private *lp = netdev_priv(dev);
- status = lance_open(dev); /* call generic lance open code */
- if (status)
- return status;
- /* enable interrupts at board level. */
- out_8(lp->base + HPLANCE_STATUS, LE_IE);
+ status = lance_open(dev); /* call generic lance open code */
+ if (status)
+ return status;
+ /* enable interrupts at board level. */
+ out_8(lp->base + HPLANCE_STATUS, LE_IE);
- return 0;
+ return 0;
}
static int hplance_close(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
+ struct lance_private *lp = netdev_priv(dev);
- out_8(lp->base + HPLANCE_STATUS, 0); /* disable interrupts at boardlevel */
- lance_close(dev);
- return 0;
+ out_8(lp->base + HPLANCE_STATUS, 0); /* disable interrupts at boardlevel */
+ lance_close(dev);
+ return 0;
}
static int __init hplance_init_module(void)
@@ -223,7 +223,7 @@ static int __init hplance_init_module(void)
static void __exit hplance_cleanup_module(void)
{
- dio_unregister_driver(&hplance_driver);
+ dio_unregister_driver(&hplance_driver);
}
module_init(hplance_init_module);
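
Editor's note on the hunk above: the HP LANCE NVRAM stores the station address one nibble per longword slot, so each output byte is stitched together from two in_8() reads at NVRAMOFF + i*4 + 1 (high nibble) and + 3 (low nibble). A minimal user-space sketch of the same reassembly — the nvram[] contents are invented example data, not real board contents:

    #include <stdio.h>

    int main(void)
    {
        /* Fake NVRAM image: only offsets 4*i+1 and 4*i+3 matter, each
         * carrying one address nibble in its low four bits. */
        unsigned char nvram[24] = {
            [1] = 0x0, [3] = 0x8,    /* dev_addr[0] -> 0x08 */
            [5] = 0x0, [7] = 0x0,    /* dev_addr[1] -> 0x00 */
            [9] = 0x0, [11] = 0x9,   /* dev_addr[2] -> 0x09 */
        };
        unsigned char addr[6];
        int i;

        for (i = 0; i < 6; i++)
            addr[i] = ((nvram[i * 4 + 1] & 0xF) << 4) |
                       (nvram[i * 4 + 3] & 0xF);

        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
        return 0;
    }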
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index e108e911da05..0e8399dec054 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -94,33 +94,31 @@ struct net_device * __init mvme147lance_probe(int unit)
dev->netdev_ops = &lance_netdev_ops;
dev->dma = 0;
- addr=(u_long *)ETHERNET_ADDRESS;
+ addr = (u_long *)ETHERNET_ADDRESS;
address = *addr;
- dev->dev_addr[0]=0x08;
- dev->dev_addr[1]=0x00;
- dev->dev_addr[2]=0x3e;
- address=address>>8;
- dev->dev_addr[5]=address&0xff;
- address=address>>8;
- dev->dev_addr[4]=address&0xff;
- address=address>>8;
- dev->dev_addr[3]=address&0xff;
-
- printk("%s: MVME147 at 0x%08lx, irq %d, "
- "Hardware Address %pM\n",
+ dev->dev_addr[0] = 0x08;
+ dev->dev_addr[1] = 0x00;
+ dev->dev_addr[2] = 0x3e;
+ address = address >> 8;
+ dev->dev_addr[5] = address & 0xff;
+ address = address >> 8;
+ dev->dev_addr[4] = address & 0xff;
+ address = address >> 8;
+ dev->dev_addr[3] = address & 0xff;
+
+ printk("%s: MVME147 at 0x%08lx, irq %d, Hardware Address %pM\n",
dev->name, dev->base_addr, MVME147_LANCE_IRQ,
dev->dev_addr);
lp = netdev_priv(dev);
lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 16K */
- if (!lp->ram)
- {
+ if (!lp->ram) {
printk("%s: No memory for LANCE buffers\n", dev->name);
free_netdev(dev);
return ERR_PTR(-ENOMEM);
}
- lp->lance.name = (char*)name; /* discards const, shut up gcc */
+ lp->lance.name = name;
lp->lance.base = dev->base_addr;
lp->lance.init_block = (struct lance_init_block *)(lp->ram); /* CPU addr */
lp->lance.lance_init_block = (struct lance_init_block *)(lp->ram); /* LANCE addr of same RAM */
@@ -167,8 +165,8 @@ static int m147lance_open(struct net_device *dev)
if (status)
return status;
/* enable interrupts at board level. */
- m147_pcc->lan_cntrl=0; /* clear the interrupts (if any) */
- m147_pcc->lan_cntrl=0x08 | 0x04; /* Enable irq 4 */
+ m147_pcc->lan_cntrl = 0; /* clear the interrupts (if any) */
+ m147_pcc->lan_cntrl = 0x08 | 0x04; /* Enable irq 4 */
return 0;
}
@@ -176,7 +174,7 @@ static int m147lance_open(struct net_device *dev)
static int m147lance_close(struct net_device *dev)
{
/* disable interrupts at boardlevel */
- m147_pcc->lan_cntrl=0x0; /* disable interrupts */
+ m147_pcc->lan_cntrl = 0x0; /* disable interrupts */
lance_close(dev);
return 0;
}
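
Editor's note: the mvme147 probe above builds the station address from the fixed 08:00:3e OUI plus three bytes peeled off the board serial word with repeated 8-bit shifts (the lowest byte is skipped). A small stand-alone sketch of that unpacking, with an invented serial value standing in for the word read from ETHERNET_ADDRESS:

    #include <stdio.h>

    int main(void)
    {
        unsigned long serial = 0x12345678UL;       /* invented serial word */
        unsigned char mac[6] = { 0x08, 0x00, 0x3e };

        serial >>= 8;            /* skip the lowest byte, as the driver does */
        mac[5] = serial & 0xff;
        serial >>= 8;
        mac[4] = serial & 0xff;
        serial >>= 8;
        mac[3] = serial & 0xff;

        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
    }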
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 38492e0b704e..9339cccfe05a 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1668,7 +1668,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
for (i = 0; i < ETH_ALEN; i++)
promaddr[i] = inb(ioaddr + i);
- if (memcmp(promaddr, dev->dev_addr, ETH_ALEN) ||
+ if (!ether_addr_equal(promaddr, dev->dev_addr) ||
!is_valid_ether_addr(dev->dev_addr)) {
if (is_valid_ether_addr(promaddr)) {
if (pcnet32_debug & NETIF_MSG_PROBE) {
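
Editor's note on the pcnet32 hunk above: mind the polarity flip the conversion preserves — memcmp() is non-zero when the addresses differ, while ether_addr_equal() is true when they match, hence the added `!`. A hedged stand-in in plain C (the kernel's ether_addr_equal() is optimized and assumes 2-byte alignment; this simplified version only shows the semantics):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-in for the kernel helper. */
    static bool ether_addr_equal_sketch(const unsigned char *a,
                                        const unsigned char *b)
    {
        return memcmp(a, b, 6) == 0;
    }

    int main(void)
    {
        unsigned char prom[6] = { 0, 0x10, 0x18, 0, 0, 1 };
        unsigned char cur[6]  = { 0, 0x10, 0x18, 0, 0, 2 };

        /* old test:  memcmp(prom, cur, 6)         -> non-zero on mismatch
         * new test:  !ether_addr_equal(prom, cur) -> true on mismatch     */
        if (!ether_addr_equal_sketch(prom, cur))
            printf("addresses differ, falling back to PROM address\n");
        return 0;
    }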
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index dc08678bf9a4..928fac6dd10a 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -122,7 +122,6 @@ struct buffer_state {
* @link: PHY's last seen link state.
* @duplex: PHY's last set duplex mode.
* @speed: PHY's last set speed.
- * @max_speed: Maximum supported by current system network data-rate.
*/
struct arc_emac_priv {
/* Devices */
@@ -152,7 +151,6 @@ struct arc_emac_priv {
unsigned int link;
unsigned int duplex;
unsigned int speed;
- unsigned int max_speed;
};
/**
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index b2ffad1304d2..eeecc29cf5b7 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -381,17 +381,7 @@ static int arc_emac_open(struct net_device *ndev)
phy_dev->autoneg = AUTONEG_ENABLE;
phy_dev->speed = 0;
phy_dev->duplex = 0;
- phy_dev->advertising = phy_dev->supported;
-
- if (priv->max_speed > 100) {
- phy_dev->advertising &= PHY_GBIT_FEATURES;
- } else if (priv->max_speed <= 100) {
- phy_dev->advertising &= PHY_BASIC_FEATURES;
- if (priv->max_speed <= 10) {
- phy_dev->advertising &= ~SUPPORTED_100baseT_Half;
- phy_dev->advertising &= ~SUPPORTED_100baseT_Full;
- }
- }
+ phy_dev->advertising &= phy_dev->supported;
priv->last_rx_bd = 0;
@@ -565,6 +555,8 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
/* Make sure pointer to data buffer is set */
wmb();
+ skb_tx_timestamp(skb);
+
*info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
/* Increment index to point to the next BD */
@@ -579,8 +571,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
arc_reg_set(priv, R_STATUS, TXPL_MASK);
- skb_tx_timestamp(skb);
-
return NETDEV_TX_OK;
}
@@ -704,14 +694,6 @@ static int arc_emac_probe(struct platform_device *pdev)
/* Set poll rate so that it polls every 1 ms */
arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000);
- /* Get max speed of operation from device tree */
- if (of_property_read_u32(pdev->dev.of_node, "max-speed",
- &priv->max_speed)) {
- dev_err(&pdev->dev, "failed to retrieve <max-speed> from device tree\n");
- err = -EINVAL;
- goto out;
- }
-
ndev->irq = irq;
dev_info(&pdev->dev, "IRQ is %d\n", ndev->irq);
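
Editor's note on the arc_emac hunks above: two behavioural points. First, skb_tx_timestamp() moves before the descriptor hand-off — once FOR_EMAC is set the MAC may complete and recycle the skb, so the stamp must be taken while the skb is still owned by the driver. Second, the removed device-tree max-speed clamping is replaced by masking the advertised modes with what the PHY actually supports. A trivial sketch of that masking, with invented bitmask values standing in for the ethtool SUPPORTED_*/ADVERTISED_* bits:

    #include <stdio.h>

    int main(void)
    {
        /* Invented masks standing in for phydev->supported and
         * phydev->advertising. */
        unsigned int supported   = 0x0000002f;
        unsigned int advertising = 0xffffffff;

        advertising &= supported;   /* never advertise an unsupported mode */

        printf("advertising = 0x%08x\n", advertising);
        return 0;
    }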
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index a36a760ada28..29801750f239 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -145,9 +145,11 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
* Mask some pcie error bits
*/
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
- pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
- data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
- pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
+ if (pos) {
+ pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
+ data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
+ pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
+ }
/* clear error status */
pcie_capability_write_word(pdev, PCI_EXP_DEVSTA,
PCI_EXP_DEVSTA_NFED |
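
Editor's note on the atl1c fix above: pci_find_ext_capability() returns 0 when the AER capability is absent, and the old code would then read and write config space at a bogus offset. A kernel-flavoured sketch of the guarded pattern (assumes a valid struct pci_dev * is in scope, so it is illustrative rather than a standalone program):

    #include <linux/pci.h>

    /* Mirrors the guarded AER-severity masking from the hunk above. */
    static void mask_aer_severity(struct pci_dev *pdev)
    {
        u32 data;
        int pos;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
        if (!pos)       /* no AER capability: nothing to mask */
            return;

        pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
        data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
        pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
    }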
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 538211d6f7d9..55d86ecdacfe 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3122,7 +3122,8 @@ static void atl1_remove(struct pci_dev *pdev)
* from the BIOS during POST. If we've been messing with the MAC
* address, we need to save the permanent one.
*/
- if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) {
+ if (!ether_addr_equal_unaligned(adapter->hw.mac_addr,
+ adapter->hw.perm_mac_addr)) {
memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr,
ETH_ALEN);
atl1_set_mac_addr(&adapter->hw);
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 2fa5b86f139d..3f97d9fd0a71 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -23,6 +23,7 @@ config B44
depends on SSB_POSSIBLE && HAS_DMA
select SSB
select MII
+ select PHYLIB
---help---
If you have a network (Ethernet) controller of this type, say Y
or M and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 90e54d5488dc..1f7b5aa114fa 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -6,6 +6,7 @@
* Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
* Copyright (C) 2006 Broadcom Corporation.
* Copyright (C) 2007 Michael Buesch <m@bues.ch>
+ * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de>
*
* Distribute under GPL.
*/
@@ -29,6 +30,7 @@
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>
#include <linux/slab.h>
+#include <linux/phy.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -284,7 +286,7 @@ static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
- if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
return 0;
return __b44_readphy(bp, bp->phy_addr, reg, val);
@@ -292,14 +294,14 @@ static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
- if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
return 0;
return __b44_writephy(bp, bp->phy_addr, reg, val);
}
/* miilib interface */
-static int b44_mii_read(struct net_device *dev, int phy_id, int location)
+static int b44_mdio_read_mii(struct net_device *dev, int phy_id, int location)
{
u32 val;
struct b44 *bp = netdev_priv(dev);
@@ -309,19 +311,36 @@ static int b44_mii_read(struct net_device *dev, int phy_id, int location)
return val;
}
-static void b44_mii_write(struct net_device *dev, int phy_id, int location,
- int val)
+static void b44_mdio_write_mii(struct net_device *dev, int phy_id, int location,
+ int val)
{
struct b44 *bp = netdev_priv(dev);
__b44_writephy(bp, phy_id, location, val);
}
+static int b44_mdio_read_phylib(struct mii_bus *bus, int phy_id, int location)
+{
+ u32 val;
+ struct b44 *bp = bus->priv;
+ int rc = __b44_readphy(bp, phy_id, location, &val);
+ if (rc)
+ return 0xffffffff;
+ return val;
+}
+
+static int b44_mdio_write_phylib(struct mii_bus *bus, int phy_id, int location,
+ u16 val)
+{
+ struct b44 *bp = bus->priv;
+ return __b44_writephy(bp, phy_id, location, val);
+}
+
static int b44_phy_reset(struct b44 *bp)
{
u32 val;
int err;
- if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
return 0;
err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
if (err)
@@ -423,7 +442,7 @@ static int b44_setup_phy(struct b44 *bp)
b44_wap54g10_workaround(bp);
- if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
return 0;
if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
goto out;
@@ -521,12 +540,14 @@ static void b44_check_phy(struct b44 *bp)
{
u32 bmsr, aux;
- if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
bp->flags |= B44_FLAG_100_BASE_T;
- bp->flags |= B44_FLAG_FULL_DUPLEX;
if (!netif_carrier_ok(bp->dev)) {
u32 val = br32(bp, B44_TX_CTRL);
- val |= TX_CTRL_DUPLEX;
+ if (bp->flags & B44_FLAG_FULL_DUPLEX)
+ val |= TX_CTRL_DUPLEX;
+ else
+ val &= ~TX_CTRL_DUPLEX;
bw32(bp, B44_TX_CTRL, val);
netif_carrier_on(bp->dev);
b44_link_report(bp);
@@ -1315,7 +1336,7 @@ static void b44_chip_reset(struct b44 *bp, int reset_kind)
if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
br32(bp, B44_ENET_CTRL);
- bp->flags &= ~B44_FLAG_INTERNAL_PHY;
+ bp->flags |= B44_FLAG_EXTERNAL_PHY;
} else {
u32 val = br32(bp, B44_DEVCTRL);
@@ -1324,7 +1345,7 @@ static void b44_chip_reset(struct b44 *bp, int reset_kind)
br32(bp, B44_DEVCTRL);
udelay(100);
}
- bp->flags |= B44_FLAG_INTERNAL_PHY;
+ bp->flags &= ~B44_FLAG_EXTERNAL_PHY;
}
}
@@ -1339,7 +1360,10 @@ static void b44_halt(struct b44 *bp)
bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
/* now reset the chip, but without enabling the MAC&PHY
* part of it. This has to be done _after_ we shut down the PHY */
- b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+ b44_chip_reset(bp, B44_CHIP_RESET_FULL);
+ else
+ b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}
/* bp->lock is held. */
@@ -1805,6 +1829,11 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct b44 *bp = netdev_priv(dev);
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+ BUG_ON(!bp->phydev);
+ return phy_ethtool_gset(bp->phydev, cmd);
+ }
+
cmd->supported = (SUPPORTED_Autoneg);
cmd->supported |= (SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
@@ -1828,8 +1857,8 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
DUPLEX_FULL : DUPLEX_HALF;
cmd->port = 0;
cmd->phy_address = bp->phy_addr;
- cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
- XCVR_INTERNAL : XCVR_EXTERNAL;
+ cmd->transceiver = (bp->flags & B44_FLAG_EXTERNAL_PHY) ?
+ XCVR_EXTERNAL : XCVR_INTERNAL;
cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
AUTONEG_DISABLE : AUTONEG_ENABLE;
if (cmd->autoneg == AUTONEG_ENABLE)
@@ -1846,7 +1875,23 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct b44 *bp = netdev_priv(dev);
- u32 speed = ethtool_cmd_speed(cmd);
+ u32 speed;
+ int ret;
+
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+ BUG_ON(!bp->phydev);
+ spin_lock_irq(&bp->lock);
+ if (netif_running(dev))
+ b44_setup_phy(bp);
+
+ ret = phy_ethtool_sset(bp->phydev, cmd);
+
+ spin_unlock_irq(&bp->lock);
+
+ return ret;
+ }
+
+ speed = ethtool_cmd_speed(cmd);
/* We do not support gigabit. */
if (cmd->autoneg == AUTONEG_ENABLE) {
@@ -2076,7 +2121,6 @@ static const struct ethtool_ops b44_ethtool_ops = {
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct mii_ioctl_data *data = if_mii(ifr);
struct b44 *bp = netdev_priv(dev);
int err = -EINVAL;
@@ -2084,7 +2128,12 @@ static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
goto out;
spin_lock_irq(&bp->lock);
- err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+ BUG_ON(!bp->phydev);
+ err = phy_mii_ioctl(bp->phydev, ifr, cmd);
+ } else {
+ err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
+ }
spin_unlock_irq(&bp->lock);
out:
return err;
@@ -2146,6 +2195,141 @@ static const struct net_device_ops b44_netdev_ops = {
#endif
};
+static void b44_adjust_link(struct net_device *dev)
+{
+ struct b44 *bp = netdev_priv(dev);
+ struct phy_device *phydev = bp->phydev;
+ bool status_changed = false;
+
+ BUG_ON(!phydev);
+
+ if (bp->old_link != phydev->link) {
+ status_changed = true;
+ bp->old_link = phydev->link;
+ }
+
+ /* reflect duplex change */
+ if (phydev->link) {
+ if ((phydev->duplex == DUPLEX_HALF) &&
+ (bp->flags & B44_FLAG_FULL_DUPLEX)) {
+ status_changed = true;
+ bp->flags &= ~B44_FLAG_FULL_DUPLEX;
+ } else if ((phydev->duplex == DUPLEX_FULL) &&
+ !(bp->flags & B44_FLAG_FULL_DUPLEX)) {
+ status_changed = true;
+ bp->flags |= B44_FLAG_FULL_DUPLEX;
+ }
+ }
+
+ if (status_changed) {
+ b44_check_phy(bp);
+ phy_print_status(phydev);
+ }
+}
+
+static int b44_register_phy_one(struct b44 *bp)
+{
+ struct mii_bus *mii_bus;
+ struct ssb_device *sdev = bp->sdev;
+ struct phy_device *phydev;
+ char bus_id[MII_BUS_ID_SIZE + 3];
+ struct ssb_sprom *sprom = &sdev->bus->sprom;
+ int err;
+
+ mii_bus = mdiobus_alloc();
+ if (!mii_bus) {
+ dev_err(sdev->dev, "mdiobus_alloc() failed\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ mii_bus->priv = bp;
+ mii_bus->read = b44_mdio_read_phylib;
+ mii_bus->write = b44_mdio_write_phylib;
+ mii_bus->name = "b44_eth_mii";
+ mii_bus->parent = sdev->dev;
+ mii_bus->phy_mask = ~(1 << bp->phy_addr);
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%x", instance);
+ mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!mii_bus->irq) {
+ dev_err(sdev->dev, "mii_bus irq allocation failed\n");
+ err = -ENOMEM;
+ goto err_out_mdiobus;
+ }
+
+ memset(mii_bus->irq, PHY_POLL, sizeof(int) * PHY_MAX_ADDR);
+
+ bp->mii_bus = mii_bus;
+
+ err = mdiobus_register(mii_bus);
+ if (err) {
+ dev_err(sdev->dev, "failed to register MII bus\n");
+ goto err_out_mdiobus_irq;
+ }
+
+ if (!bp->mii_bus->phy_map[bp->phy_addr] &&
+ (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) {
+
+ dev_info(sdev->dev,
+ "could not find PHY at %i, use fixed one\n",
+ bp->phy_addr);
+
+ bp->phy_addr = 0;
+ snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0",
+ bp->phy_addr);
+ } else {
+ snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
+ bp->phy_addr);
+ }
+
+ phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(phydev)) {
+ dev_err(sdev->dev, "could not attach PHY at %i\n",
+ bp->phy_addr);
+ err = PTR_ERR(phydev);
+ goto err_out_mdiobus_unregister;
+ }
+
+ /* mask with MAC supported features */
+ phydev->supported &= (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_MII);
+ phydev->advertising = phydev->supported;
+
+ bp->phydev = phydev;
+ bp->old_link = 0;
+ bp->phy_addr = phydev->addr;
+
+ dev_info(sdev->dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
+ phydev->drv->name, dev_name(&phydev->dev));
+
+ return 0;
+
+err_out_mdiobus_unregister:
+ mdiobus_unregister(mii_bus);
+
+err_out_mdiobus_irq:
+ kfree(mii_bus->irq);
+
+err_out_mdiobus:
+ mdiobus_free(mii_bus);
+
+err_out:
+ return err;
+}
+
+static void b44_unregister_phy_one(struct b44 *bp)
+{
+ struct mii_bus *mii_bus = bp->mii_bus;
+
+ phy_disconnect(bp->phydev);
+ mdiobus_unregister(mii_bus);
+ kfree(mii_bus->irq);
+ mdiobus_free(mii_bus);
+}
+
static int b44_init_one(struct ssb_device *sdev,
const struct ssb_device_id *ent)
{
@@ -2206,9 +2390,15 @@ static int b44_init_one(struct ssb_device *sdev,
goto err_out_powerdown;
}
+ if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
+ dev_err(sdev->dev, "No PHY present on this MAC, aborting\n");
+ err = -ENODEV;
+ goto err_out_powerdown;
+ }
+
bp->mii_if.dev = dev;
- bp->mii_if.mdio_read = b44_mii_read;
- bp->mii_if.mdio_write = b44_mii_write;
+ bp->mii_if.mdio_read = b44_mdio_read_mii;
+ bp->mii_if.mdio_write = b44_mdio_write_mii;
bp->mii_if.phy_id = bp->phy_addr;
bp->mii_if.phy_id_mask = 0x1f;
bp->mii_if.reg_num_mask = 0x1f;
@@ -2236,13 +2426,26 @@ static int b44_init_one(struct ssb_device *sdev,
b44_chip_reset(bp, B44_CHIP_RESET_FULL);
/* do a phy reset to test if there is an active phy */
- if (b44_phy_reset(bp) < 0)
- bp->phy_addr = B44_PHY_ADDR_NO_PHY;
+ err = b44_phy_reset(bp);
+ if (err < 0) {
+ dev_err(sdev->dev, "phy reset failed\n");
+ goto err_out_unregister_netdev;
+ }
+
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+ err = b44_register_phy_one(bp);
+ if (err) {
+ dev_err(sdev->dev, "Cannot register PHY, aborting\n");
+ goto err_out_unregister_netdev;
+ }
+ }
netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);
return 0;
+err_out_unregister_netdev:
+ unregister_netdev(dev);
err_out_powerdown:
ssb_bus_may_powerdown(sdev->bus);
@@ -2256,8 +2459,11 @@ out:
static void b44_remove_one(struct ssb_device *sdev)
{
struct net_device *dev = ssb_get_drvdata(sdev);
+ struct b44 *bp = netdev_priv(dev);
unregister_netdev(dev);
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+ b44_unregister_phy_one(bp);
ssb_device_disable(sdev, 0);
ssb_bus_may_powerdown(sdev->bus);
free_netdev(dev);
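
Editor's note: b44_register_phy_one() above uses the usual goto-unwind idiom — each err_out_* label releases exactly what was acquired before the failure point, and b44_unregister_phy_one() replays the same releases in reverse once the PHY is connected. A minimal runnable sketch of the idiom, with heap buffers standing in for the mii_bus, its irq table, and the registered state:

    #include <stdio.h>
    #include <stdlib.h>

    /* fail_at picks the step that "fails" (0 = success). */
    static int register_sketch(int fail_at)
    {
        void *bus, *irq_table;
        int err = -1;

        bus = (fail_at == 1) ? NULL : malloc(64);   /* mdiobus_alloc()   */
        if (!bus)
            goto err_out;

        irq_table = (fail_at == 2) ? NULL : malloc(32); /* irq table     */
        if (!irq_table)
            goto err_free_bus;

        if (fail_at == 3)           /* e.g. mdiobus_register() failing   */
            goto err_free_irq;

        /* success: caller later tears down in the same reverse order,
         * like b44_unregister_phy_one() */
        free(irq_table);
        free(bus);
        return 0;

    err_free_irq:
        free(irq_table);
    err_free_bus:
        free(bus);
    err_out:
        return err;
    }

    int main(void)
    {
        printf("fail at step 3 -> %d\n", register_sketch(3));
        printf("no failure     -> %d\n", register_sketch(0));
        return 0;
    }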
diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h
index 8993d72f0420..3e9c3fc7591b 100644
--- a/drivers/net/ethernet/broadcom/b44.h
+++ b/drivers/net/ethernet/broadcom/b44.h
@@ -280,9 +280,10 @@ struct ring_info {
dma_addr_t mapping;
};
-#define B44_MCAST_TABLE_SIZE 32
-#define B44_PHY_ADDR_NO_PHY 30
-#define B44_MDC_RATIO 5000000
+#define B44_MCAST_TABLE_SIZE 32
+#define B44_PHY_ADDR_NO_LOCAL_PHY 30 /* no local phy regs */
+#define B44_PHY_ADDR_NO_PHY 31 /* no phy present at all */
+#define B44_MDC_RATIO 5000000
#define B44_STAT_REG_DECLARE \
_B44(tx_good_octets) \
@@ -344,6 +345,9 @@ B44_STAT_REG_DECLARE
struct u64_stats_sync syncp;
};
+#define B44_BOARDFLAG_ROBO 0x0010 /* Board has robo switch */
+#define B44_BOARDFLAG_ADM 0x0080 /* Board has ADMtek switch */
+
struct ssb_device;
struct b44 {
@@ -376,7 +380,7 @@ struct b44 {
#define B44_FLAG_ADV_10FULL 0x02000000
#define B44_FLAG_ADV_100HALF 0x04000000
#define B44_FLAG_ADV_100FULL 0x08000000
-#define B44_FLAG_INTERNAL_PHY 0x10000000
+#define B44_FLAG_EXTERNAL_PHY 0x10000000
#define B44_FLAG_RX_RING_HACK 0x20000000
#define B44_FLAG_TX_RING_HACK 0x40000000
#define B44_FLAG_WOL_ENABLE 0x80000000
@@ -396,6 +400,9 @@ struct b44 {
u32 tx_pending;
u8 phy_addr;
u8 force_copybreak;
+ struct phy_device *phydev;
+ struct mii_bus *mii_bus;
+ int old_link;
struct mii_if_info mii_if;
};
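
Editor's note on the header hunks above: the patch inverts the flag's sense rather than add a bit — B44_FLAG_EXTERNAL_PHY reuses the 0x10000000 value of the old B44_FLAG_INTERNAL_PHY, so every set/clear and test site in b44.c flips, as visible in the b44_chip_reset() hunks. A toy illustration (the strapping input is invented):

    #include <stdio.h>

    #define B44_FLAG_EXTERNAL_PHY 0x10000000  /* same bit, inverted meaning */

    int main(void)
    {
        unsigned int flags = 0;
        int internal_phy_strapped = 1;        /* invented example input */

        if (internal_phy_strapped)
            flags &= ~B44_FLAG_EXTERNAL_PHY;
        else
            flags |= B44_FLAG_EXTERNAL_PHY;

        printf("external PHY: %s\n",
               (flags & B44_FLAG_EXTERNAL_PHY) ? "yes" : "no");
        return 0;
    }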
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index e2aa09ce6af7..7f968a9b52c8 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -96,6 +96,19 @@ static void bgmac_dma_tx_enable(struct bgmac *bgmac,
u32 ctl;
ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
+ if (bgmac->core->id.rev >= 4) {
+ ctl &= ~BGMAC_DMA_TX_BL_MASK;
+ ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;
+
+ ctl &= ~BGMAC_DMA_TX_MR_MASK;
+ ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT;
+
+ ctl &= ~BGMAC_DMA_TX_PC_MASK;
+ ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT;
+
+ ctl &= ~BGMAC_DMA_TX_PT_MASK;
+ ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT;
+ }
ctl |= BGMAC_DMA_TX_ENABLE;
ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
@@ -240,6 +253,16 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac,
u32 ctl;
ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
+ if (bgmac->core->id.rev >= 4) {
+ ctl &= ~BGMAC_DMA_RX_BL_MASK;
+ ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;
+
+ ctl &= ~BGMAC_DMA_RX_PC_MASK;
+ ctl |= BGMAC_DMA_RX_PC_8 << BGMAC_DMA_RX_PC_SHIFT;
+
+ ctl &= ~BGMAC_DMA_RX_PT_MASK;
+ ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
+ }
ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
ctl |= BGMAC_DMA_RX_ENABLE;
ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
@@ -682,70 +705,6 @@ static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
return 0;
}
-/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyforce */
-static void bgmac_phy_force(struct bgmac *bgmac)
-{
- u16 ctl;
- u16 mask = ~(BGMAC_PHY_CTL_SPEED | BGMAC_PHY_CTL_SPEED_MSB |
- BGMAC_PHY_CTL_ANENAB | BGMAC_PHY_CTL_DUPLEX);
-
- if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
- return;
-
- if (bgmac->autoneg)
- return;
-
- ctl = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL);
- ctl &= mask;
- if (bgmac->full_duplex)
- ctl |= BGMAC_PHY_CTL_DUPLEX;
- if (bgmac->speed == BGMAC_SPEED_100)
- ctl |= BGMAC_PHY_CTL_SPEED_100;
- else if (bgmac->speed == BGMAC_SPEED_1000)
- ctl |= BGMAC_PHY_CTL_SPEED_1000;
- bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL, ctl);
-}
-
-/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyadvertise */
-static void bgmac_phy_advertise(struct bgmac *bgmac)
-{
- u16 adv;
-
- if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
- return;
-
- if (!bgmac->autoneg)
- return;
-
- /* Adv selected 10/100 speeds */
- adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV);
- adv &= ~(BGMAC_PHY_ADV_10HALF | BGMAC_PHY_ADV_10FULL |
- BGMAC_PHY_ADV_100HALF | BGMAC_PHY_ADV_100FULL);
- if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
- adv |= BGMAC_PHY_ADV_10HALF;
- if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
- adv |= BGMAC_PHY_ADV_100HALF;
- if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
- adv |= BGMAC_PHY_ADV_10FULL;
- if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
- adv |= BGMAC_PHY_ADV_100FULL;
- bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV, adv);
-
- /* Adv selected 1000 speeds */
- adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2);
- adv &= ~(BGMAC_PHY_ADV2_1000HALF | BGMAC_PHY_ADV2_1000FULL);
- if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
- adv |= BGMAC_PHY_ADV2_1000HALF;
- if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
- adv |= BGMAC_PHY_ADV2_1000FULL;
- bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2, adv);
-
- /* Restart */
- bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
- bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) |
- BGMAC_PHY_CTL_RESTART);
-}
-
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
static void bgmac_phy_init(struct bgmac *bgmac)
{
@@ -789,11 +748,9 @@ static void bgmac_phy_reset(struct bgmac *bgmac)
if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
return;
- bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
- BGMAC_PHY_CTL_RESET);
+ bgmac_phy_write(bgmac, bgmac->phyaddr, MII_BMCR, BMCR_RESET);
udelay(100);
- if (bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) &
- BGMAC_PHY_CTL_RESET)
+ if (bgmac_phy_read(bgmac, bgmac->phyaddr, MII_BMCR) & BMCR_RESET)
bgmac_err(bgmac, "PHY reset failed\n");
bgmac_phy_init(bgmac);
}
@@ -811,13 +768,13 @@ static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
u32 new_val = (cmdcfg & mask) | set;
- bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR);
+ bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR(bgmac->core->id.rev));
udelay(2);
if (new_val != cmdcfg || force)
bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
- bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR);
+ bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR(bgmac->core->id.rev));
udelay(2);
}
@@ -876,31 +833,56 @@ static void bgmac_clear_mib(struct bgmac *bgmac)
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
-static void bgmac_speed(struct bgmac *bgmac, int speed)
+static void bgmac_mac_speed(struct bgmac *bgmac)
{
u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
u32 set = 0;
- if (speed & BGMAC_SPEED_10)
+ switch (bgmac->mac_speed) {
+ case SPEED_10:
set |= BGMAC_CMDCFG_ES_10;
- if (speed & BGMAC_SPEED_100)
+ break;
+ case SPEED_100:
set |= BGMAC_CMDCFG_ES_100;
- if (speed & BGMAC_SPEED_1000)
+ break;
+ case SPEED_1000:
set |= BGMAC_CMDCFG_ES_1000;
- if (!bgmac->full_duplex)
+ break;
+ case SPEED_2500:
+ set |= BGMAC_CMDCFG_ES_2500;
+ break;
+ default:
+ bgmac_err(bgmac, "Unsupported speed: %d\n", bgmac->mac_speed);
+ }
+
+ if (bgmac->mac_duplex == DUPLEX_HALF)
set |= BGMAC_CMDCFG_HD;
+
bgmac_cmdcfg_maskset(bgmac, mask, set, true);
}
static void bgmac_miiconfig(struct bgmac *bgmac)
{
- u8 imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
- BGMAC_DS_MM_SHIFT;
- if (imode == 0 || imode == 1) {
- if (bgmac->autoneg)
- bgmac_speed(bgmac, BGMAC_SPEED_100);
- else
- bgmac_speed(bgmac, bgmac->speed);
+ struct bcma_device *core = bgmac->core;
+ struct bcma_chipinfo *ci = &core->bus->chipinfo;
+ u8 imode;
+
+ if (ci->id == BCMA_CHIP_ID_BCM4707 ||
+ ci->id == BCMA_CHIP_ID_BCM53018) {
+ bcma_awrite32(core, BCMA_IOCTL,
+ bcma_aread32(core, BCMA_IOCTL) | 0x40 |
+ BGMAC_BCMA_IOCTL_SW_CLKEN);
+ bgmac->mac_speed = SPEED_2500;
+ bgmac->mac_duplex = DUPLEX_FULL;
+ bgmac_mac_speed(bgmac);
+ } else {
+ imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
+ BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
+ if (imode == 0 || imode == 1) {
+ bgmac->mac_speed = SPEED_100;
+ bgmac->mac_duplex = DUPLEX_FULL;
+ bgmac_mac_speed(bgmac);
+ }
}
}
@@ -910,7 +892,7 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
struct bcma_device *core = bgmac->core;
struct bcma_bus *bus = core->bus;
struct bcma_chipinfo *ci = &bus->chipinfo;
- u32 flags = 0;
+ u32 flags;
u32 iost;
int i;
@@ -933,26 +915,36 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
}
iost = bcma_aread32(core, BCMA_IOST);
- if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 10) ||
+ if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
(ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
- (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9))
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188))
iost &= ~BGMAC_BCMA_IOST_ATTACHED;
- if (iost & BGMAC_BCMA_IOST_ATTACHED) {
- flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
- if (!bgmac->has_robosw)
- flags |= BGMAC_BCMA_IOCTL_SW_RESET;
+ /* 3GMAC: for BCM4707, only do core reset at bgmac_probe() */
+ if (ci->id != BCMA_CHIP_ID_BCM4707) {
+ flags = 0;
+ if (iost & BGMAC_BCMA_IOST_ATTACHED) {
+ flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
+ if (!bgmac->has_robosw)
+ flags |= BGMAC_BCMA_IOCTL_SW_RESET;
+ }
+ bcma_core_enable(core, flags);
}
- bcma_core_enable(core, flags);
-
- if (core->id.rev > 2) {
- bgmac_set(bgmac, BCMA_CLKCTLST, 1 << 8);
- bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, 1 << 24, 1 << 24,
+ /* Request Misc PLL for corerev > 2 */
+ if (core->id.rev > 2 &&
+ ci->id != BCMA_CHIP_ID_BCM4707 &&
+ ci->id != BCMA_CHIP_ID_BCM53018) {
+ bgmac_set(bgmac, BCMA_CLKCTLST,
+ BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
+ bgmac_wait_value(bgmac->core, BCMA_CLKCTLST,
+ BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
+ BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
1000);
}
- if (ci->id == BCMA_CHIP_ID_BCM5357 || ci->id == BCMA_CHIP_ID_BCM4749 ||
+ if (ci->id == BCMA_CHIP_ID_BCM5357 ||
+ ci->id == BCMA_CHIP_ID_BCM4749 ||
ci->id == BCMA_CHIP_ID_BCM53572) {
struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
u8 et_swtype = 0;
@@ -967,10 +959,11 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
et_swtype &= 0x0f;
et_swtype <<= 4;
sw_type = et_swtype;
- } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 9) {
+ } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358) {
sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
- } else if ((ci->id != BCMA_CHIP_ID_BCM53572 && ci->pkg == 10) ||
- (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9)) {
+ } else if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
+ (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
}
@@ -1007,8 +1000,10 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
BGMAC_CMDCFG_PROM |
BGMAC_CMDCFG_NLC |
BGMAC_CMDCFG_CFE |
- BGMAC_CMDCFG_SR,
+ BGMAC_CMDCFG_SR(core->id.rev),
false);
+ bgmac->mac_speed = SPEED_UNKNOWN;
+ bgmac->mac_duplex = DUPLEX_UNKNOWN;
bgmac_clear_mib(bgmac);
if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
@@ -1048,7 +1043,7 @@ static void bgmac_enable(struct bgmac *bgmac)
cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
- BGMAC_CMDCFG_SR, true);
+ BGMAC_CMDCFG_SR(bgmac->core->id.rev), true);
udelay(2);
cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);
@@ -1077,12 +1072,16 @@ static void bgmac_enable(struct bgmac *bgmac)
break;
}
- rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
- rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
- bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / 1000000;
- mdp = (bp_clk * 128 / 1000) - 3;
- rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
- bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
+ if (ci->id != BCMA_CHIP_ID_BCM4707 &&
+ ci->id != BCMA_CHIP_ID_BCM53018) {
+ rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
+ rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
+ bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) /
+ 1000000;
+ mdp = (bp_clk * 128 / 1000) - 3;
+ rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
+ bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
+ }
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
@@ -1108,13 +1107,6 @@ static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
- if (!bgmac->autoneg) {
- bgmac_speed(bgmac, bgmac->speed);
- bgmac_phy_force(bgmac);
- } else if (bgmac->speed) { /* if there is anything to adv */
- bgmac_phy_advertise(bgmac);
- }
-
if (full_init) {
bgmac_dma_init(bgmac);
if (1) /* FIXME: is there any case we don't want IRQs? */
@@ -1204,6 +1196,8 @@ static int bgmac_open(struct net_device *net_dev)
}
napi_enable(&bgmac->napi);
+ phy_start(bgmac->phy_dev);
+
netif_carrier_on(net_dev);
err_out:
@@ -1216,6 +1210,8 @@ static int bgmac_stop(struct net_device *net_dev)
netif_carrier_off(net_dev);
+ phy_stop(bgmac->phy_dev);
+
napi_disable(&bgmac->napi);
bgmac_chip_intrs_off(bgmac);
free_irq(bgmac->core->irq, net_dev);
@@ -1252,27 +1248,11 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
struct bgmac *bgmac = netdev_priv(net_dev);
- struct mii_ioctl_data *data = if_mii(ifr);
-
- switch (cmd) {
- case SIOCGMIIPHY:
- data->phy_id = bgmac->phyaddr;
- /* fallthru */
- case SIOCGMIIREG:
- if (!netif_running(net_dev))
- return -EAGAIN;
- data->val_out = bgmac_phy_read(bgmac, data->phy_id,
- data->reg_num & 0x1f);
- return 0;
- case SIOCSMIIREG:
- if (!netif_running(net_dev))
- return -EAGAIN;
- bgmac_phy_write(bgmac, data->phy_id, data->reg_num & 0x1f,
- data->val_in);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
+
+ if (!netif_running(net_dev))
+ return -EINVAL;
+
+ return phy_mii_ioctl(bgmac->phy_dev, ifr, cmd);
}
static const struct net_device_ops bgmac_netdev_ops = {
@@ -1294,61 +1274,16 @@ static int bgmac_get_settings(struct net_device *net_dev,
{
struct bgmac *bgmac = netdev_priv(net_dev);
- cmd->supported = SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg;
-
- if (bgmac->autoneg) {
- WARN_ON(cmd->advertising);
- if (bgmac->full_duplex) {
- if (bgmac->speed & BGMAC_SPEED_10)
- cmd->advertising |= ADVERTISED_10baseT_Full;
- if (bgmac->speed & BGMAC_SPEED_100)
- cmd->advertising |= ADVERTISED_100baseT_Full;
- if (bgmac->speed & BGMAC_SPEED_1000)
- cmd->advertising |= ADVERTISED_1000baseT_Full;
- } else {
- if (bgmac->speed & BGMAC_SPEED_10)
- cmd->advertising |= ADVERTISED_10baseT_Half;
- if (bgmac->speed & BGMAC_SPEED_100)
- cmd->advertising |= ADVERTISED_100baseT_Half;
- if (bgmac->speed & BGMAC_SPEED_1000)
- cmd->advertising |= ADVERTISED_1000baseT_Half;
- }
- } else {
- switch (bgmac->speed) {
- case BGMAC_SPEED_10:
- ethtool_cmd_speed_set(cmd, SPEED_10);
- break;
- case BGMAC_SPEED_100:
- ethtool_cmd_speed_set(cmd, SPEED_100);
- break;
- case BGMAC_SPEED_1000:
- ethtool_cmd_speed_set(cmd, SPEED_1000);
- break;
- }
- }
-
- cmd->duplex = bgmac->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
-
- cmd->autoneg = bgmac->autoneg;
-
- return 0;
+ return phy_ethtool_gset(bgmac->phy_dev, cmd);
}
-#if 0
static int bgmac_set_settings(struct net_device *net_dev,
struct ethtool_cmd *cmd)
{
struct bgmac *bgmac = netdev_priv(net_dev);
- return -1;
+ return phy_ethtool_sset(bgmac->phy_dev, cmd);
}
-#endif
static void bgmac_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info)
@@ -1359,6 +1294,7 @@ static void bgmac_get_drvinfo(struct net_device *net_dev,
static const struct ethtool_ops bgmac_ethtool_ops = {
.get_settings = bgmac_get_settings,
+ .set_settings = bgmac_set_settings,
.get_drvinfo = bgmac_get_drvinfo,
};
@@ -1377,9 +1313,35 @@ static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
return bgmac_phy_write(bus->priv, mii_id, regnum, value);
}
+static void bgmac_adjust_link(struct net_device *net_dev)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+ struct phy_device *phy_dev = bgmac->phy_dev;
+ bool update = false;
+
+ if (phy_dev->link) {
+ if (phy_dev->speed != bgmac->mac_speed) {
+ bgmac->mac_speed = phy_dev->speed;
+ update = true;
+ }
+
+ if (phy_dev->duplex != bgmac->mac_duplex) {
+ bgmac->mac_duplex = phy_dev->duplex;
+ update = true;
+ }
+ }
+
+ if (update) {
+ bgmac_mac_speed(bgmac);
+ phy_print_status(phy_dev);
+ }
+}
+
static int bgmac_mii_register(struct bgmac *bgmac)
{
struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ char bus_id[MII_BUS_ID_SIZE + 3];
int i, err = 0;
mii_bus = mdiobus_alloc();
@@ -1411,8 +1373,22 @@ static int bgmac_mii_register(struct bgmac *bgmac)
bgmac->mii_bus = mii_bus;
+ /* Connect to the PHY */
+ snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
+ bgmac->phyaddr);
+ phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(phy_dev)) {
+ bgmac_err(bgmac, "PHY connecton failed\n");
+ err = PTR_ERR(phy_dev);
+ goto err_unregister_bus;
+ }
+ bgmac->phy_dev = phy_dev;
+
return err;
+err_unregister_bus:
+ mdiobus_unregister(mii_bus);
err_free_irq:
kfree(mii_bus->irq);
err_free_bus:
@@ -1467,9 +1443,6 @@ static int bgmac_probe(struct bcma_device *core)
bcma_set_drvdata(core, bgmac);
/* Defaults */
- bgmac->autoneg = true;
- bgmac->full_duplex = true;
- bgmac->speed = BGMAC_SPEED_10 | BGMAC_SPEED_100 | BGMAC_SPEED_1000;
memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);
/* On BCM4706 we need common core to access PHY */
@@ -1500,6 +1473,27 @@ static int bgmac_probe(struct bcma_device *core)
bgmac_chip_reset(bgmac);
+ /* For Northstar, we have to take all GMAC core out of reset */
+ if (core->id.id == BCMA_CHIP_ID_BCM4707 ||
+ core->id.id == BCMA_CHIP_ID_BCM53018) {
+ struct bcma_device *ns_core;
+ int ns_gmac;
+
+ /* Northstar has 4 GMAC cores */
+ for (ns_gmac = 0; ns_gmac < 4; ns_gmac++) {
+ /* Northstar requires all GMACs to be reset
+ * before any of them is accessed. bgmac_chip_reset()
+ * calls bcma_core_enable() for this core only; the
+ * other three GMACs are not reset there, so do it here.
+ */
+ ns_core = bcma_find_core_unit(core->bus,
+ BCMA_CORE_MAC_GBIT,
+ ns_gmac);
+ if (ns_core && !bcma_core_is_enabled(ns_core))
+ bcma_core_enable(ns_core, 0);
+ }
+ }
+
err = bgmac_dma_alloc(bgmac);
if (err) {
bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 66c8afbdc8c7..89fa5bc69c51 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -95,7 +95,11 @@
#define BGMAC_RXQ_CTL_MDP_SHIFT 24
#define BGMAC_GPIO_SELECT 0x194
#define BGMAC_GPIO_OUTPUT_EN 0x198
-/* For 0x1e0 see BCMA_CLKCTLST */
+
+/* For 0x1e0 see BCMA_CLKCTLST. Below are BGMAC specific bits */
+#define BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ 0x00000100
+#define BGMAC_BCMA_CLKCTLST_MISC_PLL_ST 0x01000000
+
#define BGMAC_HW_WAR 0x1e4
#define BGMAC_PWR_CTL 0x1e8
#define BGMAC_DMA_BASE0 0x200 /* Tx and Rx controller */
@@ -185,6 +189,7 @@
#define BGMAC_CMDCFG_ES_10 0x00000000
#define BGMAC_CMDCFG_ES_100 0x00000004
#define BGMAC_CMDCFG_ES_1000 0x00000008
+#define BGMAC_CMDCFG_ES_2500 0x0000000C
#define BGMAC_CMDCFG_PROM 0x00000010 /* Set to activate promiscuous mode */
#define BGMAC_CMDCFG_PAD_EN 0x00000020
#define BGMAC_CMDCFG_CF 0x00000040
@@ -193,7 +198,9 @@
#define BGMAC_CMDCFG_TAI 0x00000200
#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
#define BGMAC_CMDCFG_HD_SHIFT 10
-#define BGMAC_CMDCFG_SR 0x00000800 /* Set to reset mode */
+#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for core revs other than 4 */
+#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, only for core rev 4 */
+#define BGMAC_CMDCFG_SR(rev) ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
#define BGMAC_CMDCFG_AE 0x00400000
#define BGMAC_CMDCFG_CFE 0x00800000
@@ -216,27 +223,6 @@
#define BGMAC_RX_STATUS 0xb38
#define BGMAC_TX_STATUS 0xb3c
-#define BGMAC_PHY_CTL 0x00
-#define BGMAC_PHY_CTL_SPEED_MSB 0x0040
-#define BGMAC_PHY_CTL_DUPLEX 0x0100 /* duplex mode */
-#define BGMAC_PHY_CTL_RESTART 0x0200 /* restart autonegotiation */
-#define BGMAC_PHY_CTL_ANENAB 0x1000 /* enable autonegotiation */
-#define BGMAC_PHY_CTL_SPEED 0x2000
-#define BGMAC_PHY_CTL_LOOP 0x4000 /* loopback */
-#define BGMAC_PHY_CTL_RESET 0x8000 /* reset */
-/* Helpers */
-#define BGMAC_PHY_CTL_SPEED_10 0
-#define BGMAC_PHY_CTL_SPEED_100 BGMAC_PHY_CTL_SPEED
-#define BGMAC_PHY_CTL_SPEED_1000 BGMAC_PHY_CTL_SPEED_MSB
-#define BGMAC_PHY_ADV 0x04
-#define BGMAC_PHY_ADV_10HALF 0x0020 /* advertise 10MBits/s half duplex */
-#define BGMAC_PHY_ADV_10FULL 0x0040 /* advertise 10MBits/s full duplex */
-#define BGMAC_PHY_ADV_100HALF 0x0080 /* advertise 100MBits/s half duplex */
-#define BGMAC_PHY_ADV_100FULL 0x0100 /* advertise 100MBits/s full duplex */
-#define BGMAC_PHY_ADV2 0x09
-#define BGMAC_PHY_ADV2_1000HALF 0x0100 /* advertise 1000MBits/s half duplex */
-#define BGMAC_PHY_ADV2_1000FULL 0x0200 /* advertise 1000MBits/s full duplex */
-
/* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */
#define BGMAC_BCMA_IOCTL_SW_CLKEN 0x00000004 /* PHY Clock Enable */
#define BGMAC_BCMA_IOCTL_SW_RESET 0x00000008 /* PHY Reset */
@@ -254,9 +240,34 @@
#define BGMAC_DMA_TX_SUSPEND 0x00000002
#define BGMAC_DMA_TX_LOOPBACK 0x00000004
#define BGMAC_DMA_TX_FLUSH 0x00000010
+#define BGMAC_DMA_TX_MR_MASK 0x000000C0 /* Multiple outstanding reads */
+#define BGMAC_DMA_TX_MR_SHIFT 6
+#define BGMAC_DMA_TX_MR_1 0
+#define BGMAC_DMA_TX_MR_2 1
#define BGMAC_DMA_TX_PARITY_DISABLE 0x00000800
#define BGMAC_DMA_TX_ADDREXT_MASK 0x00030000
#define BGMAC_DMA_TX_ADDREXT_SHIFT 16
+#define BGMAC_DMA_TX_BL_MASK 0x001C0000 /* BurstLen bits */
+#define BGMAC_DMA_TX_BL_SHIFT 18
+#define BGMAC_DMA_TX_BL_16 0
+#define BGMAC_DMA_TX_BL_32 1
+#define BGMAC_DMA_TX_BL_64 2
+#define BGMAC_DMA_TX_BL_128 3
+#define BGMAC_DMA_TX_BL_256 4
+#define BGMAC_DMA_TX_BL_512 5
+#define BGMAC_DMA_TX_BL_1024 6
+#define BGMAC_DMA_TX_PC_MASK 0x00E00000 /* Prefetch control */
+#define BGMAC_DMA_TX_PC_SHIFT 21
+#define BGMAC_DMA_TX_PC_0 0
+#define BGMAC_DMA_TX_PC_4 1
+#define BGMAC_DMA_TX_PC_8 2
+#define BGMAC_DMA_TX_PC_16 3
+#define BGMAC_DMA_TX_PT_MASK 0x03000000 /* Prefetch threshold */
+#define BGMAC_DMA_TX_PT_SHIFT 24
+#define BGMAC_DMA_TX_PT_1 0
+#define BGMAC_DMA_TX_PT_2 1
+#define BGMAC_DMA_TX_PT_4 2
+#define BGMAC_DMA_TX_PT_8 3
#define BGMAC_DMA_TX_INDEX 0x04
#define BGMAC_DMA_TX_RINGLO 0x08
#define BGMAC_DMA_TX_RINGHI 0x0C
@@ -284,8 +295,33 @@
#define BGMAC_DMA_RX_DIRECT_FIFO 0x00000100
#define BGMAC_DMA_RX_OVERFLOW_CONT 0x00000400
#define BGMAC_DMA_RX_PARITY_DISABLE 0x00000800
+#define BGMAC_DMA_RX_MR_MASK 0x000000C0 /* Multiple outstanding reads */
+#define BGMAC_DMA_RX_MR_SHIFT 6
+#define BGMAC_DMA_RX_MR_1 0
+#define BGMAC_DMA_RX_MR_2 1
#define BGMAC_DMA_RX_ADDREXT_MASK 0x00030000
#define BGMAC_DMA_RX_ADDREXT_SHIFT 16
+#define BGMAC_DMA_RX_BL_MASK 0x001C0000 /* BurstLen bits */
+#define BGMAC_DMA_RX_BL_SHIFT 18
+#define BGMAC_DMA_RX_BL_16 0
+#define BGMAC_DMA_RX_BL_32 1
+#define BGMAC_DMA_RX_BL_64 2
+#define BGMAC_DMA_RX_BL_128 3
+#define BGMAC_DMA_RX_BL_256 4
+#define BGMAC_DMA_RX_BL_512 5
+#define BGMAC_DMA_RX_BL_1024 6
+#define BGMAC_DMA_RX_PC_MASK 0x00E00000 /* Prefetch control */
+#define BGMAC_DMA_RX_PC_SHIFT 21
+#define BGMAC_DMA_RX_PC_0 0
+#define BGMAC_DMA_RX_PC_4 1
+#define BGMAC_DMA_RX_PC_8 2
+#define BGMAC_DMA_RX_PC_16 3
+#define BGMAC_DMA_RX_PT_MASK 0x03000000 /* Prefetch threshold */
+#define BGMAC_DMA_RX_PT_SHIFT 24
+#define BGMAC_DMA_RX_PT_1 0
+#define BGMAC_DMA_RX_PT_2 1
+#define BGMAC_DMA_RX_PT_4 2
+#define BGMAC_DMA_RX_PT_8 3
#define BGMAC_DMA_RX_INDEX 0x24
#define BGMAC_DMA_RX_RINGLO 0x28
#define BGMAC_DMA_RX_RINGHI 0x2C
@@ -342,10 +378,6 @@
#define BGMAC_CHIPCTL_1_SW_TYPE_RGMII 0x000000C0
#define BGMAC_CHIPCTL_1_RXC_DLL_BYPASS 0x00010000
-#define BGMAC_SPEED_10 0x0001
-#define BGMAC_SPEED_100 0x0002
-#define BGMAC_SPEED_1000 0x0004
-
#define BGMAC_WEIGHT 64
#define ETHER_MAX_LEN 1518
@@ -402,6 +434,7 @@ struct bgmac {
struct net_device *net_dev;
struct napi_struct napi;
struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
/* DMA */
struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS];
@@ -416,10 +449,9 @@ struct bgmac {
u32 int_mask;
u32 int_status;
- /* Speed-related */
- int speed;
- bool autoneg;
- bool full_duplex;
+ /* Current MAC state */
+ int mac_speed;
+ int mac_duplex;
u8 phyaddr;
bool has_robosw;
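
Editor's note: core rev 4 moved the soft-reset bit, which is why BGMAC_CMDCFG_SR became a function-like macro and every user in bgmac.c now passes core->id.rev. A tiny demonstration using the definitions from the hunk above (the macro only special-cases rev 4; all other revs fall back to the REV0 bit):

    #include <stdio.h>

    /* Definitions copied from the bgmac.h hunk above. */
    #define BGMAC_CMDCFG_SR_REV0 0x00000800
    #define BGMAC_CMDCFG_SR_REV4 0x00002000
    #define BGMAC_CMDCFG_SR(rev) ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : \
                                               BGMAC_CMDCFG_SR_REV0)

    int main(void)
    {
        printf("rev 2 SR bit: 0x%08x\n", BGMAC_CMDCFG_SR(2));
        printf("rev 4 SR bit: 0x%08x\n", BGMAC_CMDCFG_SR(4));
        return 0;
    }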
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index d9980ad00b4b..3f3cae288466 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -58,8 +58,8 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.2.4"
-#define DRV_MODULE_RELDATE "Aug 05, 2013"
+#define DRV_MODULE_VERSION "2.2.5"
+#define DRV_MODULE_RELDATE "December 20, 2013"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
@@ -1197,6 +1197,8 @@ bnx2_copper_linkup(struct bnx2 *bp)
{
u32 bmcr;
+ bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;
+
bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
if (bmcr & BMCR_ANENABLE) {
u32 local_adv, remote_adv, common;
@@ -1255,6 +1257,14 @@ bnx2_copper_linkup(struct bnx2 *bp)
}
}
+ if (bp->link_up) {
+ u32 ext_status;
+
+ bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
+ if (ext_status & EXT_STATUS_MDIX)
+ bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
+ }
+
return 0;
}
@@ -2048,29 +2058,27 @@ bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
- u32 bmcr;
+ u32 bmcr, adv_reg, new_adv = 0;
u32 new_bmcr;
bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+ bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
+ adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
+ ADVERTISE_PAUSE_ASYM);
+
+ new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
+
if (bp->autoneg & AUTONEG_SPEED) {
- u32 adv_reg, adv1000_reg;
- u32 new_adv = 0;
+ u32 adv1000_reg;
u32 new_adv1000 = 0;
- bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
- adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
- ADVERTISE_PAUSE_ASYM);
+ new_adv |= bnx2_phy_get_pause_adv(bp);
bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
adv1000_reg &= PHY_ALL_1000_SPEED;
- new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
- new_adv |= ADVERTISE_CSMA;
- new_adv |= bnx2_phy_get_pause_adv(bp);
-
new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
-
if ((adv1000_reg != new_adv1000) ||
(adv_reg != new_adv) ||
((bmcr & BMCR_ANENABLE) == 0)) {
@@ -2090,6 +2098,10 @@ __acquires(&bp->phy_lock)
return 0;
}
+ /* advertise nothing when forcing speed */
+ if (adv_reg != new_adv)
+ bnx2_write_phy(bp, bp->mii_adv, new_adv);
+
new_bmcr = 0;
if (bp->req_line_speed == SPEED_100) {
new_bmcr |= BMCR_SPEED100;
@@ -2341,9 +2353,15 @@ bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
}
/* ethernet@wirespeed */
- bnx2_write_phy(bp, 0x18, 0x7007);
- bnx2_read_phy(bp, 0x18, &val);
- bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
+ bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
+ bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
+ val |= AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
+
+ /* auto-mdix */
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
+ val |= AUX_CTL_MISC_CTL_AUTOMDIX;
+
+ bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
return 0;
}
@@ -3234,7 +3252,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
if ((bp->dev->features & NETIF_F_RXHASH) &&
((status & L2_FHDR_STATUS_USE_RXHASH) ==
L2_FHDR_STATUS_USE_RXHASH))
- skb->rxhash = rx_hdr->l2_fhdr_hash;
+ skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
+ PKT_HASH_TYPE_L3);
skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
napi_gro_receive(&bnapi->napi, skb);
@@ -6865,6 +6884,12 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (netif_carrier_ok(dev)) {
ethtool_cmd_speed_set(cmd, bp->line_speed);
cmd->duplex = bp->duplex;
+ if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
+ if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
+ cmd->eth_tp_mdix = ETH_TP_MDI_X;
+ else
+ cmd->eth_tp_mdix = ETH_TP_MDI;
+ }
}
else {
ethtool_cmd_speed_set(cmd, -1);
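
Editor's note: the MDI-X plumbing in the bnx2.c hunks above has two halves — the link-up path latches the pair state from MII_BNX2_EXT_STATUS into BNX2_PHY_FLAG_MDIX, and bnx2_get_settings() then reports it through cmd->eth_tp_mdix for copper PHYs. A compact sketch of the latch-then-report flow (the register value is invented):

    #include <stdio.h>

    #define BNX2_PHY_FLAG_MDIX 0x00004000   /* from the bnx2.h hunk below */
    #define EXT_STATUS_MDIX    (1 << 13)

    int main(void)
    {
        unsigned int phy_flags = 0;
        unsigned int ext_status = EXT_STATUS_MDIX;  /* invented reg value */

        /* link-up path: latch the pair state into a driver flag */
        phy_flags &= ~BNX2_PHY_FLAG_MDIX;
        if (ext_status & EXT_STATUS_MDIX)
            phy_flags |= BNX2_PHY_FLAG_MDIX;

        /* ethtool get_settings path: report it for copper PHYs */
        printf("pairs: %s\n",
               (phy_flags & BNX2_PHY_FLAG_MDIX) ? "MDI-X (crossed)" : "MDI");
        return 0;
    }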
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index 18cb2d23e56b..f1cf2c44e7ed 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6471,6 +6471,15 @@ struct l2_fhdr {
#define BCM5708S_TX_ACTL3 0x17
+#define MII_BNX2_EXT_STATUS 0x11
+#define EXT_STATUS_MDIX (1 << 13)
+
+#define MII_BNX2_AUX_CTL 0x18
+#define AUX_CTL_MISC_CTL 0x7007
+#define AUX_CTL_MISC_CTL_WIRESPEED (1 << 4)
+#define AUX_CTL_MISC_CTL_AUTOMDIX (1 << 9)
+#define AUX_CTL_MISC_CTL_WR (1 << 15)
+
#define MII_BNX2_DSP_RW_PORT 0x15
#define MII_BNX2_DSP_ADDRESS 0x17
#define MII_BNX2_DSP_EXPAND_REG 0x0f00
@@ -6844,6 +6853,7 @@ struct bnx2 {
#define BNX2_PHY_FLAG_REMOTE_PHY_CAP 0x00000800
#define BNX2_PHY_FLAG_FORCED_DOWN 0x00001000
#define BNX2_PHY_FLAG_NO_PARALLEL 0x00002000
+#define BNX2_PHY_FLAG_MDIX 0x00004000
u32 mii_bmcr;
u32 mii_bmsr;
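
Editor's note: the AUX_CTL defines above replace the magic 0x18/0x7007 pair used by the old wirespeed code. Register 0x18 is a shadowed auxiliary control block: the driver selects the MISC_CTL shadow by writing 0x7007, reads it back, then writes it again with the write-enable bit plus the wirespeed (and, on the 5709, auto-MDIX) bits set. A runnable model of that read-modify-write against a fake register:

    #include <stdio.h>

    /* Values from the bnx2.h hunk above. */
    #define AUX_CTL_MISC_CTL           0x7007
    #define AUX_CTL_MISC_CTL_WIRESPEED (1 << 4)
    #define AUX_CTL_MISC_CTL_AUTOMDIX  (1 << 9)
    #define AUX_CTL_MISC_CTL_WR        (1 << 15)

    static unsigned int fake_reg;  /* stands in for MII_BNX2_AUX_CTL (0x18) */

    static void phy_write(unsigned int v) { fake_reg = v; }
    static unsigned int phy_read(void)    { return fake_reg; }

    int main(void)
    {
        unsigned int val;

        phy_write(AUX_CTL_MISC_CTL);    /* select the MISC_CTL shadow */
        val = phy_read();
        val |= AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
        val |= AUX_CTL_MISC_CTL_AUTOMDIX;    /* 5709 only, per the patch */
        phy_write(val);

        printf("AUX_CTL: 0x%04x\n", phy_read());
        return 0;
    }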
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index a1f66e2c9a86..eb105abcf0e7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -472,7 +472,7 @@ struct bnx2x_agg_info {
u16 vlan_tag;
u16 len_on_bd;
u32 rxhash;
- bool l4_rxhash;
+ enum pkt_hash_types rxhash_type;
u16 gro_size;
u16 full_page;
};
@@ -1250,7 +1250,10 @@ struct bnx2x_slowpath {
* Therefore, if they would have been defined in the same union,
* data can get corrupted.
*/
- struct afex_vif_list_ramrod_data func_afex_rdata;
+ union {
+ struct afex_vif_list_ramrod_data viflist_data;
+ struct function_update_data func_update;
+ } func_afex_rdata;
/* used by dmae command executer */
struct dmae_command dmae[MAX_DMAE_C];
@@ -1546,6 +1549,7 @@ struct bnx2x {
#define INTERRUPTS_ENABLED_FLAG (1 << 23)
#define BC_SUPPORTS_RMMOD_CMD (1 << 24)
#define HAS_PHYS_PORT_ID (1 << 25)
+#define AER_ENABLED (1 << 26)
#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)
@@ -2436,7 +2440,8 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
(!((me_reg) & ME_REG_VF_ERR)))
-int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code);
+int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err);
+
/* Congestion management fairness mode */
#define CMNG_FNS_NONE 0
#define CMNG_FNS_MINMAX 1
@@ -2499,4 +2504,6 @@ void bnx2x_set_local_cmng(struct bnx2x *bp);
#define MCPR_SCRATCH_BASE(bp) \
(CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
+#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX))
+
#endif /* bnx2x.h */
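
Editor's note: the func_afex_rdata change above overlays the two AFEX ramrod payloads in a union — only one can be in flight at a time, so sharing the storage is safe and keeps the slowpath buffer compact while preserving distinct types. A toy sketch with invented struct contents (the real layouts live in the bnx2x HSI headers):

    #include <stdio.h>

    struct afex_vif_list_ramrod_data { unsigned int echo; };
    struct function_update_data      { unsigned int flags; };

    union func_afex_rdata {
        struct afex_vif_list_ramrod_data viflist_data;
        struct function_update_data      func_update;
    };

    int main(void)
    {
        union func_afex_rdata rdata;

        rdata.func_update.flags = 0x1;   /* only one user at a time */
        printf("union size: %zu bytes\n", sizeof(rdata));
        return 0;
    }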
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ec96130533cc..282ebf61f530 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -354,7 +354,7 @@ static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
*/
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
const struct eth_fast_path_rx_cqe *cqe,
- bool *l4_rxhash)
+ enum pkt_hash_types *rxhash_type)
{
/* Get Toeplitz hash from CQE */
if ((bp->dev->features & NETIF_F_RXHASH) &&
@@ -362,11 +362,13 @@ static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
enum eth_rss_hash_type htype;
htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
- *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
- (htype == TCP_IPV6_HASH_TYPE);
+ *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
+ (htype == TCP_IPV6_HASH_TYPE)) ?
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
+
return le32_to_cpu(cqe->rss_hash_result);
}
- *l4_rxhash = false;
+ *rxhash_type = PKT_HASH_TYPE_NONE;
return 0;
}
@@ -420,7 +422,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
tpa_info->tpa_state = BNX2X_TPA_START;
tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
tpa_info->placement_offset = cqe->placement_offset;
- tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
+ tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
if (fp->mode == TPA_MODE_GRO) {
u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
@@ -728,8 +730,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
skb_reserve(skb, pad + NET_SKB_PAD);
skb_put(skb, len);
- skb->rxhash = tpa_info->rxhash;
- skb->l4_rxhash = tpa_info->l4_rxhash;
+ skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
skb->protocol = eth_type_trans(skb, bp->dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -846,7 +847,8 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
enum eth_rx_cqe_type cqe_fp_type;
u16 len, pad, queue;
u8 *data;
- bool l4_rxhash;
+ u32 rxhash;
+ enum pkt_hash_types rxhash_type;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -987,8 +989,8 @@ reuse_rx:
skb->protocol = eth_type_trans(skb, bp->dev);
/* Set Toeplitz hash for a none-LRO skb */
- skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
- skb->l4_rxhash = l4_rxhash;
+ rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
+ skb_set_hash(skb, rxhash, rxhash_type);
skb_checksum_none_assert(skb);
@@ -2263,7 +2265,7 @@ static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
* virtualized environments a pf from another VM may have already
* initialized the device including loading FW
*/
-int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
+int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
{
/* is another pf loaded on this engine? */
if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
@@ -2282,8 +2284,12 @@ int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
/* abort nic load if version mismatch */
if (my_fw != loaded_fw) {
- BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
- loaded_fw, my_fw);
+ if (print_err)
+ BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
+ loaded_fw, my_fw);
+ else
+ BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
+ loaded_fw, my_fw);
return -EBUSY;
}
}
@@ -2598,7 +2604,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
LOAD_ERROR_EXIT(bp, load_error1);
/* what did mcp say? */
- rc = bnx2x_nic_load_analyze_req(bp, load_code);
+ rc = bnx2x_compare_fw_ver(bp, load_code, true);
if (rc) {
bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
LOAD_ERROR_EXIT(bp, load_error2);
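
Editor's note: bnx2x_get_rxhash() above now reports hash quality as an enum pkt_hash_types instead of a separate l4_rxhash bool, which is exactly what skb_set_hash() consumes. A minimal sketch of the CQE-type-to-hash-type mapping (the enum mirrors the skbuff.h names; the TCP test is simplified from the eth_rss_hash_type check):

    #include <stdio.h>

    enum pkt_hash_types {   /* mirrors the include/linux/skbuff.h names */
        PKT_HASH_TYPE_NONE,
        PKT_HASH_TYPE_L2,
        PKT_HASH_TYPE_L3,
        PKT_HASH_TYPE_L4,
    };

    /* htype_is_tcp stands in for the TCP_IPV4/IPV6_HASH_TYPE test on the
     * CQE status flags. */
    static enum pkt_hash_types classify(int htype_is_tcp)
    {
        return htype_is_tcp ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
    }

    int main(void)
    {
        printf("TCP CQE   -> %d (PKT_HASH_TYPE_L4)\n", classify(1));
        printf("other CQE -> %d (PKT_HASH_TYPE_L3)\n", classify(0));
        return 0;
    }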
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 32d0f1435fb4..92a467ff4104 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1639,6 +1639,12 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
memcpy(&val, data_buf, 4);
+ /* Note that, unlike bnx2x_nvram_read_dword(), this does not
+ * convert val with be32_to_cpu(), so data flips if the eeprom
+ * is read and then written back. This is left as-is because
+ * existing tools rely on this behaviour and would break if it
+ * were fixed.
+ */
rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
/* advance to the next dword */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 20dcc02431ca..11fc79585491 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -3865,6 +3865,19 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
} else {
+ /* Enable Auto-Detect to support 1G over CL37 as well */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
+
+ /* Force cl48 sync_status LOW to avoid getting stuck in CL73
+ * parallel-detect loop when CL73 and CL37 are enabled.
+ */
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800);
+ bnx2x_set_aer_mmd(params, phy);
+
bnx2x_disable_kr2(params, vars, phy);
}
@@ -8120,17 +8133,20 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
*edc_mode = EDC_MODE_ACTIVE_DAC;
else
check_limiting_mode = 1;
- } else if (copper_module_type &
- SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
+ } else {
+ *edc_mode = EDC_MODE_PASSIVE_DAC;
+ /* Even in case PASSIVE_DAC indication is not set,
+ * treat it as a passive DAC cable, since some cables
+ * don't have this indication.
+ */
+ if (copper_module_type &
+ SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
DP(NETIF_MSG_LINK,
"Passive Copper cable detected\n");
- *edc_mode =
- EDC_MODE_PASSIVE_DAC;
- } else {
- DP(NETIF_MSG_LINK,
- "Unknown copper-cable-type 0x%x !!!\n",
- copper_module_type);
- return -EINVAL;
+ } else {
+ DP(NETIF_MSG_LINK,
+ "Unknown copper-cable-type\n");
+ }
}
break;
}
@@ -10825,9 +10841,9 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
(1<<11));
if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
- (phy->req_line_speed == SPEED_1000)) {
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (phy->req_line_speed == SPEED_1000)) {
an_1000_val |= (1<<8);
autoneg_val |= (1<<9 | 1<<12);
if (phy->req_duplex == DUPLEX_FULL)
@@ -10843,30 +10859,32 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
0x09,
&an_1000_val);
- /* Set 100 speed advertisement */
- if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) {
- an_10_100_val |= (1<<7);
- /* Enable autoneg and restart autoneg for legacy speeds */
- autoneg_val |= (1<<9 | 1<<12);
-
- if (phy->req_duplex == DUPLEX_FULL)
- an_10_100_val |= (1<<8);
- DP(NETIF_MSG_LINK, "Advertising 100M\n");
- }
-
- /* Set 10 speed advertisement */
- if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
- an_10_100_val |= (1<<5);
- autoneg_val |= (1<<9 | 1<<12);
- if (phy->req_duplex == DUPLEX_FULL)
+ /* Advertise 10/100 link speed */
+ if (phy->req_line_speed == SPEED_AUTO_NEG) {
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
+ an_10_100_val |= (1<<5);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
+ }
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
an_10_100_val |= (1<<6);
- DP(NETIF_MSG_LINK, "Advertising 10M\n");
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
+ }
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+ an_10_100_val |= (1<<7);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
+ }
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+ an_10_100_val |= (1<<8);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
+ }
}
/* Only 10/100 are allowed to work in FORCE mode */
@@ -13342,6 +13360,10 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
old_status, status);
+ /* Do not touch the link in case the physical link is down */
+ if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
+ return 1;
+
/* a. Update shmem->link_status accordingly
* b. Update link_vars->link_up
*/
@@ -13550,7 +13572,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
*/
not_kr2_device = (((base_page & 0x8000) == 0) ||
(((base_page & 0x8000) &&
- ((next_page & 0xe0) == 0x2))));
+ ((next_page & 0xe0) == 0x20))));
/* In case KR2 is already disabled, check if we need to re-enable it */
if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
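
The one-character fix above matters because the old predicate could never be true: masking next_page with 0xe0 keeps only bits 5-7, so comparing the result against 0x2 (bit 1) always failed, and that branch of the not-a-KR2-device test was dead. A small standalone check of both predicates:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t next_page;

        for (next_page = 0; next_page <= 0xff; next_page++)
            assert((next_page & 0xe0) != 0x2);   /* old test: never matches */
        assert((0x20 & 0xe0) == 0x20);           /* new test: matches bit 5 */
        return 0;
    }
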
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 814d0eca9b33..18498fed520b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/aer.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -3297,6 +3298,10 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
ether_stat->txq_size = bp->tx_ring_size;
ether_stat->rxq_size = bp->rx_ring_size;
+
+#ifdef CONFIG_BNX2X_SRIOV
+ ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
+#endif
}
static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
@@ -9854,6 +9859,64 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
+#define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
+#define BCM_5710_UNDI_FW_MF_MINOR (0x08)
+#define BCM_5710_UNDI_FW_MF_VERS (0x05)
+#define BNX2X_PREV_UNDI_MF_PORT(p) (0x1a150c + ((p) << 4))
+#define BNX2X_PREV_UNDI_MF_FUNC(f) (0x1a184c + ((f) << 4))
+static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
+{
+ u8 major, minor, version;
+ u32 fw;
+
+ /* Must check that FW is loaded */
+ if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
+ MISC_REGISTERS_RESET_REG_1_RST_XSEM)) {
+ BNX2X_DEV_INFO("XSEM is reset - UNDI MF FW is not loaded\n");
+ return false;
+ }
+
+ /* Read Currently loaded FW version */
+ fw = REG_RD(bp, XSEM_REG_PRAM);
+ major = fw & 0xff;
+ minor = (fw >> 0x8) & 0xff;
+ version = (fw >> 0x10) & 0xff;
+ BNX2X_DEV_INFO("Loaded FW: 0x%08x: Major 0x%02x Minor 0x%02x Version 0x%02x\n",
+ fw, major, minor, version);
+
+ if (major > BCM_5710_UNDI_FW_MF_MAJOR)
+ return true;
+
+ if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
+ (minor > BCM_5710_UNDI_FW_MF_MINOR))
+ return true;
+
+ if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
+ (minor == BCM_5710_UNDI_FW_MF_MINOR) &&
+ (version >= BCM_5710_UNDI_FW_MF_VERS))
+ return true;
+
+ return false;
+}
+
+static void bnx2x_prev_unload_undi_mf(struct bnx2x *bp)
+{
+ int i;
+
+ /* Due to legacy (FW) code, the first function on each engine has a
+ * different offset macro from the rest of the functions.
+ * Setting this for all 8 functions is harmless regardless of whether
+ * this is actually a multi-function device.
+ */
+ for (i = 0; i < 2; i++)
+ REG_WR(bp, BNX2X_PREV_UNDI_MF_PORT(i), 1);
+
+ for (i = 2; i < 8; i++)
+ REG_WR(bp, BNX2X_PREV_UNDI_MF_FUNC(i - 2), 1);
+
+ BNX2X_DEV_INFO("UNDI FW (MF) set to discard\n");
+}
+
static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc)
{
u16 rcq, bd;
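
The gate in bnx2x_prev_unload_undi_fw_supports_mf() above accepts any firmware at or beyond 7.8.05, compared field by field. A standalone sketch of the same logic, including the byte decoding of the XSEM_REG_PRAM word (the helper name is hypothetical):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool undi_fw_supports_mf(uint32_t fw)
    {
        uint8_t major   = fw & 0xff;
        uint8_t minor   = (fw >> 8) & 0xff;
        uint8_t version = (fw >> 16) & 0xff;

        if (major != 0x07)
            return major > 0x07;
        if (minor != 0x08)
            return minor > 0x08;
        return version >= 0x05;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               undi_fw_supports_mf(0x00050807),  /* 7.8.05 -> 1 */
               undi_fw_supports_mf(0x00040807),  /* 7.8.04 -> 0 */
               undi_fw_supports_mf(0x00000008)); /* 8.0.00 -> 1 */
        return 0;
    }
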
@@ -10054,7 +10117,7 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
* the one required, then FLR will be sufficient to clean any residue
* left by previous driver
*/
- rc = bnx2x_nic_load_analyze_req(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION);
+ rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
if (!rc) {
/* fw version is good */
@@ -10142,10 +10205,17 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
else
timer_count--;
- /* If UNDI resides in memory, manually increment it */
- if (prev_undi)
+ /* New UNDI FW supports MF and contains better
+ * cleaning methods - might be redundant but harmless.
+ */
+ if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
+ bnx2x_prev_unload_undi_mf(bp);
+ } else if (prev_undi) {
+ /* If UNDI resides in memory,
+ * manually increment it
+ */
bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
-
+ }
udelay(10);
}
@@ -10265,8 +10335,8 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
} while (--time_counter);
if (!time_counter || rc) {
- BNX2X_ERR("Failed unloading previous driver, aborting\n");
- rc = -EBUSY;
+ BNX2X_DEV_INFO("Unloading previous driver did not occur, Possibly due to MF UNDI\n");
+ rc = -EPROBE_DEFER;
}
/* Mark function if its port was used to boot from SAN */
@@ -11447,9 +11517,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
}
}
- /* adjust igu_sb_cnt to MF for E1x */
- if (CHIP_IS_E1x(bp) && IS_MF(bp))
- bp->igu_sb_cnt /= E1HVN_MAX;
+ /* adjust igu_sb_cnt to MF for E1H */
+ if (CHIP_IS_E1H(bp) && IS_MF(bp))
+ bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
/* port info */
bnx2x_get_port_hwinfo(bp);
@@ -11636,7 +11706,11 @@ static int bnx2x_init_bp(struct bnx2x *bp)
DRV_MSG_SEQ_NUMBER_MASK;
BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
- bnx2x_prev_unload(bp);
+ rc = bnx2x_prev_unload(bp);
+ if (rc) {
+ bnx2x_free_mem_bp(bp);
+ return rc;
+ }
}
if (CHIP_REV_IS_FPGA(bp))
@@ -12156,6 +12230,14 @@ static int bnx2x_set_coherency_mask(struct bnx2x *bp)
return 0;
}
+static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
+{
+ if (bp->flags & AER_ENABLED) {
+ pci_disable_pcie_error_reporting(bp->pdev);
+ bp->flags &= ~AER_ENABLED;
+ }
+}
+
static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
struct net_device *dev, unsigned long board_type)
{
@@ -12262,6 +12344,14 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
/* clean indirect addresses */
pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
PCICFG_VENDOR_ID_OFFSET);
+
+ /* AER (Advanced Error reporting) configuration */
+ rc = pci_enable_pcie_error_reporting(pdev);
+ if (!rc)
+ bp->flags |= AER_ENABLED;
+ else
+ BNX2X_DEV_INFO("Failed To configure PCIe AER [%d]\n", rc);
+
/*
* Clean the following indirect addresses for all functions since it
* is not used by the driver.
@@ -12869,6 +12959,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
return 0;
init_one_exit:
+ bnx2x_disable_pcie_error_reporting(bp);
+
if (bp->regview)
iounmap(bp->regview);
@@ -12942,6 +13034,8 @@ static void __bnx2x_remove(struct pci_dev *pdev,
pci_set_power_state(pdev, PCI_D3hot);
}
+ bnx2x_disable_pcie_error_reporting(bp);
+
if (bp->regview)
iounmap(bp->regview);
@@ -13119,6 +13213,14 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
rtnl_unlock();
+ /* If AER, perform cleanup of the PCIe registers */
+ if (bp->flags & AER_ENABLED) {
+ if (pci_cleanup_aer_uncorrect_error_status(pdev))
+ BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n");
+ else
+ DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n");
+ }
+
return PCI_ERS_RESULT_RECOVERED;
}
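
The AER changes above follow a simple lifecycle: enable reporting once during device init and treat failure as non-fatal, disable it on every exit path (remove and the probe error path), and clear stale uncorrectable-error status from the ->slot_reset() handler. A minimal kernel-style sketch of the enable/disable pair; only the pci_* calls are real API, the rest is a hypothetical carrier:

    #include <linux/aer.h>
    #include <linux/pci.h>

    struct example_priv {
        struct pci_dev *pdev;
        bool aer_enabled;
    };

    static void example_aer_enable(struct example_priv *p)
    {
        /* failure is non-fatal: the device still works without AER */
        if (!pci_enable_pcie_error_reporting(p->pdev))
            p->aer_enabled = true;
        else
            dev_info(&p->pdev->dev, "PCIe AER not available\n");
    }

    static void example_aer_disable(struct example_priv *p)
    {
        /* called from remove and from the probe error path */
        if (p->aer_enabled) {
            pci_disable_pcie_error_reporting(p->pdev);
            p->aer_enabled = false;
        }
    }
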
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 3efbb35267c8..2beb5430b876 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -5932,6 +5932,7 @@
#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26)
#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27)
+#define MISC_REGISTERS_RESET_REG_1_RST_XSEM (0x1<<22)
#define MISC_REGISTERS_RESET_REG_1_SET 0x584
#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
#define MISC_REGISTERS_RESET_REG_2_MSTAT0 (0x1<<24)
@@ -7179,6 +7180,7 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca
#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da
#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea
+#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI 0x80fa
#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104
#define MDIO_WC_REG_XGXS_STATUS3 0x8129
#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 32c92abf5094..98cccd487fc2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -663,7 +663,7 @@ static int bnx2x_check_mac_add(struct bnx2x *bp,
/* Check if a requested MAC already exists */
list_for_each_entry(pos, &o->head, link)
- if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
+ if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
(data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
return -EEXIST;
@@ -696,8 +696,7 @@ static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
list_for_each_entry(pos, &o->head, link)
if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
- (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
- ETH_ALEN)) &&
+ ether_addr_equal_unaligned(data->vlan_mac.mac, pos->u.vlan_mac.mac) &&
(data->vlan_mac.is_inner_mac ==
pos->u.vlan_mac.is_inner_mac))
return -EEXIST;
@@ -716,7 +715,7 @@ static struct bnx2x_vlan_mac_registry_elem *
DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
list_for_each_entry(pos, &o->head, link)
- if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
+ if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
(data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
return pos;
@@ -751,8 +750,7 @@ static struct bnx2x_vlan_mac_registry_elem *
list_for_each_entry(pos, &o->head, link)
if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
- (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
- ETH_ALEN)) &&
+ ether_addr_equal_unaligned(data->vlan_mac.mac, pos->u.vlan_mac.mac) &&
(data->vlan_mac.is_inner_mac ==
pos->u.vlan_mac.is_inner_mac))
return pos;
@@ -2038,6 +2036,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
struct bnx2x_vlan_mac_ramrod_params p;
struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+ unsigned long flags;
int read_lock;
int rc = 0;
@@ -2046,8 +2045,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
spin_lock_bh(&exeq->lock);
list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
- if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
- *vlan_mac_flags) {
+ flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
+ if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+ BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
rc = exeq->remove(bp, exeq->owner, exeq_pos);
if (rc) {
BNX2X_ERR("Failed to remove command\n");
@@ -2080,7 +2080,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
return read_lock;
list_for_each_entry(pos, &o->head, link) {
- if (pos->vlan_mac_flags == *vlan_mac_flags) {
+ flags = pos->vlan_mac_flags;
+ if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+ BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
rc = bnx2x_config_vlan_mac(bp, &p);
@@ -4382,8 +4384,11 @@ int bnx2x_config_rss(struct bnx2x *bp,
struct bnx2x_raw_obj *r = &o->raw;
/* Do nothing if only driver cleanup was requested */
- if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+ DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n",
+ p->ramrod_flags);
return 0;
+ }
r->set_pending(r);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 658f4e33abf9..6a53c15c85a3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -266,6 +266,13 @@ enum {
BNX2X_DONT_CONSUME_CAM_CREDIT,
BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
};
+/* When looking for matching filters, some flags are not interesting */
+#define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \
+ 1 << BNX2X_ETH_MAC | \
+ 1 << BNX2X_ISCSI_ETH_MAC | \
+ 1 << BNX2X_NETQ_ETH_MAC)
+#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
+ ((flags) & BNX2X_VLAN_MAC_CMP_MASK)
struct bnx2x_vlan_mac_ramrod_params {
/* Object to run the command from */
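
The BNX2X_VLAN_MAC_CMP_FLAGS() macro above exists so that filter matching looks only at the four classification bits; control bits such as BNX2X_DONT_CONSUME_CAM_CREDIT may differ between a stored entry and a delete request without defeating the match. A standalone sketch of the idea (bit positions are hypothetical stand-ins for the enum above):

    #include <stdio.h>

    enum { UC_LIST_MAC, ETH_MAC, ISCSI_ETH_MAC, NETQ_ETH_MAC,
           DONT_CONSUME_CAM_CREDIT };

    #define CMP_MASK ((1ul << UC_LIST_MAC) | (1ul << ETH_MAC) | \
                      (1ul << ISCSI_ETH_MAC) | (1ul << NETQ_ETH_MAC))
    #define CMP_FLAGS(f) ((f) & CMP_MASK)

    int main(void)
    {
        unsigned long stored  = 1ul << ETH_MAC;
        unsigned long request = (1ul << ETH_MAC) |
                                (1ul << DONT_CONSUME_CAM_CREDIT);

        /* raw equality fails, masked equality matches */
        printf("raw %d masked %d\n", stored == request,
               CMP_FLAGS(stored) == CMP_FLAGS(request));
        return 0;
    }
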
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 0216d592d0ce..31ab924600c1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -166,6 +166,7 @@ enum bnx2x_vfop_qteardown_state {
BNX2X_VFOP_QTEARDOWN_RXMODE,
BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
BNX2X_VFOP_QTEARDOWN_CLR_MAC,
+ BNX2X_VFOP_QTEARDOWN_CLR_MCAST,
BNX2X_VFOP_QTEARDOWN_QDTOR,
BNX2X_VFOP_QTEARDOWN_DONE
};
@@ -1112,7 +1113,10 @@ static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
switch (state) {
case BNX2X_VFOP_MCAST_DEL:
/* clear existing mcasts */
- vfop->state = BNX2X_VFOP_MCAST_ADD;
+ vfop->state = (args->mc_num) ? BNX2X_VFOP_MCAST_ADD
+ : BNX2X_VFOP_MCAST_CHK_DONE;
+ mcast->mcast_list_len = vf->mcast_list_len;
+ vf->mcast_list_len = args->mc_num;
vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
@@ -1120,17 +1124,17 @@ static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
if (raw->check_pending(raw))
goto op_pending;
- if (args->mc_num) {
- /* update mcast list on the ramrod params */
- INIT_LIST_HEAD(&mcast->mcast_list);
- for (i = 0; i < args->mc_num; i++)
- list_add_tail(&(args->mc[i].link),
- &mcast->mcast_list);
- /* add new mcasts */
- vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
- vfop->rc = bnx2x_config_mcast(bp, mcast,
- BNX2X_MCAST_CMD_ADD);
- }
+ /* update mcast list on the ramrod params */
+ INIT_LIST_HEAD(&mcast->mcast_list);
+ for (i = 0; i < args->mc_num; i++)
+ list_add_tail(&(args->mc[i].link),
+ &mcast->mcast_list);
+ mcast->mcast_list_len = args->mc_num;
+
+ /* add new mcasts */
+ vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
+ vfop->rc = bnx2x_config_mcast(bp, mcast,
+ BNX2X_MCAST_CMD_ADD);
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
case BNX2X_VFOP_MCAST_CHK_DONE:
@@ -1209,6 +1213,11 @@ static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
/* next state */
vfop->state = BNX2X_VFOP_RXMODE_DONE;
+ /* record the accept flags in vfdb so hypervisor can modify them
+ * if necessary
+ */
+ bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
+ ramrod->rx_accept_flags;
vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
@@ -1224,39 +1233,43 @@ op_pending:
return;
}
+static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
+ struct bnx2x_rx_mode_ramrod_params *ramrod,
+ struct bnx2x_virtf *vf,
+ unsigned long accept_flags)
+{
+ struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
+
+ memset(ramrod, 0, sizeof(*ramrod));
+ ramrod->cid = vfq->cid;
+ ramrod->cl_id = vfq_cl_id(vf, vfq);
+ ramrod->rx_mode_obj = &bp->rx_mode_obj;
+ ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
+ ramrod->rx_accept_flags = accept_flags;
+ ramrod->tx_accept_flags = accept_flags;
+ ramrod->pstate = &vf->filter_state;
+ ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
+
+ set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
+ set_bit(RAMROD_RX, &ramrod->ramrod_flags);
+ set_bit(RAMROD_TX, &ramrod->ramrod_flags);
+
+ ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
+ ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+}
+
int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
int qid, unsigned long accept_flags)
{
- struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
struct bnx2x_rx_mode_ramrod_params *ramrod =
&vf->op_params.rx_mode;
- memset(ramrod, 0, sizeof(*ramrod));
-
- /* Prepare ramrod parameters */
- ramrod->cid = vfq->cid;
- ramrod->cl_id = vfq_cl_id(vf, vfq);
- ramrod->rx_mode_obj = &bp->rx_mode_obj;
- ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
-
- ramrod->rx_accept_flags = accept_flags;
- ramrod->tx_accept_flags = accept_flags;
- ramrod->pstate = &vf->filter_state;
- ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
-
- set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
- set_bit(RAMROD_RX, &ramrod->ramrod_flags);
- set_bit(RAMROD_TX, &ramrod->ramrod_flags);
-
- ramrod->rdata =
- bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
- ramrod->rdata_mapping =
- bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+ bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);
bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
bnx2x_vfop_rxmode, cmd->done);
@@ -1303,12 +1316,19 @@ static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
/* mac-clear-all: consume credit */
- vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
+ vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MCAST;
vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
if (vfop->rc)
goto op_err;
return;
+ case BNX2X_VFOP_QTEARDOWN_CLR_MCAST:
+ vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
+ vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
case BNX2X_VFOP_QTEARDOWN_QDTOR:
/* run the queue destruction flow */
DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
@@ -2188,6 +2208,7 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
* It needs to be initialized here so that it can be safely
* handled by a subsequent FLR flow.
*/
+ vf->mcast_list_len = 0;
bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
0xFF, 0xFF, 0xFF,
bnx2x_vf_sp(bp, vf, mcast_rdata),
@@ -2848,13 +2869,9 @@ static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
goto op_err;
return;
}
-
- /* remove multicasts */
vfop->state = BNX2X_VFOP_CLOSE_HW;
- vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
- if (vfop->rc)
- goto op_err;
- return;
+ vfop->rc = 0;
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_CLOSE_HW:
@@ -2888,6 +2905,9 @@ op_done:
DP(BNX2X_MSG_IOV, "set state to acquired\n");
bnx2x_vfop_end(bp, vf, vfop);
+op_pending:
+ /* Not supported at the moment; exists for macros only */
+ return;
}
int bnx2x_vfop_close_cmd(struct bnx2x *bp,
@@ -3114,6 +3134,11 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
+ if (!IS_SRIOV(bp)) {
+ BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
+ return -EINVAL;
+ }
+
DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
num_vfs_param, BNX2X_NR_VIRTFN(bp));
@@ -3197,13 +3222,16 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
bnx2x_iov_static_resc(bp, vf);
}
- /* prepare msix vectors in VF configuration space */
+ /* prepare msix vectors in VF configuration space - the value in the
+ * PCI configuration space should be the index of the last entry,
+ * namely one less than the actual size of the table
+ */
for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
- num_vf_queues);
+ num_vf_queues - 1);
DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
- vf_idx, num_vf_queues);
+ vf_idx, num_vf_queues - 1);
}
bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
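
The "- 1" above is not arbitrary: the PCI MSI-X capability's Table Size field is defined as N-1 for a table of N entries, so writing the raw queue count over-reported the table by one. A tiny sketch of the encoding:

    #include <assert.h>
    #include <stdint.h>

    static uint16_t msix_table_size_field(uint16_t num_entries)
    {
        return num_entries - 1;
    }

    int main(void)
    {
        assert(msix_table_size_field(4) == 3);   /* 4 vectors -> field 3 */
        assert(msix_table_size_field(1) == 0);   /* 1 vector  -> field 0 */
        return 0;
    }
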
@@ -3431,10 +3459,18 @@ out:
int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
{
+ struct bnx2x_queue_state_params q_params = {NULL};
+ struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+ struct bnx2x_queue_update_params *update_params;
+ struct pf_vf_bulletin_content *bulletin = NULL;
+ struct bnx2x_rx_mode_ramrod_params rx_ramrod;
struct bnx2x *bp = netdev_priv(dev);
- int rc, q_logical_state;
+ struct bnx2x_vlan_mac_obj *vlan_obj;
+ unsigned long vlan_mac_flags = 0;
+ unsigned long ramrod_flags = 0;
struct bnx2x_virtf *vf = NULL;
- struct pf_vf_bulletin_content *bulletin = NULL;
+ unsigned long accept_flags;
+ int rc;
/* sanity and init */
rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
@@ -3452,104 +3488,118 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
/* update PF's copy of the VF's bulletin. No point in posting the vlan
* to the VF since it doesn't have anything to do with it. But it is useful
* to store it here in case the VF is not up yet and we can only
- * configure the vlan later when it does.
+ * configure the vlan later when it does. Treat vlan id 0 as a
+ * request to remove the host tag.
*/
- bulletin->valid_bitmap |= 1 << VLAN_VALID;
+ if (vlan > 0)
+ bulletin->valid_bitmap |= 1 << VLAN_VALID;
+ else
+ bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
bulletin->vlan = vlan;
/* is vf initialized and queue set up? */
- q_logical_state =
- bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
- if (vf->state == VF_ENABLED &&
- q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
- /* configure the vlan in device on this vf's queue */
- unsigned long ramrod_flags = 0;
- unsigned long vlan_mac_flags = 0;
- struct bnx2x_vlan_mac_obj *vlan_obj =
- &bnx2x_leading_vfq(vf, vlan_obj);
- struct bnx2x_vlan_mac_ramrod_params ramrod_param;
- struct bnx2x_queue_state_params q_params = {NULL};
- struct bnx2x_queue_update_params *update_params;
+ if (vf->state != VF_ENABLED ||
+ bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
+ BNX2X_Q_LOGICAL_STATE_ACTIVE)
+ return rc;
- rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
- if (rc)
- return rc;
- memset(&ramrod_param, 0, sizeof(ramrod_param));
+ /* configure the vlan in device on this vf's queue */
+ vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+ rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
+ if (rc)
+ return rc;
- /* must lock vfpf channel to protect against vf flows */
- bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+ /* must lock vfpf channel to protect against vf flows */
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
- /* remove existing vlans */
- __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
- rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
- &ramrod_flags);
- if (rc) {
- BNX2X_ERR("failed to delete vlans\n");
- rc = -EINVAL;
- goto out;
- }
+ /* remove existing vlans */
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
+ &ramrod_flags);
+ if (rc) {
+ BNX2X_ERR("failed to delete vlans\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* need to remove/add the VF's accept_any_vlan bit */
+ accept_flags = bnx2x_leading_vfq(vf, accept_flags);
+ if (vlan)
+ clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+ else
+ set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+ bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
+ accept_flags);
+ bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
+ bnx2x_config_rx_mode(bp, &rx_ramrod);
+
+ /* configure the new vlan to device */
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ ramrod_param.vlan_mac_obj = vlan_obj;
+ ramrod_param.ramrod_flags = ramrod_flags;
+ set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &ramrod_param.user_req.vlan_mac_flags);
+ ramrod_param.user_req.u.vlan.vlan = vlan;
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+ if (rc) {
+ BNX2X_ERR("failed to configure vlan\n");
+ rc = -EINVAL;
+ goto out;
+ }
- /* send queue update ramrod to configure default vlan and silent
- * vlan removal
+ /* send queue update ramrod to configure default vlan and silent
+ * vlan removal
+ */
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
+ update_params = &q_params.params.update;
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ &update_params->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ &update_params->update_flags);
+ if (vlan == 0) {
+ /* if vlan is 0 then we want to leave the VF traffic
+ * untagged, and leave the incoming traffic untouched
+ * (i.e. do not remove any vlan tags).
*/
- __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
- q_params.cmd = BNX2X_Q_CMD_UPDATE;
- q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
- update_params = &q_params.params.update;
- __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ } else {
+ /* configure default vlan to vf queue and set silent
+ * vlan removal (the vf remains unaware of this vlan).
+ */
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
&update_params->update_flags);
- __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
&update_params->update_flags);
+ update_params->def_vlan = vlan;
+ update_params->silent_removal_value =
+ vlan & VLAN_VID_MASK;
+ update_params->silent_removal_mask = VLAN_VID_MASK;
+ }
- if (vlan == 0) {
- /* if vlan is 0 then we want to leave the VF traffic
- * untagged, and leave the incoming traffic untouched
- * (i.e. do not remove any vlan tags).
- */
- __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
- &update_params->update_flags);
- __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
- &update_params->update_flags);
- } else {
- /* configure the new vlan to device */
- __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
- ramrod_param.vlan_mac_obj = vlan_obj;
- ramrod_param.ramrod_flags = ramrod_flags;
- ramrod_param.user_req.u.vlan.vlan = vlan;
- ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
- rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
- if (rc) {
- BNX2X_ERR("failed to configure vlan\n");
- rc = -EINVAL;
- goto out;
- }
-
- /* configure default vlan to vf queue and set silent
- * vlan removal (the vf remains unaware of this vlan).
- */
- update_params = &q_params.params.update;
- __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
- &update_params->update_flags);
- __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
- &update_params->update_flags);
- update_params->def_vlan = vlan;
- }
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure default VLAN\n");
+ goto out;
+ }
- /* Update the Queue state */
- rc = bnx2x_queue_state_change(bp, &q_params);
- if (rc) {
- BNX2X_ERR("Failed to configure default VLAN\n");
- goto out;
- }
- /* clear the flag indicating that this VF needs its vlan
- * (will only be set if the HV configured the Vlan before vf was
- * up and we were called because the VF came up later
- */
+ /* clear the flag indicating that this VF needs its vlan
+ * (it will only be set if the HV configured the vlan before
+ * the VF was up and we were called because the VF came up
+ * later)
+ */
out:
- vf->cfg_flags &= ~VF_CFG_VLAN;
- bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
- }
+ vf->cfg_flags &= ~VF_CFG_VLAN;
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+
return rc;
}
@@ -3600,7 +3650,7 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
/* the mac address in bulletin board is valid and is new */
if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
- memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
+ !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) {
/* update new mac to net device */
memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
}
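
The rewritten ndo above gives vlan id 0 a defined meaning: a non-zero id installs the host tag, marks VLAN_VALID in the bulletin and drops BNX2X_ACCEPT_ANY_VLAN, while 0 undoes all three. A kernel-style sketch of that pairing (the helper name is hypothetical; the flag names are the ones used in the hunks above):

    #include <linux/bitops.h>
    #include <linux/types.h>

    static void example_apply_hv_vlan(unsigned long *accept_flags, u16 vlan,
                                      u64 *valid_bitmap)
    {
        if (vlan) {
            *valid_bitmap |= 1 << VLAN_VALID;
            clear_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags);
        } else {
            *valid_bitmap &= ~(1 << VLAN_VALID);
            set_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags);
        }
    }
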
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 1ff6a9366629..d72ab7e24de0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -74,6 +74,7 @@ struct bnx2x_vf_queue {
/* VLANs object */
struct bnx2x_vlan_mac_obj vlan_obj;
atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */
+ unsigned long accept_flags; /* last accept flags configured */
/* Queue Slow-path State object */
struct bnx2x_queue_sp_obj sp_obj;
@@ -268,6 +269,7 @@ struct bnx2x_virtf {
int leading_rss;
/* MCAST object */
+ int mcast_list_len;
struct bnx2x_mcast_obj mcast_obj;
/* RSS configuration object */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index efa8a151d789..1b1ad31b4553 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -208,7 +208,7 @@ static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
return -EINVAL;
}
- BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg);
+ DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
*vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
@@ -800,14 +800,18 @@ int bnx2x_vfpf_config_rss(struct bnx2x *bp,
}
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
- BNX2X_ERR("failed to send rss message to PF over Vf PF channel %d\n",
- resp->hdr.status);
- rc = -EINVAL;
+ /* Since older drivers don't support this feature (and the VF
+ * has no way of knowing other than by this failing), don't
+ * propagate an error in this case.
+ */
+ DP(BNX2X_MSG_IOV,
+ "Failed to send rss message to PF over VF-PF channel [%d]\n",
+ resp->hdr.status);
}
out:
bnx2x_vfpf_finalize(bp, &req->first_tlv);
- return 0;
+ return rc;
}
int bnx2x_vfpf_set_mcast(struct net_device *dev)
@@ -1416,6 +1420,14 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
setup_q->rxq.cache_line_log;
rxq_params->sb_cq_index = setup_q->rxq.sb_index;
+ /* rx setup - multicast engine */
+ if (bnx2x_vfq_is_leading(q)) {
+ u8 mcast_id = FW_VF_HANDLE(vf->abs_vfid);
+
+ rxq_params->mcast_engine_id = mcast_id;
+ __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
+ }
+
bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
q->index, q->sb_idx);
}
@@ -1598,6 +1610,8 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
unsigned long accept = 0;
+ struct pf_vf_bulletin_content *bulletin =
+ BP_VF_BULLETIN(bp, vf->index);
/* convert VF-PF if mask to bnx2x accept flags */
if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
@@ -1617,9 +1631,11 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
/* A packet arriving at the vf's mac should be accepted
- * with any vlan
+ * with any vlan, unless a vlan has already been
+ * configured.
*/
- __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
+ if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
/* set rx-mode */
rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
@@ -1702,7 +1718,7 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
/* ...and only the mac set by the ndo */
if (filters->n_mac_vlan_filters == 1 &&
- memcmp(filters->filters->mac, bulletin->mac, ETH_ALEN)) {
+ !ether_addr_equal(filters->filters->mac, bulletin->mac)) {
BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
vf->abs_vfid);
@@ -1710,6 +1726,21 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
goto response;
}
}
+ /* if vlan was set by the hypervisor we don't allow the guest to config vlan */
+ if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
+ int i;
+
+ /* search for vlan filters */
+ for (i = 0; i < filters->n_mac_vlan_filters; i++) {
+ if (filters->filters[i].flags &
+ VFPF_Q_FILTER_VLAN_TAG_VALID) {
+ BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
+ vf->abs_vfid);
+ vf->op_rc = -EPERM;
+ goto response;
+ }
+ }
+ }
/* verify vf_qid */
if (filters->vf_qid > vf_rxq_count(vf))
@@ -1805,6 +1836,9 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
/* flags handled individually for backward/forward compatibility */
+ vf_op_params->rss_flags = 0;
+ vf_op_params->ramrod_flags = 0;
+
if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
__set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
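
The VF-PF change above trades an error for a debug message: an older PF that predates the RSS request can only answer with a failure status, and the VF has no other way to probe for support, so the failure is logged and swallowed rather than turned into -EINVAL. A sketch of the decision (the status constant's value is assumed for the sketch):

    #include <stdio.h>

    #define PFVF_STATUS_SUCCESS 0   /* value assumed for the sketch */

    static int example_handle_rss_resp(int status)
    {
        if (status != PFVF_STATUS_SUCCESS)
            fprintf(stderr,
                    "PF rejected RSS config (%d); assuming old PF\n",
                    status);
        return 0;   /* a missing feature is not an error */
    }
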
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index f58a8b80302d..fcf9105a5476 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -5220,6 +5220,7 @@ static void cnic_init_rings(struct cnic_dev *dev)
cnic_ring_ctl(dev, cid, cli, 1);
*cid_ptr = cid >> 4;
*(cid_ptr + 1) = cid * bp->db_size;
+ *(cid_ptr + 2) = UIO_USE_TX_DOORBELL;
}
}
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 0121a5d55192..0d6b13f854d9 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -186,6 +186,8 @@ struct kcq_info {
u16 (*hw_idx)(u16);
};
+#define UIO_USE_TX_DOORBELL 0x017855DB
+
struct cnic_uio_dev {
struct uio_info cnic_uinfo;
u32 uio_dev;
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index ebbfe25acaa6..8cf6b1926069 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -14,8 +14,8 @@
#include "bnx2x/bnx2x_mfw_req.h"
-#define CNIC_MODULE_VERSION "2.5.18"
-#define CNIC_MODULE_RELDATE "Sept 01, 2013"
+#define CNIC_MODULE_VERSION "2.5.19"
+#define CNIC_MODULE_RELDATE "December 19, 2013"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index c2777712da99..31a076d86709 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*
* This driver is designed for the Broadcom SiByte SOC built-in
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index a9e068423ba0..c37e9f27ff6d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -37,6 +37,7 @@
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
+#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
@@ -94,10 +95,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 134
+#define TG3_MIN_NUM 136
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "Sep 16, 2013"
+#define DRV_MODULE_RELDATE "Jan 03, 2014"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -208,6 +209,9 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define TG3_RAW_IP_ALIGN 2
+#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
+#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
+
#define TG3_FW_UPDATE_TIMEOUT_SEC 5
#define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
@@ -3948,32 +3952,41 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
return 0;
}
+/* tp->lock is held. */
+static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
+{
+ u32 addr_high, addr_low;
+
+ addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
+ addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5]);
+
+ if (index < 4) {
+ tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
+ tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
+ } else {
+ index -= 4;
+ tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
+ tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
+ }
+}
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
{
- u32 addr_high, addr_low;
+ u32 addr_high;
int i;
- addr_high = ((tp->dev->dev_addr[0] << 8) |
- tp->dev->dev_addr[1]);
- addr_low = ((tp->dev->dev_addr[2] << 24) |
- (tp->dev->dev_addr[3] << 16) |
- (tp->dev->dev_addr[4] << 8) |
- (tp->dev->dev_addr[5] << 0));
for (i = 0; i < 4; i++) {
if (i == 1 && skip_mac_1)
continue;
- tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
- tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
+ __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
}
if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
tg3_asic_rev(tp) == ASIC_REV_5704) {
- for (i = 0; i < 12; i++) {
- tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
- tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
- }
+ for (i = 4; i < 16; i++)
+ __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
}
addr_high = (tp->dev->dev_addr[0] +
@@ -4403,9 +4416,12 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
if (tg3_flag(tp, WOL_SPEED_100MB))
adv |= ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full;
- if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
- adv |= ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full;
+ if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
+ if (!(tp->phy_flags &
+ TG3_PHYFLG_DISABLE_1G_HD_ADV))
+ adv |= ADVERTISED_1000baseT_Half;
+ adv |= ADVERTISED_1000baseT_Full;
+ }
fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
} else {
@@ -7622,7 +7638,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
u32 base = (u32) mapping & 0xffffffff;
- return (base > 0xffffdcc0) && (base + len + 8 < base);
+ return base + len + 8 < base;
}
/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
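
The simplified test above relies on unsigned wraparound: in 32-bit arithmetic, base + len + 8 is smaller than base exactly when the buffer end crosses the 4GB boundary. The removed "base > 0xffffdcc0" pre-check could skip genuine overflows for large lengths:

    #include <assert.h>
    #include <stdint.h>

    static int crosses_4g(uint32_t base, uint32_t len)
    {
        return (uint32_t)(base + len + 8) < base;
    }

    int main(void)
    {
        assert(crosses_4g(0xfffffff0u, 0x100));        /* wraps: flagged */
        assert(!crosses_4g(0x1000u, 0x100));           /* fits: clean */
        assert(crosses_4g(0x80000000u, 0x80000000u));  /* old check missed this */
        return 0;
    }
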
@@ -8925,6 +8941,49 @@ static void tg3_restore_pci_state(struct tg3 *tp)
}
}
+static void tg3_override_clk(struct tg3 *tp)
+{
+ u32 val;
+
+ switch (tg3_asic_rev(tp)) {
+ case ASIC_REV_5717:
+ val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
+ tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
+ TG3_CPMU_MAC_ORIDE_ENABLE);
+ break;
+
+ case ASIC_REV_5719:
+ case ASIC_REV_5720:
+ tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
+ break;
+
+ default:
+ return;
+ }
+}
+
+static void tg3_restore_clk(struct tg3 *tp)
+{
+ u32 val;
+
+ switch (tg3_asic_rev(tp)) {
+ case ASIC_REV_5717:
+ val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
+ tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
+ val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
+ break;
+
+ case ASIC_REV_5719:
+ case ASIC_REV_5720:
+ val = tr32(TG3_CPMU_CLCK_ORIDE);
+ tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
+ break;
+
+ default:
+ return;
+ }
+}
+
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
@@ -8932,6 +8991,9 @@ static int tg3_chip_reset(struct tg3 *tp)
void (*write_op)(struct tg3 *, u32, u32);
int i, err;
+ if (!pci_device_is_present(tp->pdev))
+ return -ENODEV;
+
tg3_nvram_lock(tp);
tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
@@ -9010,6 +9072,13 @@ static int tg3_chip_reset(struct tg3 *tp)
tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
}
+ /* Set the clock to the highest frequency to avoid timeouts. With link
+ * aware mode, the clock speed could be slow and bootcode does not
+ * complete within the expected time. Override the clock to allow the
+ * bootcode to finish sooner and then restore it.
+ */
+ tg3_override_clk(tp);
+
/* Manage gphy power for all CPMU absent PCIe devices. */
if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
@@ -9148,10 +9217,7 @@ static int tg3_chip_reset(struct tg3 *tp)
tw32(0x7c00, val | (1 << 25));
}
- if (tg3_asic_rev(tp) == ASIC_REV_5720) {
- val = tr32(TG3_CPMU_CLCK_ORIDE);
- tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
- }
+ tg3_restore_clk(tp);
/* Reprobe ASF enable state. */
tg3_flag_clear(tp, ENABLE_ASF);
@@ -9183,6 +9249,7 @@ static int tg3_chip_reset(struct tg3 *tp)
static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
+static void __tg3_set_rx_mode(struct net_device *);
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
@@ -9243,6 +9310,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
}
spin_lock_bh(&tp->lock);
__tg3_set_mac_addr(tp, skip_mac_1);
+ __tg3_set_rx_mode(dev);
spin_unlock_bh(&tp->lock);
return err;
@@ -9631,6 +9699,20 @@ static void __tg3_set_rx_mode(struct net_device *dev)
tw32(MAC_HASH_REG_3, mc_filter[3]);
}
+ if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
+ rx_mode |= RX_MODE_PROMISC;
+ } else if (!(dev->flags & IFF_PROMISC)) {
+ /* Add all entries into the mac addr filter list */
+ int i = 0;
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ __tg3_set_one_mac_addr(tp, ha->addr,
+ i + TG3_UCAST_ADDR_IDX(tp));
+ i++;
+ }
+ }
+
if (rx_mode != tp->rx_mode) {
tp->rx_mode = rx_mode;
tw32_f(MAC_RX_MODE, rx_mode);
@@ -9963,6 +10045,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
if (tg3_asic_rev(tp) == ASIC_REV_5719)
val |= BUFMGR_MODE_NO_TX_UNDERRUN;
if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762 ||
tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
@@ -10629,10 +10712,8 @@ static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
static ssize_t tg3_show_temp(struct device *dev,
struct device_attribute *devattr, char *buf)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct tg3 *tp = netdev_priv(netdev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct tg3 *tp = dev_get_drvdata(dev);
u32 temperature;
spin_lock_bh(&tp->lock);
@@ -10650,29 +10731,25 @@ static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
TG3_TEMP_MAX_OFFSET);
-static struct attribute *tg3_attributes[] = {
+static struct attribute *tg3_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
NULL
};
-
-static const struct attribute_group tg3_group = {
- .attrs = tg3_attributes,
-};
+ATTRIBUTE_GROUPS(tg3);
static void tg3_hwmon_close(struct tg3 *tp)
{
if (tp->hwmon_dev) {
hwmon_device_unregister(tp->hwmon_dev);
tp->hwmon_dev = NULL;
- sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
}
}
static void tg3_hwmon_open(struct tg3 *tp)
{
- int i, err;
+ int i;
u32 size = 0;
struct pci_dev *pdev = tp->pdev;
struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
@@ -10690,18 +10767,11 @@ static void tg3_hwmon_open(struct tg3 *tp)
if (!size)
return;
- /* Register hwmon sysfs hooks */
- err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
- if (err) {
- dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
- return;
- }
-
- tp->hwmon_dev = hwmon_device_register(&pdev->dev);
+ tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
+ tp, tg3_groups);
if (IS_ERR(tp->hwmon_dev)) {
tp->hwmon_dev = NULL;
dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
- sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
}
}
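
The hwmon conversion above replaces the manual sysfs_create_group()/hwmon_device_register() pair with hwmon_device_register_with_groups(), which attaches the attribute groups and drvdata itself, so both the unregister path and the error handling shrink. A minimal sketch of the pattern; example_priv and example_read_temp() are hypothetical stand-ins:

    #include <linux/device.h>
    #include <linux/hwmon.h>
    #include <linux/hwmon-sysfs.h>

    struct example_priv { unsigned int temp; };

    static unsigned int example_read_temp(struct example_priv *p)
    {
        return p->temp;
    }

    static ssize_t example_show_temp(struct device *dev,
                                     struct device_attribute *devattr,
                                     char *buf)
    {
        struct example_priv *p = dev_get_drvdata(dev); /* set by hwmon core */

        return sprintf(buf, "%u\n", example_read_temp(p));
    }
    static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, example_show_temp,
                              NULL, 0);

    static struct attribute *example_attrs[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        NULL
    };
    ATTRIBUTE_GROUPS(example);

    /* registration, e.g. from probe:
     *   hwmon = hwmon_device_register_with_groups(parent, "example", p,
     *                                             example_groups);
     */
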
@@ -10761,6 +10831,7 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
+ tg3_asic_rev(tp) != ASIC_REV_5762 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
@@ -10889,6 +10960,13 @@ static void tg3_timer(unsigned long __opaque)
} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
tg3_flag(tp, 5780_CLASS)) {
tg3_serdes_parallel_detect(tp);
+ } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
+ u32 cpmu = tr32(TG3_CPMU_STATUS);
+ bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
+ TG3_CPMU_STATUS_LINK_MASK);
+
+ if (link_up != tp->link_up)
+ tg3_setup_phy(tp, false);
}
tp->timer_counter = tp->timer_multiplier;
@@ -11594,10 +11672,11 @@ static int tg3_close(struct net_device *dev)
memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
- tg3_power_down_prepare(tp);
-
- tg3_carrier_off(tp);
+ if (pci_device_is_present(tp->pdev)) {
+ tg3_power_down_prepare(tp);
+ tg3_carrier_off(tp);
+ }
return 0;
}
@@ -11755,8 +11834,6 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
get_stat64(&hw_stats->rx_frame_too_long_errors) +
get_stat64(&hw_stats->rx_undersize_packets);
- stats->rx_over_errors = old_stats->rx_over_errors +
- get_stat64(&hw_stats->rxbds_empty);
stats->rx_frame_errors = old_stats->rx_frame_errors +
get_stat64(&hw_stats->rx_align_errors);
stats->tx_aborted_errors = old_stats->tx_aborted_errors +
@@ -13603,14 +13680,13 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
}
-static int tg3_hwtstamp_ioctl(struct net_device *dev,
- struct ifreq *ifr, int cmd)
+static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
struct tg3 *tp = netdev_priv(dev);
struct hwtstamp_config stmpconf;
if (!tg3_flag(tp, PTP_CAPABLE))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
return -EFAULT;
@@ -13691,6 +13767,67 @@ static int tg3_hwtstamp_ioctl(struct net_device *dev,
-EFAULT : 0;
}
+static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ struct hwtstamp_config stmpconf;
+
+ if (!tg3_flag(tp, PTP_CAPABLE))
+ return -EOPNOTSUPP;
+
+ stmpconf.flags = 0;
+ stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
+ HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
+
+ switch (tp->rxptpctl) {
+ case 0:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -ERANGE;
+ }
+
+ return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
+ -EFAULT : 0;
+}
+
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *data = if_mii(ifr);
@@ -13744,7 +13881,10 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return err;
case SIOCSHWTSTAMP:
- return tg3_hwtstamp_ioctl(dev, ifr, cmd);
+ return tg3_hwtstamp_set(dev, ifr);
+
+ case SIOCGHWTSTAMP:
+ return tg3_hwtstamp_get(dev, ifr);
default:
/* do nothing */
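
With the split above, SIOCGHWTSTAMP lets userspace read back the active timestamping configuration that SIOCSHWTSTAMP installed, and both now return -EOPNOTSUPP on non-PTP-capable parts. A minimal userspace sketch of the new query ("eth0" is just an example interface name):

    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>
    #include <net/if.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>

    int main(void)
    {
        struct hwtstamp_config cfg = { 0 };
        struct ifreq ifr;
        int sock = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&cfg;

        /* reads the current config without modifying device state */
        if (ioctl(sock, SIOCGHWTSTAMP, &ifr) == 0)
            printf("tx_type %d rx_filter %d\n", cfg.tx_type, cfg.rx_filter);
        else
            perror("SIOCGHWTSTAMP");
        return 0;
    }
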
@@ -14865,7 +15005,8 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
if (val == NIC_SRAM_DATA_SIG_MAGIC) {
u32 nic_cfg, led_cfg;
- u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
+ u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
+ u32 nic_phy_id, ver, eeprom_phy_id;
int eeprom_phy_serdes = 0;
tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
@@ -14882,6 +15023,11 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
if (tg3_asic_rev(tp) == ASIC_REV_5785)
tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720)
+ tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
+
if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
eeprom_phy_serdes = 1;
@@ -15034,6 +15180,9 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
+
+ if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
+ tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
}
done:
if (tg3_flag(tp, WOL_CAP))
@@ -15129,9 +15278,11 @@ static void tg3_phy_init_link_config(struct tg3 *tp)
{
u32 adv = ADVERTISED_Autoneg;
- if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
- adv |= ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full;
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+ if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
+ adv |= ADVERTISED_1000baseT_Half;
+ adv |= ADVERTISED_1000baseT_Full;
+ }
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
adv |= ADVERTISED_100baseT_Half |
@@ -16479,6 +16630,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
/* Set these bits to enable statistics workaround. */
if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762 ||
tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
tp->coalesce_mode |= HOSTCC_MODE_ATTN;
@@ -16512,6 +16664,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
/* Clear this out for sanity. */
tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+ /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
+ tw32(TG3PCI_REG_BASE_ADDR, 0);
+
pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
&pci_state_reg);
if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
@@ -16618,6 +16773,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
else
tg3_flag_clear(tp, POLL_SERDES);
+ if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
+ tg3_flag_set(tp, POLL_CPMU_LINK);
+
tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
@@ -17539,6 +17697,7 @@ static int tg3_init_one(struct pci_dev *pdev,
features |= NETIF_F_LOOPBACK;
dev->hw_features |= features;
+ dev->priv_flags |= IFF_UNICAST_FLT;
if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
!tg3_flag(tp, TSO_CAPABLE) &&
@@ -17739,10 +17898,12 @@ static int tg3_suspend(struct device *device)
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct tg3 *tp = netdev_priv(dev);
- int err;
+ int err = 0;
+
+ rtnl_lock();
if (!netif_running(dev))
- return 0;
+ goto unlock;
tg3_reset_task_cancel(tp);
tg3_phy_stop(tp);
@@ -17784,6 +17945,8 @@ out:
tg3_phy_start(tp);
}
+unlock:
+ rtnl_unlock();
return err;
}
@@ -17792,10 +17955,12 @@ static int tg3_resume(struct device *device)
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct tg3 *tp = netdev_priv(dev);
- int err;
+ int err = 0;
+
+ rtnl_lock();
if (!netif_running(dev))
- return 0;
+ goto unlock;
netif_device_attach(dev);
@@ -17819,6 +17984,8 @@ out:
if (!err)
tg3_phy_start(tp);
+unlock:
+ rtnl_unlock();
return err;
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 5c3835aa1e1b..ef472385bce4 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -1146,10 +1146,14 @@
#define TG3_CPMU_CLCK_ORIDE 0x00003624
#define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000
+#define TG3_CPMU_CLCK_ORIDE_ENABLE 0x00003628
+#define TG3_CPMU_MAC_ORIDE_ENABLE (1 << 13)
+
#define TG3_CPMU_STATUS 0x0000362c
#define TG3_CPMU_STATUS_FMSK_5717 0x20000000
#define TG3_CPMU_STATUS_FMSK_5719 0xc0000000
#define TG3_CPMU_STATUS_FSHFT_5719 30
+#define TG3_CPMU_STATUS_LINK_MASK 0x180000
#define TG3_CPMU_CLCK_STAT 0x00003630
#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000
@@ -2204,7 +2208,7 @@
#define NIC_SRAM_DATA_CFG_2 0x00000d38
-#define NIC_SRAM_DATA_CFG_2_APD_EN 0x00000400
+#define NIC_SRAM_DATA_CFG_2_APD_EN 0x00004000
#define SHASTA_EXT_LED_MODE_MASK 0x00018000
#define SHASTA_EXT_LED_LEGACY 0x00000000
#define SHASTA_EXT_LED_SHARED 0x00008000
@@ -2226,6 +2230,9 @@
#define NIC_SRAM_CPMUSTAT_SIG 0x0000362c
#define NIC_SRAM_CPMUSTAT_SIG_MSK 0x0000ffff
+#define NIC_SRAM_DATA_CFG_5 0x00000e0c
+#define NIC_SRAM_DISABLE_1G_HALF_ADV 0x00000002
+
#define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000
#define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000
@@ -3014,6 +3021,7 @@ enum TG3_FLAGS {
TG3_FLAG_ENABLE_ASF,
TG3_FLAG_ASPM_WORKAROUND,
TG3_FLAG_POLL_SERDES,
+ TG3_FLAG_POLL_CPMU_LINK,
TG3_FLAG_MBOX_WRITE_REORDER,
TG3_FLAG_PCIX_TARGET_HWBUG,
TG3_FLAG_WOL_SPEED_100MB,
@@ -3325,6 +3333,7 @@ struct tg3 {
#define TG3_PHYFLG_1G_ON_VAUX_OK 0x00080000
#define TG3_PHYFLG_KEEP_LINK_ON_PWRDN 0x00100000
#define TG3_PHYFLG_MDIX_STATE 0x00200000
+#define TG3_PHYFLG_DISABLE_1G_HD_ADV 0x00400000
u32 led_ctrl;
u32 phy_otp;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 6f3cac060f29..537bba14f913 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -22,6 +22,14 @@
/* IOC local definitions */
+#define bfa_ioc_state_disabled(__sm) \
+ (((__sm) == BFI_IOC_UNINIT) || \
+ ((__sm) == BFI_IOC_INITING) || \
+ ((__sm) == BFI_IOC_HWINIT) || \
+ ((__sm) == BFI_IOC_DISABLED) || \
+ ((__sm) == BFI_IOC_FAIL) || \
+ ((__sm) == BFI_IOC_CFG_DISABLED))
+
/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */
#define bfa_ioc_firmware_lock(__ioc) \
@@ -42,6 +50,14 @@
((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
+#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate) \
+ ((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
+#define bfa_ioc_get_cur_ioc_fwstate(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
+#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate) \
+ ((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
+#define bfa_ioc_get_alt_ioc_fwstate(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))
#define bfa_ioc_mbox_cmd_pending(__ioc) \
(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
@@ -76,8 +92,8 @@ static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
-static void bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
- u32 boot_param);
+static enum bfa_status bfa_ioc_boot(struct bfa_ioc *ioc,
+ enum bfi_fwboot_type boot_type, u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
char *serial_num);
@@ -860,7 +876,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
*/
case IOCPF_E_TIMEOUT:
- writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
break;
@@ -949,7 +965,7 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
case IOCPF_E_SEMLOCKED:
bfa_ioc_notify_fail(ioc);
bfa_ioc_sync_leave(ioc);
- writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
bfa_nw_ioc_hw_sem_release(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
break;
@@ -1031,7 +1047,7 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
bfa_ioc_notify_fail(ioc);
if (!iocpf->auto_recover) {
bfa_ioc_sync_leave(ioc);
- writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
bfa_nw_ioc_hw_sem_release(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
} else {
@@ -1131,6 +1147,25 @@ bfa_nw_ioc_sem_release(void __iomem *sem_reg)
writel(1, sem_reg);
}
+/* Invalidate fwver signature */
+enum bfa_status
+bfa_nw_ioc_fwsig_invalidate(struct bfa_ioc *ioc)
+{
+ u32 pgnum, pgoff;
+ u32 loff = 0;
+ enum bfi_ioc_state ioc_fwstate;
+
+ ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
+ if (!bfa_ioc_state_disabled(ioc_fwstate))
+ return BFA_STATUS_ADAPTER_ENABLED;
+
+ pgnum = bfa_ioc_smem_pgnum(ioc, loff);
+ pgoff = PSS_SMEM_PGOFF(loff);
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+ writel(BFI_IOC_FW_INV_SIGN, ioc->ioc_regs.smem_page_start + loff);
+ return BFA_STATUS_OK;
+}
+
/* Clear fwver hdr */
static void
bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
@@ -1162,7 +1197,7 @@ bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
}
- fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+ fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
if (fwstate == BFI_IOC_UNINIT) {
writel(1, ioc->ioc_regs.ioc_init_sem_reg);
return;
@@ -1176,8 +1211,8 @@ bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
}
bfa_ioc_fwver_clear(ioc);
- writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
- writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
+ bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);
/*
* Try to lock and then unlock the semaphore.
@@ -1309,22 +1344,510 @@ bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
}
}
-/* Returns TRUE if same. */
+static bool
+bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1,
+ struct bfi_ioc_image_hdr *fwhdr_2)
+{
+ int i;
+
+ for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
+ if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
+ return false;
+ }
+
+ return true;
+}
+
+/* Returns TRUE if the major, minor and maintenance versions are the same.
+ * If the patch versions also match, the MD5 checksums must match as well.
+ */
+static bool
+bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr *drv_fwhdr,
+ struct bfi_ioc_image_hdr *fwhdr_to_cmp)
+{
+ if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
+ return false;
+ if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
+ return false;
+ if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
+ return false;
+ if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
+ return false;
+ if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
+ drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
+ drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build)
+ return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);
+
+ return true;
+}
+
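
The chained checks above encode one rule: two images are compatible only if they agree on signature, major, minor and maint, and, when patch, phase and build all match as well, on the md5sums too. A stand-alone sketch of the same rule, assuming hypothetical user-space types rather than the driver's structs:

	#include <stdbool.h>
	#include <string.h>

	struct ver { unsigned char major, minor, maint, patch, phase, build; };
	struct hdr { unsigned int signature; struct ver v; unsigned char md5[16]; };

	static bool compatible(const struct hdr *a, const struct hdr *b)
	{
		if (a->signature != b->signature ||
		    a->v.major != b->v.major ||
		    a->v.minor != b->v.minor ||
		    a->v.maint != b->v.maint)
			return false;			/* different firmware stream */
		if (a->v.patch == b->v.patch &&
		    a->v.phase == b->v.phase &&
		    a->v.build == b->v.build)
			/* identical version numbers: contents must match too */
			return memcmp(a->md5, b->md5, sizeof(a->md5)) == 0;
		return true;				/* same stream, different level */
	}

	int main(void)
	{
		struct hdr a = { .signature = 0xbfadbfad, .v = { 3, 2, 1, 0, 0, 0 } };
		struct hdr b = a;

		b.v.patch = 4;			/* same stream, newer patch level */
		return !compatible(&a, &b);	/* exits 0: compatible */
	}
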
+static bool
+bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr *flash_fwhdr)
+{
+ if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
+ return false;
+
+ return true;
+}
+
+static bool
+fwhdr_is_ga(struct bfi_ioc_image_hdr *fwhdr)
+{
+ if (fwhdr->fwver.phase == 0 &&
+ fwhdr->fwver.build == 0)
+ return false;
+
+ return true;
+}
+
+/* Reports whether fwhdr_to_cmp is incompatible with, older than, the
+ * same as, or better than base_fwhdr.
+ */
+static enum bfi_ioc_img_ver_cmp
+bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
+ struct bfi_ioc_image_hdr *fwhdr_to_cmp)
+{
+ if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == false)
+ return BFI_IOC_IMG_VER_INCOMP;
+
+ if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
+ return BFI_IOC_IMG_VER_BETTER;
+ else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
+ return BFI_IOC_IMG_VER_OLD;
+
+ /* GA takes priority over internal builds of the same patch stream.
+ * At this point the major, minor, maint and patch numbers are the same.
+ */
+ if (fwhdr_is_ga(base_fwhdr) == true)
+ if (fwhdr_is_ga(fwhdr_to_cmp))
+ return BFI_IOC_IMG_VER_SAME;
+ else
+ return BFI_IOC_IMG_VER_OLD;
+ else
+ if (fwhdr_is_ga(fwhdr_to_cmp))
+ return BFI_IOC_IMG_VER_BETTER;
+
+ if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
+ return BFI_IOC_IMG_VER_BETTER;
+ else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
+ return BFI_IOC_IMG_VER_OLD;
+
+ if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
+ return BFI_IOC_IMG_VER_BETTER;
+ else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
+ return BFI_IOC_IMG_VER_OLD;
+
+ /* All version numbers are equal.
+ * The MD5 check is done as part of the compatibility check.
+ */
+ return BFI_IOC_IMG_VER_SAME;
+}
+
+/* register definitions */
+#define FLI_CMD_REG 0x0001d000
+#define FLI_WRDATA_REG 0x0001d00c
+#define FLI_RDDATA_REG 0x0001d010
+#define FLI_ADDR_REG 0x0001d004
+#define FLI_DEV_STATUS_REG 0x0001d014
+
+#define BFA_FLASH_FIFO_SIZE 128 /* fifo size */
+#define BFA_FLASH_CHECK_MAX 10000 /* max # of status check */
+#define BFA_FLASH_BLOCKING_OP_MAX 1000000 /* max # of blocking op check */
+#define BFA_FLASH_WIP_MASK 0x01 /* write in progress bit mask */
+
+#define NFC_STATE_RUNNING 0x20000001
+#define NFC_STATE_PAUSED 0x00004560
+#define NFC_VER_VALID 0x147
+
+enum bfa_flash_cmd {
+ BFA_FLASH_FAST_READ = 0x0b, /* fast read */
+ BFA_FLASH_WRITE_ENABLE = 0x06, /* write enable */
+ BFA_FLASH_SECTOR_ERASE = 0xd8, /* sector erase */
+ BFA_FLASH_WRITE = 0x02, /* write */
+ BFA_FLASH_READ_STATUS = 0x05, /* read status */
+};
+
+/* hardware error definition */
+enum bfa_flash_err {
+ BFA_FLASH_NOT_PRESENT = -1, /*!< flash not present */
+ BFA_FLASH_UNINIT = -2, /*!< flash not initialized */
+ BFA_FLASH_BAD = -3, /*!< flash bad */
+ BFA_FLASH_BUSY = -4, /*!< flash busy */
+ BFA_FLASH_ERR_CMD_ACT = -5, /*!< command active never cleared */
+ BFA_FLASH_ERR_FIFO_CNT = -6, /*!< fifo count never cleared */
+ BFA_FLASH_ERR_WIP = -7, /*!< write-in-progress never cleared */
+ BFA_FLASH_ERR_TIMEOUT = -8, /*!< fli timeout */
+ BFA_FLASH_ERR_LEN = -9, /*!< invalid length */
+};
+
+/* flash command register data structure */
+union bfa_flash_cmd_reg {
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 act:1;
+ u32 rsv:1;
+ u32 write_cnt:9;
+ u32 read_cnt:9;
+ u32 addr_cnt:4;
+ u32 cmd:8;
+#else
+ u32 cmd:8;
+ u32 addr_cnt:4;
+ u32 read_cnt:9;
+ u32 write_cnt:9;
+ u32 rsv:1;
+ u32 act:1;
+#endif
+ } r;
+ u32 i;
+};
+
+/* flash device status register data structure */
+union bfa_flash_dev_status_reg {
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 rsv:21;
+ u32 fifo_cnt:6;
+ u32 busy:1;
+ u32 init_status:1;
+ u32 present:1;
+ u32 bad:1;
+ u32 good:1;
+#else
+ u32 good:1;
+ u32 bad:1;
+ u32 present:1;
+ u32 init_status:1;
+ u32 busy:1;
+ u32 fifo_cnt:6;
+ u32 rsv:21;
+#endif
+ } r;
+ u32 i;
+};
+
+/* flash address register data structure */
+union bfa_flash_addr_reg {
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 addr:24;
+ u32 dummy:8;
+#else
+ u32 dummy:8;
+ u32 addr:24;
+#endif
+ } r;
+ u32 i;
+};
+
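
These unions let a single readl()/writel() move the whole register while the code manipulates named fields; the __BIG_ENDIAN arms keep each field on the same physical bits regardless of host byte order. A minimal decode/encode sketch using only the little-endian layout (raw value hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	union flash_cmd_reg {			/* LE arm of the union above */
		struct {
			uint32_t cmd:8;
			uint32_t addr_cnt:4;
			uint32_t read_cnt:9;
			uint32_t write_cnt:9;
			uint32_t rsv:1;
			uint32_t act:1;
		} r;
		uint32_t i;
	};

	int main(void)
	{
		union flash_cmd_reg cmd;

		cmd.i = 0;
		cmd.r.act = 1;			/* kick the operation */
		cmd.r.read_cnt = 4;		/* four status bytes back */
		cmd.r.addr_cnt = 0;
		cmd.r.cmd = 0x05;		/* BFA_FLASH_READ_STATUS */
		printf("raw register value: 0x%08x\n", (unsigned)cmd.i);
		return 0;
	}
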
+/* Flash raw private functions */
+static void
+bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
+ u8 rd_cnt, u8 ad_cnt, u8 op)
+{
+ union bfa_flash_cmd_reg cmd;
+
+ cmd.i = 0;
+ cmd.r.act = 1;
+ cmd.r.write_cnt = wr_cnt;
+ cmd.r.read_cnt = rd_cnt;
+ cmd.r.addr_cnt = ad_cnt;
+ cmd.r.cmd = op;
+ writel(cmd.i, (pci_bar + FLI_CMD_REG));
+}
+
+static void
+bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
+{
+ union bfa_flash_addr_reg addr;
+
+ addr.r.addr = address & 0x00ffffff;
+ addr.r.dummy = 0;
+ writel(addr.i, (pci_bar + FLI_ADDR_REG));
+}
+
+static int
+bfa_flash_cmd_act_check(void __iomem *pci_bar)
+{
+ union bfa_flash_cmd_reg cmd;
+
+ cmd.i = readl(pci_bar + FLI_CMD_REG);
+
+ if (cmd.r.act)
+ return BFA_FLASH_ERR_CMD_ACT;
+
+ return 0;
+}
+
+/* Flush FLI data fifo. */
+static u32
+bfa_flash_fifo_flush(void __iomem *pci_bar)
+{
+ u32 i;
+ u32 t;
+ union bfa_flash_dev_status_reg dev_status;
+
+ dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+
+ if (!dev_status.r.fifo_cnt)
+ return 0;
+
+ /* fifo counter in terms of words */
+ for (i = 0; i < dev_status.r.fifo_cnt; i++)
+ t = readl(pci_bar + FLI_RDDATA_REG);
+
+ /* Check the device status. It may take some time. */
+ for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
+ dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+ if (!dev_status.r.fifo_cnt)
+ break;
+ }
+
+ if (dev_status.r.fifo_cnt)
+ return BFA_FLASH_ERR_FIFO_CNT;
+
+ return 0;
+}
+
+/* Read flash status. */
+static u32
+bfa_flash_status_read(void __iomem *pci_bar)
+{
+ union bfa_flash_dev_status_reg dev_status;
+ u32 status;
+ u32 ret_status;
+ int i;
+
+ status = bfa_flash_fifo_flush(pci_bar);
+ if (status < 0)
+ return status;
+
+ bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);
+
+ for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
+ status = bfa_flash_cmd_act_check(pci_bar);
+ if (!status)
+ break;
+ }
+
+ if (status)
+ return status;
+
+ dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+ if (!dev_status.r.fifo_cnt)
+ return BFA_FLASH_BUSY;
+
+ ret_status = readl(pci_bar + FLI_RDDATA_REG);
+ ret_status >>= 24;
+
+ status = bfa_flash_fifo_flush(pci_bar);
+ if (status < 0)
+ return status;
+
+ return ret_status;
+}
+
+/* Start flash read operation. */
+static u32
+bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
+ char *buf)
+{
+ u32 status;
+
+ /* len must be a multiple of 4 and must not exceed the fifo size */
+ if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
+ return BFA_FLASH_ERR_LEN;
+
+ /* check status */
+ status = bfa_flash_status_read(pci_bar);
+ if (status == BFA_FLASH_BUSY)
+ status = bfa_flash_status_read(pci_bar);
+
+ if (status < 0)
+ return status;
+
+ /* check if write-in-progress bit is cleared */
+ if (status & BFA_FLASH_WIP_MASK)
+ return BFA_FLASH_ERR_WIP;
+
+ bfa_flash_set_addr(pci_bar, offset);
+
+ bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);
+
+ return 0;
+}
+
+/* Check flash read operation. */
+static u32
+bfa_flash_read_check(void __iomem *pci_bar)
+{
+ if (bfa_flash_cmd_act_check(pci_bar))
+ return 1;
+
+ return 0;
+}
+
+/* End flash read operation. */
+static void
+bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
+{
+ u32 i;
+
+ /* read data fifo up to 32 words */
+ for (i = 0; i < len; i += 4) {
+ u32 w = readl(pci_bar + FLI_RDDATA_REG);
+ *((u32 *)(buf + i)) = swab32(w);
+ }
+
+ bfa_flash_fifo_flush(pci_bar);
+}
+
+/* Perform flash raw read. */
+
+#define FLASH_BLOCKING_OP_MAX 500
+#define FLASH_SEM_LOCK_REG 0x18820
+
+static int
+bfa_raw_sem_get(void __iomem *bar)
+{
+ int locked;
+
+ locked = readl((bar + FLASH_SEM_LOCK_REG));
+
+ return !locked;
+}
+
+static enum bfa_status
+bfa_flash_sem_get(void __iomem *bar)
+{
+ u32 n = FLASH_BLOCKING_OP_MAX;
+
+ while (!bfa_raw_sem_get(bar)) {
+ if (--n <= 0)
+ return BFA_STATUS_BADFLASH;
+ udelay(10000);
+ }
+ return BFA_STATUS_OK;
+}
+
+static void
+bfa_flash_sem_put(void __iomem *bar)
+{
+ writel(0, (bar + FLASH_SEM_LOCK_REG));
+}
+
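
bfa_flash_sem_get() is an instance of the bounded-poll idiom used throughout this file: retry the lock-taking read a fixed number of times with a delay between attempts, and fail rather than spin forever. A user-space analog, with the lock register and delay stubbed out:

	#include <stdio.h>

	static int fake_lock;		/* stands in for FLASH_SEM_LOCK_REG */

	/* on this hardware, reading the register both tests and takes the lock */
	static int raw_sem_get(void)
	{
		int locked = fake_lock;

		fake_lock = 1;
		return !locked;
	}

	static int sem_get(unsigned int max_tries)
	{
		while (!raw_sem_get()) {
			if (--max_tries == 0)
				return -1;	/* maps to BFA_STATUS_BADFLASH */
			/* the driver udelay()s 10ms here; nothing to wait on in this stub */
		}
		return 0;
	}

	int main(void)
	{
		printf("sem_get: %d\n", sem_get(500));	/* 0: acquired */
		return 0;
	}
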
+static enum bfa_status
+bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
+ u32 len)
+{
+ u32 n, status;
+ u32 off, l, s, residue, fifo_sz;
+
+ residue = len;
+ off = 0;
+ fifo_sz = BFA_FLASH_FIFO_SIZE;
+ status = bfa_flash_sem_get(pci_bar);
+ if (status != BFA_STATUS_OK)
+ return status;
+
+ while (residue) {
+ s = offset + off;
+ n = s / fifo_sz;
+ l = (n + 1) * fifo_sz - s;
+ if (l > residue)
+ l = residue;
+
+ status = bfa_flash_read_start(pci_bar, offset + off, l,
+ &buf[off]);
+ if (status < 0) {
+ bfa_flash_sem_put(pci_bar);
+ return BFA_STATUS_FAILED;
+ }
+
+ n = BFA_FLASH_BLOCKING_OP_MAX;
+ while (bfa_flash_read_check(pci_bar)) {
+ if (--n <= 0) {
+ bfa_flash_sem_put(pci_bar);
+ return BFA_STATUS_FAILED;
+ }
+ }
+
+ bfa_flash_read_end(pci_bar, l, &buf[off]);
+
+ residue -= l;
+ off += l;
+ }
+ bfa_flash_sem_put(pci_bar);
+
+ return BFA_STATUS_OK;
+}
+
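
The length arithmetic in bfa_flash_raw_read() keeps every transfer inside one fifo-sized window: for a read starting at absolute byte s, l = (n + 1) * fifo_sz - s is the distance to the next 128-byte boundary, clamped to the bytes still owed. A quick check of that arithmetic with a deliberately unaligned request:

	#include <stdio.h>

	#define FIFO_SZ 128u

	int main(void)
	{
		unsigned int offset = 100, len = 300;	/* unaligned on purpose */
		unsigned int off = 0, residue = len;

		while (residue) {
			unsigned int s = offset + off;
			unsigned int n = s / FIFO_SZ;
			unsigned int l = (n + 1) * FIFO_SZ - s;

			if (l > residue)
				l = residue;
			printf("read %3u bytes at %3u\n", l, s);	/* 28, 128, 128, 16 */
			residue -= l;
			off += l;
		}
		return 0;
	}
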
+u32
+bfa_nw_ioc_flash_img_get_size(struct bfa_ioc *ioc)
+{
+ return BFI_FLASH_IMAGE_SZ/sizeof(u32);
+}
+
+#define BFA_FLASH_PART_FWIMG_ADDR 0x100000 /* fw image address */
+
+enum bfa_status
+bfa_nw_ioc_flash_img_get_chnk(struct bfa_ioc *ioc, u32 off,
+ u32 *fwimg)
+{
+ return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
+ BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
+ (char *)fwimg, BFI_FLASH_CHUNK_SZ);
+}
+
+static enum bfi_ioc_img_ver_cmp
+bfa_ioc_flash_fwver_cmp(struct bfa_ioc *ioc,
+ struct bfi_ioc_image_hdr *base_fwhdr)
+{
+ struct bfi_ioc_image_hdr *flash_fwhdr;
+ enum bfa_status status;
+ u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];
+
+ status = bfa_nw_ioc_flash_img_get_chnk(ioc, 0, fwimg);
+ if (status != BFA_STATUS_OK)
+ return BFI_IOC_IMG_VER_INCOMP;
+
+ flash_fwhdr = (struct bfi_ioc_image_hdr *)fwimg;
+ if (bfa_ioc_flash_fwver_valid(flash_fwhdr))
+ return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
+ else
+ return BFI_IOC_IMG_VER_INCOMP;
+}
+
+/**
+ * Returns TRUE if the driver is willing to work with the current smem f/w version.
+ */
bool
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
struct bfi_ioc_image_hdr *drv_fwhdr;
- int i;
+ enum bfi_ioc_img_ver_cmp smem_flash_cmp, drv_smem_cmp;
drv_fwhdr = (struct bfi_ioc_image_hdr *)
bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
- for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
- if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
- return false;
+ /* If smem is incompatible or old, driver should not work with it. */
+ drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, fwhdr);
+ if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
+ drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
+ return false;
}
- return true;
+ /* If flash has a better f/w than smem, do not work with smem.
+ * If the smem f/w equals the flash f/w, work with it: at this point the
+ * smem f/w is known to be neither old nor incompatible.
+ * If flash is old or incompatible, work with smem only if the smem f/w
+ * equals the driver f/w.
+ */
+ smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, fwhdr);
+
+ if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER)
+ return false;
+ else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME)
+ return true;
+ else
+ return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
+ true : false;
}
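
The three-way decision above reduces to a small acceptance predicate. A stand-alone restatement (enum names shortened; drv_smem is how the smem image ranks against the driver's bundled image, smem_flash is how the flash image ranks against smem):

	enum cmp { INCOMP, OLD, SAME, BETTER };

	/* should the driver keep working with the firmware already in smem? */
	static int accept_smem(enum cmp drv_smem, enum cmp smem_flash)
	{
		if (drv_smem == INCOMP || drv_smem == OLD)
			return 0;	/* never run f/w older than the driver's */
		if (smem_flash == BETTER)
			return 0;	/* flash holds a better image */
		if (smem_flash == SAME)
			return 1;
		return drv_smem == SAME; /* flash old/incomp: smem must equal drv */
	}

	int main(void)
	{
		return !accept_smem(SAME, SAME);	/* exits 0: keep smem f/w */
	}
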
/* Return true if current running version is valid. Firmware signature and
@@ -1333,15 +1856,9 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
static bool
bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
{
- struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
+ struct bfi_ioc_image_hdr fwhdr;
bfa_nw_ioc_fwver_get(ioc, &fwhdr);
- drv_fwhdr = (struct bfi_ioc_image_hdr *)
- bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
-
- if (fwhdr.signature != drv_fwhdr->signature)
- return false;
-
if (swab32(fwhdr.bootenv) != boot_env)
return false;
@@ -1366,7 +1883,7 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
bool fwvalid;
u32 boot_env;
- ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+ ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
if (force)
ioc_fwstate = BFI_IOC_UNINIT;
@@ -1380,8 +1897,10 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
false : bfa_ioc_fwver_valid(ioc, boot_env);
if (!fwvalid) {
- bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
- bfa_ioc_poll_fwinit(ioc);
+ if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
+ BFA_STATUS_OK)
+ bfa_ioc_poll_fwinit(ioc);
+
return;
}
@@ -1411,8 +1930,9 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
/**
* Initialize the h/w for any other states.
*/
- bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
- bfa_ioc_poll_fwinit(ioc);
+ if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
+ BFA_STATUS_OK)
+ bfa_ioc_poll_fwinit(ioc);
}
void
@@ -1517,7 +2037,7 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc)
}
/* Initiate a full firmware download. */
-static void
+static enum bfa_status
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
u32 boot_env)
{
@@ -1527,18 +2047,47 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
u32 chunkno = 0;
u32 i;
u32 asicmode;
+ u32 fwimg_size;
+ u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
+ enum bfa_status status;
+
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_FLASH) {
+ fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);
+
+ status = bfa_nw_ioc_flash_img_get_chnk(ioc,
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
+ if (status != BFA_STATUS_OK)
+ return status;
- fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
+ fwimg = fwimg_buf;
+ } else {
+ fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
+ fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+ }
pgnum = bfa_ioc_smem_pgnum(ioc, loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
- for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
+ for (i = 0; i < fwimg_size; i++) {
if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
- fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_FLASH) {
+ status = bfa_nw_ioc_flash_img_get_chnk(ioc,
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
+ fwimg_buf);
+ if (status != BFA_STATUS_OK)
+ return status;
+
+ fwimg = fwimg_buf;
+ } else {
+ fwimg = bfa_cb_image_get_chunk(
+ bfa_ioc_asic_gen(ioc),
BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+ }
}
/**
@@ -1566,6 +2115,10 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
/*
* Set boot type, env and device mode at the end.
*/
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_FLASH) {
+ boot_type = BFI_FWBOOT_TYPE_NORMAL;
+ }
asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
ioc->port0_mode, ioc->port1_mode);
writel(asicmode, ((ioc->ioc_regs.smem_page_start)
@@ -1574,6 +2127,7 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
+ (BFI_FWBOOT_TYPE_OFF)));
writel(boot_env, ((ioc->ioc_regs.smem_page_start)
+ (BFI_FWBOOT_ENV_OFF)));
+ return BFA_STATUS_OK;
}
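
The download loop refetches a 256-byte chunk whenever the word index crosses a chunk boundary. Assuming BFA_IOC_FLASH_CHUNK_NO/ADDR are the usual divide-and-scale on the 64-word chunk size (their definitions are not in this hunk), the bookkeeping works out as:

	#include <stdio.h>

	#define CHUNK_SZ_WORDS 64u	/* 256 bytes / sizeof(u32) */

	int main(void)
	{
		unsigned int i = 130;	/* word index into the firmware image */
		unsigned int chunkno = i / CHUNK_SZ_WORDS;	   /* ..._CHUNK_NO   */
		unsigned int chunkaddr = chunkno * CHUNK_SZ_WORDS; /* ..._CHUNK_ADDR */

		printf("word %u -> chunk %u at word %u\n", i, chunkno, chunkaddr);
		return 0;	/* word 130 -> chunk 2 at word 128 */
	}
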
static void
@@ -1846,29 +2400,47 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
/* Interface used by diag module to do firmware boot with memory test
* as the entry vector.
*/
-static void
+static enum bfa_status
bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
u32 boot_env)
{
+ struct bfi_ioc_image_hdr *drv_fwhdr;
+ enum bfa_status status;
bfa_ioc_stats(ioc, ioc_boots);
if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
- return;
+ return BFA_STATUS_FAILED;
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_NORMAL) {
+ drv_fwhdr = (struct bfi_ioc_image_hdr *)
+ bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
+ /* Boot from flash only if the flash f/w is better than the driver f/w.
+ * Otherwise push the driver's firmware.
+ */
+ if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
+ BFI_IOC_IMG_VER_BETTER)
+ boot_type = BFI_FWBOOT_TYPE_FLASH;
+ }
/**
* Initialize IOC state of all functions on a chip reset.
*/
if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
- writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
- writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
+ bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
} else {
- writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
- writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
+ bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
}
bfa_ioc_msgflush(ioc);
- bfa_ioc_download_fw(ioc, boot_type, boot_env);
- bfa_ioc_lpu_start(ioc);
+ status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
+ if (status == BFA_STATUS_OK)
+ bfa_ioc_lpu_start(ioc);
+ else
+ bfa_nw_iocpf_timeout(ioc);
+
+ return status;
}
/* Enable/disable IOC failure auto recovery. */
@@ -2473,7 +3045,7 @@ bfa_nw_iocpf_sem_timeout(void *ioc_arg)
static void
bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
{
- u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+ u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
if (fwstate == BFI_IOC_DISABLED) {
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index f04e0aab25b4..20cff7df4b55 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -215,6 +215,13 @@ struct bfa_ioc_hwif {
void (*ioc_sync_ack) (struct bfa_ioc *ioc);
bool (*ioc_sync_complete) (struct bfa_ioc *ioc);
bool (*ioc_lpu_read_stat) (struct bfa_ioc *ioc);
+ void (*ioc_set_fwstate) (struct bfa_ioc *ioc,
+ enum bfi_ioc_state fwstate);
+ enum bfi_ioc_state (*ioc_get_fwstate) (struct bfa_ioc *ioc);
+ void (*ioc_set_alt_fwstate) (struct bfa_ioc *ioc,
+ enum bfi_ioc_state fwstate);
+ enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc *ioc);
+
};
#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
@@ -291,6 +298,7 @@ void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc);
bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
+enum bfa_status bfa_nw_ioc_fwsig_invalidate(struct bfa_ioc *ioc);
void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
struct bfa_ioc_notify *notify);
bool bfa_nw_ioc_sem_get(void __iomem *sem_reg);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
index 5df0b0c68c5a..d639558455cb 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -48,6 +48,12 @@ static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_set_cur_ioc_fwstate(
+ struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_set_alt_ioc_fwstate(
+ struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
enum bfi_asic_mode asic_mode);
static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
@@ -68,6 +74,10 @@ static const struct bfa_ioc_hwif nw_hwif_ct = {
.ioc_sync_leave = bfa_ioc_ct_sync_leave,
.ioc_sync_ack = bfa_ioc_ct_sync_ack,
.ioc_sync_complete = bfa_ioc_ct_sync_complete,
+ .ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate,
+ .ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate,
+ .ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate,
+ .ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate,
};
static const struct bfa_ioc_hwif nw_hwif_ct2 = {
@@ -85,6 +95,10 @@ static const struct bfa_ioc_hwif nw_hwif_ct2 = {
.ioc_sync_leave = bfa_ioc_ct_sync_leave,
.ioc_sync_ack = bfa_ioc_ct_sync_ack,
.ioc_sync_complete = bfa_ioc_ct_sync_complete,
+ .ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate,
+ .ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate,
+ .ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate,
+ .ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate,
};
/* Called from bfa_ioc_attach() to map asic specific calls. */
@@ -565,6 +579,32 @@ bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
return false;
}
+static void
+bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc,
+ enum bfi_ioc_state fwstate)
+{
+ writel(fwstate, ioc->ioc_regs.ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc)
+{
+ return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
+}
+
+static void
+bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc,
+ enum bfi_ioc_state fwstate)
+{
+ writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc)
+{
+ return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate);
+}
+
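
Every fwstate access in the IOC state machine now routes through these four hooks, so a future ASIC with a different register layout only has to supply new accessors. A user-space analog of the indirection, all names hypothetical:

	#include <stdio.h>

	struct ioc;

	struct hwif {
		void (*set_fwstate)(struct ioc *ioc, int state);
		int  (*get_fwstate)(struct ioc *ioc);
	};

	struct ioc {
		int fwstate_reg;		/* stands in for the mapped register */
		const struct hwif *hwif;
	};

	static void ct_set(struct ioc *ioc, int s) { ioc->fwstate_reg = s; }
	static int  ct_get(struct ioc *ioc)        { return ioc->fwstate_reg; }

	static const struct hwif hwif_ct = {
		.set_fwstate = ct_set,
		.get_fwstate = ct_get,
	};

	int main(void)
	{
		struct ioc ioc = { .hwif = &hwif_ct };

		ioc.hwif->set_fwstate(&ioc, 42);
		printf("fwstate: %d\n", ioc.hwif->get_fwstate(&ioc));	/* 42 */
		return 0;
	}
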
static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 1f24c23dc786..8c563a77cdf6 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -25,6 +25,7 @@
/* BFI FW image type */
#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
+#define BFI_FLASH_IMAGE_SZ 0x100000
/* Msg header common to all msgs */
struct bfi_mhdr {
@@ -233,7 +234,29 @@ struct bfi_ioc_getattr_reply {
#define BFI_IOC_TRC_HDR_SZ 32
#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
+#define BFI_IOC_FW_INV_SIGN (0xdeaddead)
#define BFI_IOC_MD5SUM_SZ 4
+
+struct bfi_ioc_fwver {
+#ifdef __BIG_ENDIAN
+ u8 patch;
+ u8 maint;
+ u8 minor;
+ u8 major;
+ u8 rsvd[2];
+ u8 build;
+ u8 phase;
+#else
+ u8 major;
+ u8 minor;
+ u8 maint;
+ u8 patch;
+ u8 phase;
+ u8 build;
+ u8 rsvd[2];
+#endif
+};
+
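
The field order flips under __BIG_ENDIAN because the header travels as 32-bit words, so the byte carrying `major` lands at a different struct offset depending on host byte order. A little-endian-host demo of that mapping, with a made-up version word:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct fwver_le { uint8_t major, minor, maint, patch; };

	int main(void)
	{
		uint32_t word = 0x04030201;	/* one version word as read from SMEM */
		struct fwver_le v;

		memcpy(&v, &word, sizeof(v));
		/* LE stores the low byte first: major=1 minor=2 maint=3 patch=4.
		 * A BE host would see 0x04 at offset 0, which is why the kernel
		 * struct declares the fields in reverse order there.
		 */
		printf("%u.%u.%u.%u\n", v.major, v.minor, v.maint, v.patch);
		return 0;
	}
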
struct bfi_ioc_image_hdr {
u32 signature; /*!< constant signature */
u8 asic_gen; /*!< asic generation */
@@ -242,10 +265,18 @@ struct bfi_ioc_image_hdr {
u8 port1_mode; /*!< device mode for port 1 */
u32 exec; /*!< exec vector */
u32 bootenv; /*!< firmware boot env */
- u32 rsvd_b[4];
+ u32 rsvd_b[2];
+ struct bfi_ioc_fwver fwver;
u32 md5sum[BFI_IOC_MD5SUM_SZ];
};
+enum bfi_ioc_img_ver_cmp {
+ BFI_IOC_IMG_VER_INCOMP,
+ BFI_IOC_IMG_VER_OLD,
+ BFI_IOC_IMG_VER_SAME,
+ BFI_IOC_IMG_VER_BETTER
+};
+
#define BFI_FWBOOT_DEVMODE_OFF 4
#define BFI_FWBOOT_TYPE_OFF 8
#define BFI_FWBOOT_ENV_OFF 12
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h
index 7d10e335c27d..ae072dc5d238 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_enet.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -472,7 +472,8 @@ enum bfi_enet_hds_type {
struct bfi_enet_rx_cfg {
u8 rxq_type;
- u8 rsvd[3];
+ u8 rsvd[1];
+ u16 frame_size;
struct {
u8 max_header_size;
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
index f1eafc409bbd..1f512190d696 100644
--- a/drivers/net/ethernet/brocade/bna/bna.h
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -354,6 +354,14 @@ do { \
} \
} while (0)
+#define bna_mcam_mod_free_q(_bna) (&(_bna)->mcam_mod.free_q)
+
+#define bna_mcam_mod_del_q(_bna) (&(_bna)->mcam_mod.del_q)
+
+#define bna_ucam_mod_free_q(_bna) (&(_bna)->ucam_mod.free_q)
+
+#define bna_ucam_mod_del_q(_bna) (&(_bna)->ucam_mod.del_q)
+
/* Inline functions */
static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
@@ -391,12 +399,8 @@ int bna_num_rxp_set(struct bna *bna, int num_rxp);
void bna_hw_stats_get(struct bna *bna);
/* APIs for RxF */
-struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
-void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
- struct bna_mac *mac);
-struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod);
-void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod,
- struct bna_mac *mac);
+struct bna_mac *bna_cam_mod_mac_get(struct list_head *head);
+void bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac);
struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
struct bna_mcam_handle *handle);
@@ -493,11 +497,17 @@ enum bna_cb_status
bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
+bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
+ void (*cbfn)(struct bnad *, struct bna_rx *));
+enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
+void
+bna_rx_mcast_delall(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
enum bna_rxmode bitmask,
@@ -505,6 +515,8 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);
+void bna_rx_vlan_strip_enable(struct bna_rx *rx);
+void bna_rx_vlan_strip_disable(struct bna_rx *rx);
/* ENET */
/* API for RX */
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 3ca77fad4851..13f9636cdba7 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -1811,6 +1811,13 @@ bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
}
+ /* A separate queue to allow synchronous setting of a list of MACs */
+ INIT_LIST_HEAD(&ucam_mod->del_q);
+ for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++) {
+ bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
+ list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);
+ }
+
ucam_mod->bna = bna;
}
@@ -1818,11 +1825,16 @@ static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
struct list_head *qe;
- int i = 0;
+ int i;
+ i = 0;
list_for_each(qe, &ucam_mod->free_q)
i++;
+ i = 0;
+ list_for_each(qe, &ucam_mod->del_q)
+ i++;
+
ucam_mod->bna = NULL;
}
@@ -1851,6 +1863,13 @@ bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
&mcam_mod->free_handle_q);
}
+ /* A separate queue to allow synchronous setting of a list of MACs */
+ INIT_LIST_HEAD(&mcam_mod->del_q);
+ for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++) {
+ bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
+ list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);
+ }
+
mcam_mod->bna = bna;
}
@@ -1864,6 +1883,9 @@ bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
list_for_each(qe, &mcam_mod->free_q) i++;
i = 0;
+ list_for_each(qe, &mcam_mod->del_q) i++;
+
+ i = 0;
list_for_each(qe, &mcam_mod->free_handle_q) i++;
mcam_mod->bna = NULL;
@@ -1976,7 +1998,7 @@ bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
BNA_MEM_T_KVA;
res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
- attr->num_ucmac * sizeof(struct bna_mac);
+ (attr->num_ucmac * 2) * sizeof(struct bna_mac);
/* Virtual memory for Multicast MAC address - stored by mcam module */
res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
@@ -1984,7 +2006,7 @@ bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
BNA_MEM_T_KVA;
res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
- attr->num_mcmac * sizeof(struct bna_mac);
+ (attr->num_mcmac * 2) * sizeof(struct bna_mac);
/* Virtual memory for Multicast handle - stored by mcam module */
res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
@@ -2080,41 +2102,21 @@ bna_num_rxp_set(struct bna *bna, int num_rxp)
}
struct bna_mac *
-bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
-{
- struct list_head *qe;
-
- if (list_empty(&ucam_mod->free_q))
- return NULL;
-
- bfa_q_deq(&ucam_mod->free_q, &qe);
-
- return (struct bna_mac *)qe;
-}
-
-void
-bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
-{
- list_add_tail(&mac->qe, &ucam_mod->free_q);
-}
-
-struct bna_mac *
-bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
+bna_cam_mod_mac_get(struct list_head *head)
{
struct list_head *qe;
- if (list_empty(&mcam_mod->free_q))
+ if (list_empty(head))
return NULL;
- bfa_q_deq(&mcam_mod->free_q, &qe);
-
+ bfa_q_deq(head, &qe);
return (struct bna_mac *)qe;
}
void
-bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
+bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac)
{
- list_add_tail(&mac->qe, &mcam_mod->free_q);
+ list_add_tail(&mac->qe, tail);
}
struct bna_mcam_handle *
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
index af3f7bb0b3b8..2702d02e98d9 100644
--- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -322,6 +322,10 @@ do { \
#define BNA_CQ_EF_REMOTE (1 << 19)
#define BNA_CQ_EF_LOCAL (1 << 20)
+/* The CAT2 ASIC does not use bit 21 as per the spec.
+ * Bit 31 is set in every end-of-frame completion.
+ */
+#define BNA_CQ_EF_EOP (1 << 31)
/* Data structures */
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 3c07064b2bc4..85e63546abe3 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -529,13 +529,13 @@ bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
struct list_head *qe;
int ret;
- /* Delete multicast entries previousely added */
+ /* First delete multicast entries to maintain the count */
while (!list_empty(&rxf->mcast_pending_del_q)) {
bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
if (ret)
return ret;
}
@@ -586,7 +586,7 @@ bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
ret = bna_rxf_mcast_del(rxf, mac, cleanup);
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
if (ret)
return ret;
}
@@ -796,20 +796,20 @@ bna_rxf_uninit(struct bna_rxf *rxf)
while (!list_empty(&rxf->ucast_pending_add_q)) {
bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
bfa_q_qe_init(&mac->qe);
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
}
if (rxf->ucast_pending_mac) {
bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
- rxf->ucast_pending_mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
+ rxf->ucast_pending_mac);
rxf->ucast_pending_mac = NULL;
}
while (!list_empty(&rxf->mcast_pending_add_q)) {
bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
bfa_q_qe_init(&mac->qe);
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
}
rxf->rxmode_pending = 0;
@@ -869,7 +869,7 @@ bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
if (rxf->ucast_pending_mac == NULL) {
rxf->ucast_pending_mac =
- bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
+ bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
if (rxf->ucast_pending_mac == NULL)
return BNA_CB_UCAST_CAM_FULL;
bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
@@ -900,7 +900,7 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
return BNA_CB_SUCCESS;
}
- mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
+ mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
if (mac == NULL)
return BNA_CB_MCAST_LIST_FULL;
bfa_q_qe_init(&mac->qe);
@@ -916,35 +916,92 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
}
enum bna_cb_status
-bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
+bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
void (*cbfn)(struct bnad *, struct bna_rx *))
{
+ struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
struct bna_rxf *rxf = &rx->rxf;
struct list_head list_head;
struct list_head *qe;
u8 *mcaddr;
- struct bna_mac *mac;
+ struct bna_mac *mac, *del_mac;
int i;
+ /* Purge the pending_add_q */
+ while (!list_empty(&rxf->ucast_pending_add_q)) {
+ bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+ }
+
+ /* Schedule active_q entries for deletion */
+ while (!list_empty(&rxf->ucast_active_q)) {
+ bfa_q_deq(&rxf->ucast_active_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+
+ del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
+ memcpy(del_mac, mac, sizeof(*del_mac));
+ list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
+ bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+ }
+
/* Allocate nodes */
INIT_LIST_HEAD(&list_head);
- for (i = 0, mcaddr = mclist; i < count; i++) {
- mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
+ for (i = 0, mcaddr = uclist; i < count; i++) {
+ mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
if (mac == NULL)
goto err_return;
bfa_q_qe_init(&mac->qe);
memcpy(mac->addr, mcaddr, ETH_ALEN);
list_add_tail(&mac->qe, &list_head);
-
mcaddr += ETH_ALEN;
}
+ /* Add the new entries */
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
+ }
+
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+
+ return BNA_CB_SUCCESS;
+
+err_return:
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+ }
+
+ return BNA_CB_UCAST_CAM_FULL;
+}
+
+enum bna_cb_status
+bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
+ void (*cbfn)(struct bnad *, struct bna_rx *))
+{
+ struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head list_head;
+ struct list_head *qe;
+ u8 *mcaddr;
+ struct bna_mac *mac, *del_mac;
+ int i;
+
/* Purge the pending_add_q */
while (!list_empty(&rxf->mcast_pending_add_q)) {
bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
}
/* Schedule active_q entries for deletion */
@@ -952,7 +1009,26 @@ bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
bfa_q_deq(&rxf->mcast_active_q, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
- list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
+
+ del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
+
+ memcpy(del_mac, mac, sizeof(*del_mac));
+ list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
+ mac->handle = NULL;
+ bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
+ }
+
+ /* Allocate nodes */
+ INIT_LIST_HEAD(&list_head);
+ for (i = 0, mcaddr = mclist; i < count; i++) {
+ mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
+ if (mac == NULL)
+ goto err_return;
+ bfa_q_qe_init(&mac->qe);
+ memcpy(mac->addr, mcaddr, ETH_ALEN);
+ list_add_tail(&mac->qe, &list_head);
+
+ mcaddr += ETH_ALEN;
}
/* Add the new entries */
@@ -974,13 +1050,56 @@ err_return:
bfa_q_deq(&list_head, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
}
return BNA_CB_MCAST_LIST_FULL;
}
void
+bna_rx_mcast_delall(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head *qe;
+ struct bna_mac *mac, *del_mac;
+ int need_hw_config = 0;
+
+ /* Purge all entries from pending_add_q */
+ while (!list_empty(&rxf->mcast_pending_add_q)) {
+ bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+ }
+
+ /* Schedule all entries in active_q for deletion */
+ while (!list_empty(&rxf->mcast_active_q)) {
+ bfa_q_deq(&rxf->mcast_active_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+
+ del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
+
+ memcpy(del_mac, mac, sizeof(*del_mac));
+ list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
+ mac->handle = NULL;
+ bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+ need_hw_config = 1;
+ }
+
+ if (need_hw_config) {
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+ return;
+ }
+
+ if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx);
+}
+
+void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
struct bna_rxf *rxf = &rx->rxf;
@@ -1022,7 +1141,7 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
return 1;
}
@@ -1062,11 +1181,13 @@ bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
if (cleanup == BNA_SOFT_CLEANUP)
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
+ mac);
else {
bna_bfi_ucast_req(rxf, mac,
BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
+ mac);
return 1;
}
}
@@ -1690,6 +1811,7 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
cfg_req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
+ cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
cfg_req->num_queue_sets = rx->num_paths;
for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
i < rx->num_paths;
@@ -1711,8 +1833,17 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
/* Large/Single RxQ */
bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
&q0->qpt);
- q0->buffer_size =
- bna_enet_mtu_get(&rx->bna->enet);
+ if (q0->multi_buffer)
+ /* multi-buffer is enabled by allocating
+ * a new rx with a new set of resources.
+ * q0->buffer_size should be initialized to
+ * the fragment size.
+ */
+ cfg_req->rx_cfg.multi_buffer =
+ BNA_STATUS_T_ENABLED;
+ else
+ q0->buffer_size =
+ bna_enet_mtu_get(&rx->bna->enet);
cfg_req->q_cfg[i].ql.rx_buffer_size =
htons((u16)q0->buffer_size);
break;
@@ -2262,8 +2393,8 @@ bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
u32 hq_depth;
u32 dq_depth;
- dq_depth = q_cfg->q_depth;
- hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
+ dq_depth = q_cfg->q0_depth;
+ hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
cq_depth = dq_depth + hq_depth;
BNA_TO_POWER_OF_2_HIGH(cq_depth);
@@ -2380,10 +2511,10 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
struct bna_rxq *q0;
struct bna_rxq *q1;
struct bna_intr_info *intr_info;
- u32 page_count;
+ struct bna_mem_descr *hqunmap_mem;
+ struct bna_mem_descr *dqunmap_mem;
struct bna_mem_descr *ccb_mem;
struct bna_mem_descr *rcb_mem;
- struct bna_mem_descr *unmapq_mem;
struct bna_mem_descr *cqpt_mem;
struct bna_mem_descr *cswqpt_mem;
struct bna_mem_descr *cpage_mem;
@@ -2393,8 +2524,10 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
struct bna_mem_descr *dsqpt_mem;
struct bna_mem_descr *hpage_mem;
struct bna_mem_descr *dpage_mem;
- int i;
- int dpage_count, hpage_count, rcb_idx;
+ u32 dpage_count, hpage_count;
+ u32 hq_idx, dq_idx, rcb_idx;
+ u32 cq_depth, i;
+ u32 page_count;
if (!bna_rx_res_check(rx_mod, rx_cfg))
return NULL;
@@ -2402,7 +2535,8 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
- unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
+ dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
+ hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
@@ -2454,7 +2588,8 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
}
rx->num_paths = rx_cfg->num_paths;
- for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) {
+ for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
+ i < rx->num_paths; i++) {
rxp = bna_rxp_get(rx_mod);
list_add_tail(&rxp->qe, &rx->rxp_q);
rxp->type = rx_cfg->rxp_type;
@@ -2497,9 +2632,13 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
q0->rxp = rxp;
q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
- q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
- rcb_idx++;
- q0->rcb->q_depth = rx_cfg->q_depth;
+ q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
+ rcb_idx++; dq_idx++;
+ q0->rcb->q_depth = rx_cfg->q0_depth;
+ q0->q_depth = rx_cfg->q0_depth;
+ q0->multi_buffer = rx_cfg->q0_multi_buf;
+ q0->buffer_size = rx_cfg->q0_buf_size;
+ q0->num_vecs = rx_cfg->q0_num_vecs;
q0->rcb->rxq = q0;
q0->rcb->bnad = bna->bnad;
q0->rcb->id = 0;
@@ -2519,15 +2658,18 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
q1->rxp = rxp;
q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
- q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
- rcb_idx++;
- q1->rcb->q_depth = rx_cfg->q_depth;
+ q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
+ rcb_idx++; hq_idx++;
+ q1->rcb->q_depth = rx_cfg->q1_depth;
+ q1->q_depth = rx_cfg->q1_depth;
+ q1->multi_buffer = BNA_STATUS_T_DISABLED;
+ q1->num_vecs = 1;
q1->rcb->rxq = q1;
q1->rcb->bnad = bna->bnad;
q1->rcb->id = 1;
q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
rx_cfg->hds_config.forced_offset
- : rx_cfg->small_buff_size;
+ : rx_cfg->q1_buf_size;
q1->rx_packets = q1->rx_bytes = 0;
q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
@@ -2542,9 +2684,14 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
/* Setup CQ */
rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
- rxp->cq.ccb->q_depth = rx_cfg->q_depth +
- ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
- 0 : rx_cfg->q_depth);
+ cq_depth = rx_cfg->q0_depth +
+ ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
+ 0 : rx_cfg->q1_depth);
+ /* If multi-buffer is enabled, the sum of q0_depth
+ * and q1_depth need not be a power of 2.
+ */
+ BNA_TO_POWER_OF_2_HIGH(cq_depth);
+ rxp->cq.ccb->q_depth = cq_depth;
rxp->cq.ccb->cq = &rxp->cq;
rxp->cq.ccb->rcb[0] = q0->rcb;
q0->rcb->ccb = rxp->cq.ccb;
@@ -2670,6 +2817,30 @@ bna_rx_cleanup_complete(struct bna_rx *rx)
bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
}
+void
+bna_rx_vlan_strip_enable(struct bna_rx *rx)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
+ rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
+ rxf->vlan_strip_pending = true;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+ }
+}
+
+void
+bna_rx_vlan_strip_disable(struct bna_rx *rx)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
+ rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
+ rxf->vlan_strip_pending = true;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+ }
+}
+
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
enum bna_rxmode bitmask,
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index dc50f7836b6d..621547cd3504 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -109,20 +109,21 @@ enum bna_tx_res_req_type {
enum bna_rx_mem_type {
BNA_RX_RES_MEM_T_CCB = 0, /* CQ context */
BNA_RX_RES_MEM_T_RCB = 1, /* CQ context */
- BNA_RX_RES_MEM_T_UNMAPQ = 2, /* UnmapQ for RxQs */
- BNA_RX_RES_MEM_T_CQPT = 3, /* CQ QPT */
- BNA_RX_RES_MEM_T_CSWQPT = 4, /* S/W QPT */
- BNA_RX_RES_MEM_T_CQPT_PAGE = 5, /* CQPT page */
- BNA_RX_RES_MEM_T_HQPT = 6, /* RX QPT */
- BNA_RX_RES_MEM_T_DQPT = 7, /* RX QPT */
- BNA_RX_RES_MEM_T_HSWQPT = 8, /* RX s/w QPT */
- BNA_RX_RES_MEM_T_DSWQPT = 9, /* RX s/w QPT */
- BNA_RX_RES_MEM_T_DPAGE = 10, /* RX s/w QPT */
- BNA_RX_RES_MEM_T_HPAGE = 11, /* RX s/w QPT */
- BNA_RX_RES_MEM_T_IBIDX = 12,
- BNA_RX_RES_MEM_T_RIT = 13,
- BNA_RX_RES_T_INTR = 14, /* Rx interrupts */
- BNA_RX_RES_T_MAX = 15
+ BNA_RX_RES_MEM_T_UNMAPHQ = 2,
+ BNA_RX_RES_MEM_T_UNMAPDQ = 3,
+ BNA_RX_RES_MEM_T_CQPT = 4,
+ BNA_RX_RES_MEM_T_CSWQPT = 5,
+ BNA_RX_RES_MEM_T_CQPT_PAGE = 6,
+ BNA_RX_RES_MEM_T_HQPT = 7,
+ BNA_RX_RES_MEM_T_DQPT = 8,
+ BNA_RX_RES_MEM_T_HSWQPT = 9,
+ BNA_RX_RES_MEM_T_DSWQPT = 10,
+ BNA_RX_RES_MEM_T_DPAGE = 11,
+ BNA_RX_RES_MEM_T_HPAGE = 12,
+ BNA_RX_RES_MEM_T_IBIDX = 13,
+ BNA_RX_RES_MEM_T_RIT = 14,
+ BNA_RX_RES_T_INTR = 15,
+ BNA_RX_RES_T_MAX = 16
};
enum bna_tx_type {
@@ -583,6 +584,8 @@ struct bna_rxq {
int buffer_size;
int q_depth;
+ u32 num_vecs;
+ enum bna_status multi_buffer;
struct bna_qpt qpt;
struct bna_rcb *rcb;
@@ -632,6 +635,8 @@ struct bna_ccb {
struct bna_rcb *rcb[2];
void *ctrl; /* For bnad */
struct bna_pkt_rate pkt_rate;
+ u32 pkts_una;
+ u32 bytes_per_intr;
/* Control path */
struct bna_cq *cq;
@@ -671,14 +676,22 @@ struct bna_rx_config {
int num_paths;
enum bna_rxp_type rxp_type;
int paused;
- int q_depth;
int coalescing_timeo;
/*
* Small/Large (or Header/Data) buffer size to be configured
- * for SLR and HDS queue type. Large buffer size comes from
- * enet->mtu.
+ * for SLR and HDS queue type.
*/
- int small_buff_size;
+ u32 frame_size;
+
+ /* header or small queue */
+ u32 q1_depth;
+ u32 q1_buf_size;
+
+ /* data or large queue */
+ u32 q0_depth;
+ u32 q0_buf_size;
+ u32 q0_num_vecs;
+ enum bna_status q0_multi_buf;
enum bna_status rss_status;
struct bna_rss_config rss_config;
@@ -866,8 +879,9 @@ struct bna_rx_mod {
/* CAM */
struct bna_ucam_mod {
- struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */
+ struct bna_mac *ucmac; /* num_ucmac * 2 entries */
struct list_head free_q;
+ struct list_head del_q;
struct bna *bna;
};
@@ -880,9 +894,10 @@ struct bna_mcam_handle {
};
struct bna_mcam_mod {
- struct bna_mac *mcmac; /* BFI_MAX_MCMAC entries */
- struct bna_mcam_handle *mchandle; /* BFI_MAX_MCMAC entries */
+ struct bna_mac *mcmac; /* num_mcmac * 2 entries */
+ struct bna_mcam_handle *mchandle; /* num_mcmac entries */
struct list_head free_q;
+ struct list_head del_q;
struct list_head free_handle_q;
struct bna *bna;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 248bc37cb41b..5f24a9ffcfaa 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
/*
* Global variables
*/
-u32 bnad_rxqs_per_cq = 2;
+static u32 bnad_rxqs_per_cq = 2;
static u32 bna_id;
static struct mutex bnad_list_mutex;
static LIST_HEAD(bnad_list);
@@ -142,7 +142,8 @@ bnad_tx_buff_unmap(struct bnad *bnad,
dma_unmap_page(&bnad->pcidev->dev,
dma_unmap_addr(&unmap->vectors[vector], dma_addr),
- skb_shinfo(skb)->frags[nvecs].size, DMA_TO_DEVICE);
+ dma_unmap_len(&unmap->vectors[vector], dma_len),
+ DMA_TO_DEVICE);
dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
nvecs--;
}
@@ -282,27 +283,32 @@ static int
bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
{
struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
- int mtu, order;
+ int order;
bnad_rxq_alloc_uninit(bnad, rcb);
- mtu = bna_enet_mtu_get(&bnad->bna.enet);
- order = get_order(mtu);
+ order = get_order(rcb->rxq->buffer_size);
+
+ unmap_q->type = BNAD_RXBUF_PAGE;
if (bna_is_small_rxq(rcb->id)) {
unmap_q->alloc_order = 0;
unmap_q->map_size = rcb->rxq->buffer_size;
} else {
- unmap_q->alloc_order = order;
- unmap_q->map_size =
- (rcb->rxq->buffer_size > 2048) ?
- PAGE_SIZE << order : 2048;
+ if (rcb->rxq->multi_buffer) {
+ unmap_q->alloc_order = 0;
+ unmap_q->map_size = rcb->rxq->buffer_size;
+ unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
+ } else {
+ unmap_q->alloc_order = order;
+ unmap_q->map_size =
+ (rcb->rxq->buffer_size > 2048) ?
+ PAGE_SIZE << order : 2048;
+ }
}
BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
- unmap_q->type = BNAD_RXBUF_PAGE;
-
return 0;
}
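
The sizing rules above pack small buffers two per 2048-byte slot, round large single-buffer frames up to a whole higher-order page, and map exactly one fragment per buffer when multi-buffer is on. A worked run of the single-buffer arithmetic, assuming PAGE_SIZE is 4096:

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1u << PAGE_SHIFT)

	/* smallest page order whose allocation covers size, like get_order() */
	static unsigned int get_order_sz(unsigned int size)
	{
		unsigned int order = 0;

		while ((PAGE_SIZE << order) < size)
			order++;
		return order;
	}

	int main(void)
	{
		unsigned int sizes[] = { 1500, 2048, 9000 };

		for (unsigned int i = 0; i < 3; i++) {
			unsigned int bsz = sizes[i];
			unsigned int order = get_order_sz(bsz);
			unsigned int map = bsz > 2048 ? PAGE_SIZE << order : 2048;

			/* 1500 -> order 0, map 2048 (two buffers per page)
			 * 9000 -> order 2, map 16384 (one buffer per 16K block)
			 */
			printf("buf %u -> order %u, map_size %u\n", bsz, order, map);
		}
		return 0;
	}
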
@@ -345,10 +351,10 @@ bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
for (i = 0; i < rcb->q_depth; i++) {
struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
- bnad_rxq_cleanup_page(bnad, unmap);
- else
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
bnad_rxq_cleanup_skb(bnad, unmap);
+ else
+ bnad_rxq_cleanup_page(bnad, unmap);
}
bnad_rxq_alloc_uninit(bnad, rcb);
}
@@ -480,10 +486,10 @@ bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
return;
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
- bnad_rxq_refill_page(bnad, rcb, to_alloc);
- else
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
bnad_rxq_refill_skb(bnad, rcb, to_alloc);
+ else
+ bnad_rxq_refill_page(bnad, rcb, to_alloc);
}
#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
@@ -500,72 +506,114 @@ bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
#define flags_udp6 (BNA_CQ_EF_IPV6 | \
BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
-static inline struct sk_buff *
-bnad_cq_prepare_skb(struct bnad_rx_ctrl *rx_ctrl,
- struct bnad_rx_unmap_q *unmap_q,
- struct bnad_rx_unmap *unmap,
- u32 length, u32 flags)
+static void
+bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
+ u32 sop_ci, u32 nvecs)
{
- struct bnad *bnad = rx_ctrl->bnad;
- struct sk_buff *skb;
+ struct bnad_rx_unmap_q *unmap_q;
+ struct bnad_rx_unmap *unmap;
+ u32 ci, vec;
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) {
- skb = napi_get_frags(&rx_ctrl->napi);
- if (unlikely(!skb))
- return NULL;
+ unmap_q = rcb->unmap_q;
+ for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
+ unmap = &unmap_q->unmap[ci];
+ BNA_QE_INDX_INC(ci, rcb->q_depth);
+
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
+ bnad_rxq_cleanup_skb(bnad, unmap);
+ else
+ bnad_rxq_cleanup_page(bnad, unmap);
+ }
+}
+
+static void
+bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
+ u32 sop_ci, u32 nvecs, u32 last_fraglen)
+{
+ struct bnad *bnad;
+ u32 ci, vec, len, totlen = 0;
+ struct bnad_rx_unmap_q *unmap_q;
+ struct bnad_rx_unmap *unmap;
+
+ unmap_q = rcb->unmap_q;
+ bnad = rcb->bnad;
+
+ /* prefetch header */
+ prefetch(page_address(unmap_q->unmap[sop_ci].page) +
+ unmap_q->unmap[sop_ci].page_offset);
+
+ for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) {
+ unmap = &unmap_q->unmap[ci];
+ BNA_QE_INDX_INC(ci, rcb->q_depth);
dma_unmap_page(&bnad->pcidev->dev,
dma_unmap_addr(&unmap->vector, dma_addr),
unmap->vector.len, DMA_FROM_DEVICE);
+
+ len = (vec == nvecs) ?
+ last_fraglen : unmap->vector.len;
+ totlen += len;
+
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- unmap->page, unmap->page_offset, length);
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
+ unmap->page, unmap->page_offset, len);
unmap->page = NULL;
unmap->vector.len = 0;
-
- return skb;
}
- skb = unmap->skb;
- BUG_ON(!skb);
+ skb->len += totlen;
+ skb->data_len += totlen;
+ skb->truesize += totlen;
+}
+
+static inline void
+bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
+ struct bnad_rx_unmap *unmap, u32 len)
+{
+ prefetch(skb->data);
dma_unmap_single(&bnad->pcidev->dev,
dma_unmap_addr(&unmap->vector, dma_addr),
unmap->vector.len, DMA_FROM_DEVICE);
- skb_put(skb, length);
-
+ skb_put(skb, len);
skb->protocol = eth_type_trans(skb, bnad->netdev);
unmap->skb = NULL;
unmap->vector.len = 0;
- return skb;
}
static u32
bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
- struct bna_cq_entry *cq, *cmpl;
+ struct bna_cq_entry *cq, *cmpl, *next_cmpl;
struct bna_rcb *rcb = NULL;
struct bnad_rx_unmap_q *unmap_q;
- struct bnad_rx_unmap *unmap;
- struct sk_buff *skb;
+ struct bnad_rx_unmap *unmap = NULL;
+ struct sk_buff *skb = NULL;
struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
- u32 packets = 0, length = 0, flags, masked_flags;
+ u32 packets = 0, len = 0, totlen = 0;
+ u32 pi, vec, sop_ci = 0, nvecs = 0;
+ u32 flags, masked_flags;
prefetch(bnad->netdev);
cq = ccb->sw_q;
cmpl = &cq[ccb->producer_index];
- while (cmpl->valid && (packets < budget)) {
- packets++;
- flags = ntohl(cmpl->flags);
- length = ntohs(cmpl->length);
+ while (packets < budget) {
+ if (!cmpl->valid)
+ break;
+ /* The 'valid' field is set by the adapter, only after writing
+ * the other fields of completion entry. Hence, do not load
+ * other fields of completion entry *before* the 'valid' is
+ * loaded. Adding the rmb() here prevents the compiler and/or
+ * CPU from reordering the reads which would potentially result
+ * in reading stale values in completion entry.
+ */
+ rmb();
+
BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
if (bna_is_small_rxq(cmpl->rxq_id))
@@ -574,25 +622,78 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
rcb = ccb->rcb[0];
unmap_q = rcb->unmap_q;
- unmap = &unmap_q->unmap[rcb->consumer_index];
- if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
- BNA_CQ_EF_FCS_ERROR |
- BNA_CQ_EF_TOO_LONG))) {
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
- bnad_rxq_cleanup_page(bnad, unmap);
- else
- bnad_rxq_cleanup_skb(bnad, unmap);
+ /* start of packet ci */
+ sop_ci = rcb->consumer_index;
+
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
+ unmap = &unmap_q->unmap[sop_ci];
+ skb = unmap->skb;
+ } else {
+ skb = napi_get_frags(&rx_ctrl->napi);
+ if (unlikely(!skb))
+ break;
+ }
+ prefetch(skb);
+
+ flags = ntohl(cmpl->flags);
+ len = ntohs(cmpl->length);
+ totlen = len;
+ nvecs = 1;
+ /* Check all the completions of this frame.
+ * If one is not valid yet, busy-waiting will not help;
+ * break out and resume on the next poll.
+ */
+ if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
+ (flags & BNA_CQ_EF_EOP) == 0) {
+ pi = ccb->producer_index;
+ do {
+ BNA_QE_INDX_INC(pi, ccb->q_depth);
+ next_cmpl = &cq[pi];
+
+ if (!next_cmpl->valid)
+ break;
+ /* The 'valid' field is set by the adapter only
+ * after writing the other fields of the
+ * completion entry. Hence, do not load the other
+ * fields of the completion entry *before* 'valid'
+ * is loaded. The rmb() here prevents the compiler
+ * and/or CPU from reordering the reads, which
+ * could otherwise result in stale values being
+ * read from the completion entry.
+ */
+ rmb();
+
+ len = ntohs(next_cmpl->length);
+ flags = ntohl(next_cmpl->flags);
+
+ nvecs++;
+ totlen += len;
+ } while ((flags & BNA_CQ_EF_EOP) == 0);
+
+ if (!next_cmpl->valid)
+ break;
+ }
+
+ /* TODO: BNA_CQ_EF_LOCAL ? */
+ if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
+ BNA_CQ_EF_FCS_ERROR |
+ BNA_CQ_EF_TOO_LONG))) {
+ bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
rcb->rxq->rx_packets_with_error++;
+
goto next;
}
- skb = bnad_cq_prepare_skb(ccb->ctrl, unmap_q, unmap,
- length, flags);
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
+ bnad_cq_setup_skb(bnad, skb, unmap, len);
+ else
+ bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
- if (unlikely(!skb))
- break;
+ packets++;
+ rcb->rxq->rx_packets++;
+ rcb->rxq->rx_bytes += totlen;
+ ccb->bytes_per_intr += totlen;
masked_flags = flags & flags_cksum_prot_mask;
@@ -606,21 +707,21 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
else
skb_checksum_none_assert(skb);
- rcb->rxq->rx_packets++;
- rcb->rxq->rx_bytes += length;
-
if (flags & BNA_CQ_EF_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
- napi_gro_frags(&rx_ctrl->napi);
- else
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
netif_receive_skb(skb);
+ else
+ napi_gro_frags(&rx_ctrl->napi);
next:
- cmpl->valid = 0;
- BNA_QE_INDX_INC(rcb->consumer_index, rcb->q_depth);
- BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
+ BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
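+ /* invalidate all the completion entries consumed for this frame */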
+ for (vec = 0; vec < nvecs; vec++) {
+ cmpl = &cq[ccb->producer_index];
+ cmpl->valid = 0;
+ BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
+ }
cmpl = &cq[ccb->producer_index];
}
@@ -1899,8 +2000,10 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
tx_info);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- if (!tx)
+ if (!tx) {
+ err = -ENOMEM;
goto err_return;
+ }
tx_info->tx = tx;
INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
@@ -1911,7 +2014,7 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
err = bnad_tx_msix_register(bnad, tx_info,
tx_id, bnad->num_txq_per_tx);
if (err)
- goto err_return;
+ goto cleanup_tx;
}
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1920,6 +2023,12 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
return 0;
+cleanup_tx:
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_tx_destroy(tx_info->tx);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ tx_info->tx = NULL;
+ tx_info->tx_id = 0;
err_return:
bnad_tx_res_free(bnad, res_info);
return err;
@@ -1930,6 +2039,7 @@ err_return:
static void
bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
{
+ memset(rx_config, 0, sizeof(*rx_config));
rx_config->rx_type = BNA_RX_T_REGULAR;
rx_config->num_paths = bnad->num_rxp_per_rx;
rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
@@ -1950,10 +2060,39 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
memset(&rx_config->rss_config, 0,
sizeof(rx_config->rss_config));
}
+
+ rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
+ rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
+
+ /* BNA_RXP_SINGLE - one data-buffer queue
+ * BNA_RXP_SLR - one small-buffer and one large-buffer queue
+ * BNA_RXP_HDS - one header-buffer and one data-buffer queue
+ */
+ /* TODO: configurable param for queue type */
rx_config->rxp_type = BNA_RXP_SLR;
- rx_config->q_depth = bnad->rxq_depth;
- rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
+ if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
+ rx_config->frame_size > 4096) {
+ /* Though size_routing_enable is set in SLR,
+ * small packets may still get routed to the same rxq.
+ * Set buf_size to 2048 instead of PAGE_SIZE.
+ */
+ rx_config->q0_buf_size = 2048;
+ /* this should be a multiple of 2 */
+ rx_config->q0_num_vecs = 4;
+ rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
+ rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
+ } else {
+ rx_config->q0_buf_size = rx_config->frame_size;
+ rx_config->q0_num_vecs = 1;
+ rx_config->q0_depth = bnad->rxq_depth;
+ }
+
+ /* initialize q1 for BNA_RXP_SLR/BNA_RXP_HDS */
+ if (rx_config->rxp_type == BNA_RXP_SLR) {
+ rx_config->q1_depth = bnad->rxq_depth;
+ rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
+ }
rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
}
@@ -1969,6 +2108,49 @@ bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
}
/* Called with mutex_lock(&bnad->conf_mutex) held */
+u32
+bnad_reinit_rx(struct bnad *bnad)
+{
+ struct net_device *netdev = bnad->netdev;
+ u32 err = 0, current_err = 0;
+ u32 rx_id = 0, count = 0;
+ unsigned long flags;
+
+ /* destroy and create new rx objects */
+ for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
+ if (!bnad->rx_info[rx_id].rx)
+ continue;
+ bnad_destroy_rx(bnad, rx_id);
+ }
+
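+ /* program the new frame size into the enet block before re-creating rx */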
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_enet_mtu_set(&bnad->bna.enet,
+ BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
+ count++;
+ current_err = bnad_setup_rx(bnad, rx_id);
+ if (current_err && !err) {
+ err = current_err;
+ pr_err("RXQ:%u setup failed\n", rx_id);
+ }
+ }
+
+ /* restore rx configuration */
+ if (bnad->rx_info[0].rx && !err) {
+ bnad_restore_vlans(bnad, 0);
+ bnad_enable_default_bcast(bnad);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ bnad_set_rx_mode(netdev);
+ }
+
+ return count;
+}
+
+/* Called with bnad_conf_lock() held */
void
bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
{
@@ -2047,13 +2229,19 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
spin_unlock_irqrestore(&bnad->bna_lock, flags);
/* Fill Unmap Q memory requirements */
- BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPQ],
- rx_config->num_paths +
- ((rx_config->rxp_type == BNA_RXP_SINGLE) ?
- 0 : rx_config->num_paths),
- ((bnad->rxq_depth * sizeof(struct bnad_rx_unmap)) +
- sizeof(struct bnad_rx_unmap_q)));
-
+ BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
+ rx_config->num_paths,
+ (rx_config->q0_depth *
+ sizeof(struct bnad_rx_unmap)) +
+ sizeof(struct bnad_rx_unmap_q));
+
+ if (rx_config->rxp_type != BNA_RXP_SINGLE) {
+ BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
+ rx_config->num_paths,
+ (rx_config->q1_depth *
+ sizeof(struct bnad_rx_unmap) +
+ sizeof(struct bnad_rx_unmap_q)));
+ }
/* Allocate resource */
err = bnad_rx_res_alloc(bnad, res_info, rx_id);
if (err)
@@ -2548,7 +2736,6 @@ bnad_open(struct net_device *netdev)
int err;
struct bnad *bnad = netdev_priv(netdev);
struct bna_pause_config pause_config;
- int mtu;
unsigned long flags;
mutex_lock(&bnad->conf_mutex);
@@ -2567,10 +2754,9 @@ bnad_open(struct net_device *netdev)
pause_config.tx_pause = 0;
pause_config.rx_pause = 0;
- mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
-
spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
+ bna_enet_mtu_set(&bnad->bna.enet,
+ BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
bna_enet_enable(&bnad->bna.enet);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -2624,9 +2810,6 @@ bnad_stop(struct net_device *netdev)
bnad_destroy_tx(bnad, 0);
bnad_destroy_rx(bnad, 0);
- /* These config flags are cleared in the hardware */
- bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI | BNAD_CF_PROMISC);
-
/* Synchronize mailbox IRQ */
bnad_mbox_irq_sync(bnad);
@@ -2784,21 +2967,21 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
}
tcb = bnad->tx_info[0].tcb[txq_id];
- q_depth = tcb->q_depth;
- prod = tcb->producer_index;
-
- unmap_q = tcb->unmap_q;
/*
* Takes care of the Tx that is scheduled between clearing the flag
* and the netif_tx_stop_all_queues() call.
*/
- if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
+ if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
dev_kfree_skb(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
return NETDEV_TX_OK;
}
+ q_depth = tcb->q_depth;
+ prod = tcb->producer_index;
+ unmap_q = tcb->unmap_q;
+
vectors = 1 + skb_shinfo(skb)->nr_frags;
wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
@@ -2863,7 +3046,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
for (i = 0, vect_id = 0; i < vectors - 1; i++) {
const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
- u16 size = skb_frag_size(frag);
+ u32 size = skb_frag_size(frag);
if (unlikely(size == 0)) {
/* Undo the changes starting at tcb->producer_index */
@@ -2888,10 +3071,11 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
0, size, DMA_TO_DEVICE);
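+ /* record the mapped length for the matching unmap on Tx completion */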
+ dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
txqent->vector[vect_id].length = htons(size);
dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
- dma_addr);
+ dma_addr);
head_unmap->nvecs++;
}
@@ -2911,6 +3095,8 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
return NETDEV_TX_OK;
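+ /* take the software Tx timestamp just before ringing the doorbell */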
+ skb_tx_timestamp(skb);
+
bna_txq_prod_indx_doorbell(tcb);
smp_mb();
@@ -2937,73 +3123,133 @@ bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
return stats;
}
+static void
+bnad_set_rx_ucast_fltr(struct bnad *bnad)
+{
+ struct net_device *netdev = bnad->netdev;
+ int uc_count = netdev_uc_count(netdev);
+ enum bna_cb_status ret;
+ u8 *mac_list;
+ struct netdev_hw_addr *ha;
+ int entry;
+
+ if (netdev_uc_empty(bnad->netdev)) {
+ bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
+ return;
+ }
+
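+ /* fall back to the default function when the list exceeds UCAM capacity */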
+ if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
+ goto mode_default;
+
+ mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
+ if (mac_list == NULL)
+ goto mode_default;
+
+ entry = 0;
+ netdev_for_each_uc_addr(ha, netdev) {
+ memcpy(&mac_list[entry * ETH_ALEN],
+ &ha->addr[0], ETH_ALEN);
+ entry++;
+ }
+
+ ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry,
+ mac_list, NULL);
+ kfree(mac_list);
+
+ if (ret != BNA_CB_SUCCESS)
+ goto mode_default;
+
+ return;
+
+ /* ucast packets not in UCAM are routed to default function */
+mode_default:
+ bnad->cfg_flags |= BNAD_CF_DEFAULT;
+ bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
+}
+
+static void
+bnad_set_rx_mcast_fltr(struct bnad *bnad)
+{
+ struct net_device *netdev = bnad->netdev;
+ int mc_count = netdev_mc_count(netdev);
+ enum bna_cb_status ret;
+ u8 *mac_list;
+
+ if (netdev->flags & IFF_ALLMULTI)
+ goto mode_allmulti;
+
+ if (netdev_mc_empty(netdev))
+ return;
+
+ if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
+ goto mode_allmulti;
+
+ mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
+
+ if (mac_list == NULL)
+ goto mode_allmulti;
+
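+ /* index 0 holds the broadcast address */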
+ memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN);
+
+ /* copy rest of the MCAST addresses */
+ bnad_netdev_mc_list_get(netdev, mac_list);
+ ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
+ mac_list, NULL);
+ kfree(mac_list);
+
+ if (ret != BNA_CB_SUCCESS)
+ goto mode_allmulti;
+
+ return;
+
+mode_allmulti:
+ bnad->cfg_flags |= BNAD_CF_ALLMULTI;
+ bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL);
+}
+
void
bnad_set_rx_mode(struct net_device *netdev)
{
struct bnad *bnad = netdev_priv(netdev);
- u32 new_mask, valid_mask;
+ enum bna_rxmode new_mode, mode_mask;
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
- new_mask = valid_mask = 0;
-
- if (netdev->flags & IFF_PROMISC) {
- if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
- new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
- valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
- bnad->cfg_flags |= BNAD_CF_PROMISC;
- }
- } else {
- if (bnad->cfg_flags & BNAD_CF_PROMISC) {
- new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
- valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
- bnad->cfg_flags &= ~BNAD_CF_PROMISC;
- }
- }
-
- if (netdev->flags & IFF_ALLMULTI) {
- if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
- new_mask |= BNA_RXMODE_ALLMULTI;
- valid_mask |= BNA_RXMODE_ALLMULTI;
- bnad->cfg_flags |= BNAD_CF_ALLMULTI;
- }
- } else {
- if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
- new_mask &= ~BNA_RXMODE_ALLMULTI;
- valid_mask |= BNA_RXMODE_ALLMULTI;
- bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
- }
+ if (bnad->rx_info[0].rx == NULL) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ return;
}
- if (bnad->rx_info[0].rx == NULL)
- goto unlock;
+ /* clear bnad flags to update it with new settings */
+ bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
+ BNAD_CF_ALLMULTI);
- bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
+ new_mode = 0;
+ if (netdev->flags & IFF_PROMISC) {
+ new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
+ bnad->cfg_flags |= BNAD_CF_PROMISC;
+ } else {
+ bnad_set_rx_mcast_fltr(bnad);
- if (!netdev_mc_empty(netdev)) {
- u8 *mcaddr_list;
- int mc_count = netdev_mc_count(netdev);
+ if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
+ new_mode |= BNA_RXMODE_ALLMULTI;
- /* Index 0 holds the broadcast address */
- mcaddr_list =
- kzalloc((mc_count + 1) * ETH_ALEN,
- GFP_ATOMIC);
- if (!mcaddr_list)
- goto unlock;
+ bnad_set_rx_ucast_fltr(bnad);
- memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
+ if (bnad->cfg_flags & BNAD_CF_DEFAULT)
+ new_mode |= BNA_RXMODE_DEFAULT;
+ }
- /* Copy rest of the MC addresses */
- bnad_netdev_mc_list_get(netdev, mcaddr_list);
+ mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
+ BNA_RXMODE_ALLMULTI;
+ bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
- bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
- mcaddr_list, NULL);
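+ /* keep VLAN tags intact in promiscuous mode; strip them otherwise */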
+ if (bnad->cfg_flags & BNAD_CF_PROMISC)
+ bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
+ else
+ bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
- /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
- kfree(mcaddr_list);
- }
-unlock:
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -3033,14 +3279,14 @@ bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
}
static int
-bnad_mtu_set(struct bnad *bnad, int mtu)
+bnad_mtu_set(struct bnad *bnad, int frame_size)
{
unsigned long flags;
init_completion(&bnad->bnad_completions.mtu_comp);
spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
+ bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
wait_for_completion(&bnad->bnad_completions.mtu_comp);
@@ -3051,18 +3297,34 @@ bnad_mtu_set(struct bnad *bnad, int mtu)
static int
bnad_change_mtu(struct net_device *netdev, int new_mtu)
{
- int err, mtu = netdev->mtu;
+ int err, mtu;
struct bnad *bnad = netdev_priv(netdev);
+ u32 rx_count = 0, frame, new_frame;
if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
return -EINVAL;
mutex_lock(&bnad->conf_mutex);
+ mtu = netdev->mtu;
netdev->mtu = new_mtu;
- mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
- err = bnad_mtu_set(bnad, mtu);
+ frame = BNAD_FRAME_SIZE(mtu);
+ new_frame = BNAD_FRAME_SIZE(new_mtu);
+
+ /* check if multi-buffer needs to be enabled */
+ if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
+ netif_running(bnad->netdev)) {
+ /* only when the frame size transition crosses the 4K boundary */
+ if ((frame <= 4096 && new_frame > 4096) ||
+ (frame > 4096 && new_frame <= 4096))
+ rx_count = bnad_reinit_rx(bnad);
+ }
+
+ /* rx_count > 0 - new rx objects were created;
+ * Linux expects err = 0 to be returned
+ */
+ err = bnad_mtu_set(bnad, new_frame);
if (err)
err = -EBUSY;
@@ -3262,7 +3524,6 @@ bnad_uninit(struct bnad *bnad)
if (bnad->bar0)
iounmap(bnad->bar0);
- pci_set_drvdata(bnad->pcidev, NULL);
}
/*
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index f7e033f8a00e..2842c188e0da 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -71,7 +71,7 @@ struct bnad_rx_ctrl {
#define BNAD_NAME "bna"
#define BNAD_NAME_LEN 64
-#define BNAD_VERSION "3.2.21.1"
+#define BNAD_VERSION "3.2.23.0"
#define BNAD_MAILBOX_MSIX_INDEX 0
#define BNAD_MAILBOX_MSIX_VECTORS 1
@@ -84,7 +84,7 @@ struct bnad_rx_ctrl {
#define BNAD_IOCETH_TIMEOUT 10000
#define BNAD_MIN_Q_DEPTH 512
-#define BNAD_MAX_RXQ_DEPTH 2048
+#define BNAD_MAX_RXQ_DEPTH 16384
#define BNAD_MAX_TXQ_DEPTH 2048
#define BNAD_JUMBO_MTU 9000
@@ -105,6 +105,9 @@ struct bnad_rx_ctrl {
#define BNAD_NUM_TXQ (bnad->num_tx * bnad->num_txq_per_tx)
#define BNAD_NUM_RXP (bnad->num_rx * bnad->num_rxp_per_rx)
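+/* on-wire frame size for a given MTU: Ethernet header, VLAN tag, payload, FCS */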
+#define BNAD_FRAME_SIZE(_mtu) \
+ (ETH_HLEN + VLAN_HLEN + (_mtu) + ETH_FCS_LEN)
+
/*
* DATA STRUCTURES
*/
@@ -219,6 +222,7 @@ struct bnad_rx_info {
struct bnad_tx_vector {
DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
};
struct bnad_tx_unmap {
@@ -234,33 +238,38 @@ struct bnad_rx_vector {
struct bnad_rx_unmap {
struct page *page;
- u32 page_offset;
struct sk_buff *skb;
struct bnad_rx_vector vector;
+ u32 page_offset;
};
enum bnad_rxbuf_type {
BNAD_RXBUF_NONE = 0,
- BNAD_RXBUF_SKB = 1,
+ BNAD_RXBUF_SK_BUFF = 1,
BNAD_RXBUF_PAGE = 2,
- BNAD_RXBUF_MULTI = 3
+ BNAD_RXBUF_MULTI_BUFF = 3
};
-#define BNAD_RXBUF_IS_PAGE(_type) ((_type) == BNAD_RXBUF_PAGE)
+#define BNAD_RXBUF_IS_SK_BUFF(_type) ((_type) == BNAD_RXBUF_SK_BUFF)
+#define BNAD_RXBUF_IS_MULTI_BUFF(_type) ((_type) == BNAD_RXBUF_MULTI_BUFF)
struct bnad_rx_unmap_q {
int reuse_pi;
int alloc_order;
u32 map_size;
enum bnad_rxbuf_type type;
- struct bnad_rx_unmap unmap[0];
+ struct bnad_rx_unmap unmap[0] ____cacheline_aligned;
};
+#define BNAD_PCI_DEV_IS_CAT2(_bnad) \
+ ((_bnad)->pcidev->device == BFA_PCI_DEVICE_ID_CT2)
+
/* Bit mask values for bnad->cfg_flags */
#define BNAD_CF_DIM_ENABLED 0x01 /* DIM */
#define BNAD_CF_PROMISC 0x02
#define BNAD_CF_ALLMULTI 0x04
-#define BNAD_CF_MSIX 0x08 /* If in MSIx mode */
+#define BNAD_CF_DEFAULT 0x08
+#define BNAD_CF_MSIX 0x10 /* If in MSIx mode */
/* Defines for run_flags bit-mask */
/* Set, tested & cleared using xxx_bit() functions */
@@ -367,7 +376,6 @@ struct bnad_drvinfo {
* EXTERN VARIABLES
*/
extern const struct firmware *bfi_fw;
-extern u32 bnad_rxqs_per_cq;
/*
* EXTERN PROTOTYPES
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 455b5a2e59d4..f9e150825bb5 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -1131,6 +1131,7 @@ static const struct ethtool_ops bnad_ethtool_ops = {
.get_eeprom = bnad_get_eeprom,
.set_eeprom = bnad_set_eeprom,
.flash_device = bnad_flash_device,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index 43405f654b4a..b3ff6d507951 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -37,8 +37,8 @@
extern char bfa_version[];
-#define CNA_FW_FILE_CT "ctfw-3.2.1.1.bin"
-#define CNA_FW_FILE_CT2 "ct2fw-3.2.1.1.bin"
+#define CNA_FW_FILE_CT "ctfw-3.2.3.0.bin"
+#define CNA_FW_FILE_CT2 "ct2fw-3.2.3.0.bin"
#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
#pragma pack(1)
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 92578690f6de..3190d38e16fb 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -17,6 +17,7 @@
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
@@ -203,6 +204,47 @@ static int macb_mdio_reset(struct mii_bus *bus)
return 0;
}
+/**
+ * macb_set_tx_clk() - Set the TX clock rate to match the link speed
+ * @clk: Pointer to the clock to change
+ * @speed: Link speed (SPEED_10/100/1000) that selects the clock rate
+ * @dev: Pointer to the struct net_device
+ */
+static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
+{
+ long ferr, rate, rate_rounded;
+
+ switch (speed) {
+ case SPEED_10:
+ rate = 2500000;
+ break;
+ case SPEED_100:
+ rate = 25000000;
+ break;
+ case SPEED_1000:
+ rate = 125000000;
+ break;
+ default:
+ return;
+ }
+
+ rate_rounded = clk_round_rate(clk, rate);
+ if (rate_rounded < 0)
+ return;
+
+ /* RGMII allows 50 ppm frequency error. Test and warn if this limit
+ * is not satisfied.
+ */
+ ferr = abs(rate_rounded - rate);
+ ferr = DIV_ROUND_UP(ferr, rate / 100000);
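+ /* ferr is now in units of 10 ppm; 5 units correspond to the 50 ppm limit */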
+ if (ferr > 5)
+ netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
+ rate);
+
+ if (clk_set_rate(clk, rate_rounded))
+ netdev_err(dev, "adjusting tx_clk failed.\n");
+}
+
static void macb_handle_link_change(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
@@ -250,6 +292,9 @@ static void macb_handle_link_change(struct net_device *dev)
spin_unlock_irqrestore(&bp->lock, flags);
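+ /* tx_clk is optional; it may hold an ERR_PTR when not provided */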
+ if (!IS_ERR(bp->tx_clk))
+ macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
+
if (status_change) {
if (phydev->link) {
netif_carrier_on(dev);
@@ -1790,21 +1835,44 @@ static int __init macb_probe(struct platform_device *pdev)
spin_lock_init(&bp->lock);
INIT_WORK(&bp->tx_error_task, macb_tx_error_task);
- bp->pclk = clk_get(&pdev->dev, "pclk");
+ bp->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(bp->pclk)) {
- dev_err(&pdev->dev, "failed to get macb_clk\n");
+ err = PTR_ERR(bp->pclk);
+ dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
goto err_out_free_dev;
}
- clk_prepare_enable(bp->pclk);
- bp->hclk = clk_get(&pdev->dev, "hclk");
+ bp->hclk = devm_clk_get(&pdev->dev, "hclk");
if (IS_ERR(bp->hclk)) {
- dev_err(&pdev->dev, "failed to get hclk\n");
- goto err_out_put_pclk;
+ err = PTR_ERR(bp->hclk);
+ dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
+ goto err_out_free_dev;
}
- clk_prepare_enable(bp->hclk);
- bp->regs = ioremap(regs->start, resource_size(regs));
+ bp->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+
+ err = clk_prepare_enable(bp->pclk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
+ goto err_out_free_dev;
+ }
+
+ err = clk_prepare_enable(bp->hclk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
+ goto err_out_disable_pclk;
+ }
+
+ if (!IS_ERR(bp->tx_clk)) {
+ err = clk_prepare_enable(bp->tx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n",
+ err);
+ goto err_out_disable_hclk;
+ }
+ }
+
+ bp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
if (!bp->regs) {
dev_err(&pdev->dev, "failed to map registers, aborting.\n");
err = -ENOMEM;
@@ -1812,11 +1880,12 @@ static int __init macb_probe(struct platform_device *pdev)
}
dev->irq = platform_get_irq(pdev, 0);
- err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev);
+ err = devm_request_irq(&pdev->dev, dev->irq, macb_interrupt, 0,
+ dev->name, dev);
if (err) {
dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
dev->irq, err);
- goto err_out_iounmap;
+ goto err_out_disable_clocks;
}
dev->netdev_ops = &macb_netdev_ops;
@@ -1879,7 +1948,7 @@ static int __init macb_probe(struct platform_device *pdev)
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
- goto err_out_free_irq;
+ goto err_out_disable_clocks;
}
err = macb_mii_init(bp);
@@ -1902,16 +1971,13 @@ static int __init macb_probe(struct platform_device *pdev)
err_out_unregister_netdev:
unregister_netdev(dev);
-err_out_free_irq:
- free_irq(dev->irq, dev);
-err_out_iounmap:
- iounmap(bp->regs);
err_out_disable_clocks:
+ if (!IS_ERR(bp->tx_clk))
+ clk_disable_unprepare(bp->tx_clk);
+err_out_disable_hclk:
clk_disable_unprepare(bp->hclk);
- clk_put(bp->hclk);
+err_out_disable_pclk:
clk_disable_unprepare(bp->pclk);
-err_out_put_pclk:
- clk_put(bp->pclk);
err_out_free_dev:
free_netdev(dev);
err_out:
@@ -1933,12 +1999,10 @@ static int __exit macb_remove(struct platform_device *pdev)
kfree(bp->mii_bus->irq);
mdiobus_free(bp->mii_bus);
unregister_netdev(dev);
- free_irq(dev->irq, dev);
- iounmap(bp->regs);
+ if (!IS_ERR(bp->tx_clk))
+ clk_disable_unprepare(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
- clk_put(bp->hclk);
clk_disable_unprepare(bp->pclk);
- clk_put(bp->pclk);
free_netdev(dev);
}
@@ -1946,45 +2010,49 @@ static int __exit macb_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int macb_suspend(struct platform_device *pdev, pm_message_t state)
+static int macb_suspend(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct net_device *netdev = platform_get_drvdata(pdev);
struct macb *bp = netdev_priv(netdev);
netif_carrier_off(netdev);
netif_device_detach(netdev);
+ if (!IS_ERR(bp->tx_clk))
+ clk_disable_unprepare(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
clk_disable_unprepare(bp->pclk);
return 0;
}
-static int macb_resume(struct platform_device *pdev)
+static int macb_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct net_device *netdev = platform_get_drvdata(pdev);
struct macb *bp = netdev_priv(netdev);
clk_prepare_enable(bp->pclk);
clk_prepare_enable(bp->hclk);
+ if (!IS_ERR(bp->tx_clk))
+ clk_prepare_enable(bp->tx_clk);
netif_device_attach(netdev);
return 0;
}
-#else
-#define macb_suspend NULL
-#define macb_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
+
static struct platform_driver macb_driver = {
.remove = __exit_p(macb_remove),
- .suspend = macb_suspend,
- .resume = macb_resume,
.driver = {
.name = "macb",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(macb_dt_ids),
+ .pm = &macb_pm_ops,
},
};
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index f4076155bed7..51c02442160a 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -572,6 +572,7 @@ struct macb {
struct platform_device *pdev;
struct clk *pclk;
struct clk *hclk;
+ struct clk *tx_clk;
struct net_device *dev;
struct napi_struct napi;
struct work_struct tx_error_task;
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index 8abb46b39032..5dd20f7285eb 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/cphy.h b/drivers/net/ethernet/chelsio/cxgb/cphy.h
index 1f095a9fc739..a4d2a4c08d3f 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cphy.h
+++ b/drivers/net/ethernet/chelsio/cxgb/cphy.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
index e36d45b78cc7..5249686afe71 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
+++ b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 1d021059f097..e5987139a1ae 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/elmer0.h b/drivers/net/ethernet/chelsio/cxgb/elmer0.h
index eef655c827d9..81526ad36339 100644
--- a/drivers/net/ethernet/chelsio/cxgb/elmer0.h
+++ b/drivers/net/ethernet/chelsio/cxgb/elmer0.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.c b/drivers/net/ethernet/chelsio/cxgb/espi.c
index 639ff1955739..3e182eee799e 100644
--- a/drivers/net/ethernet/chelsio/cxgb/espi.c
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.c
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.h b/drivers/net/ethernet/chelsio/cxgb/espi.h
index 5694aad4fbc0..162de5259df9 100644
--- a/drivers/net/ethernet/chelsio/cxgb/espi.h
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/gmac.h b/drivers/net/ethernet/chelsio/cxgb/gmac.h
index d42337457cf7..dfa77491a910 100644
--- a/drivers/net/ethernet/chelsio/cxgb/gmac.h
+++ b/drivers/net/ethernet/chelsio/cxgb/gmac.h
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
index f7136b2fd1e5..d0cf611551a1 100644
--- a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
+++ b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index eb33a31b08a0..ec5e05052d99 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/regs.h b/drivers/net/ethernet/chelsio/cxgb/regs.h
index c80bf4d6d0a6..964ce59ee169 100644
--- a/drivers/net/ethernet/chelsio/cxgb/regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb/regs.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 8061fb0ef7ed..0341537cdd37 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.h b/drivers/net/ethernet/chelsio/cxgb/sge.h
index b9bf16b385f7..a1ba591b3431 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.h
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c
index e0a03a31e7c4..816719314cc8 100644
--- a/drivers/net/ethernet/chelsio/cxgb/subr.c
+++ b/drivers/net/ethernet/chelsio/cxgb/subr.c
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
index d0f87d82566a..7f79cc7ceb75 100644
--- a/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 76ae09999b5b..c0a9dd55f4e5 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -182,7 +182,7 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
for_each_port(adapter, i) {
struct net_device *dev = adapter->port[i];
- if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
+ if (ether_addr_equal(dev->dev_addr, mac)) {
rcu_read_lock();
if (vlan && vlan != VLAN_VID_MASK) {
dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), vlan);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 8d53438638b2..5f226eda8cd6 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -429,7 +429,7 @@ found:
} else {
e->state = neigh->nud_state & NUD_CONNECTED ?
L2T_STATE_VALID : L2T_STATE_STALE;
- if (memcmp(e->dmac, neigh->ha, 6))
+ if (!ether_addr_equal(e->dmac, neigh->ha))
setup_l2e_send_pending(dev, NULL, e);
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index ecd2fb3ef695..16782b2cff49 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -49,13 +49,15 @@
#include <asm/io.h>
#include "cxgb4_uld.h"
-#define FW_VERSION_MAJOR 1
-#define FW_VERSION_MINOR 4
-#define FW_VERSION_MICRO 0
+#define T4FW_VERSION_MAJOR 0x01
+#define T4FW_VERSION_MINOR 0x09
+#define T4FW_VERSION_MICRO 0x17
+#define T4FW_VERSION_BUILD 0x00
-#define FW_VERSION_MAJOR_T5 0
-#define FW_VERSION_MINOR_T5 0
-#define FW_VERSION_MICRO_T5 0
+#define T5FW_VERSION_MAJOR 0x01
+#define T5FW_VERSION_MINOR 0x09
+#define T5FW_VERSION_MICRO 0x17
+#define T5FW_VERSION_BUILD 0x00
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
@@ -226,6 +228,25 @@ struct tp_params {
uint32_t dack_re; /* DACK timer resolution */
unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
+
+ u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */
+ u32 ingress_config; /* cached TP_INGRESS_CONFIG */
+
+ /* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a
+ * subset of the set of fields which may be present in the Compressed
+ * Filter Tuple portion of filters and TCP TCB connections. The
+ * fields which are present are controlled by the TP_VLAN_PRI_MAP.
+ * Since the set of fields that are present is variable, the shifted
+ * position of each field within the Compressed Filter Tuple may
+ * vary, and a field may be absent entirely if it isn't selected in
+ * TP_VLAN_PRI_MAP. Since some of these fields are needed in various
+ * places we store their offsets here, or a -1 if the field isn't
+ * present.
+ */
+ int vlan_shift;
+ int vnic_shift;
+ int port_shift;
+ int protocol_shift;
};
struct vpd_params {
@@ -240,6 +261,26 @@ struct pci_params {
unsigned char width;
};
+#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
+#define CHELSIO_CHIP_FPGA 0x100
+#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
+#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
+
+#define CHELSIO_T4 0x4
+#define CHELSIO_T5 0x5
+
+enum chip_type {
+ T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
+ T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
+ T4_FIRST_REV = T4_A1,
+ T4_LAST_REV = T4_A2,
+
+ T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
+ T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
+ T5_FIRST_REV = T5_A0,
+ T5_LAST_REV = T5_A1,
+};
+
struct adapter_params {
struct tp_params tp;
struct vpd_params vpd;
@@ -259,7 +300,7 @@ struct adapter_params {
unsigned char nports; /* # of ethernet ports */
unsigned char portvec;
- unsigned char rev; /* chip revision */
+ enum chip_type chip; /* chip code */
unsigned char offload;
unsigned char bypass;
@@ -267,6 +308,23 @@ struct adapter_params {
unsigned int ofldq_wr_cred;
};
+#include "t4fw_api.h"
+
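+/* FW_VERSION(T4)/FW_VERSION(T5) paste the chip prefix onto the
+ * T{4,5}FW_VERSION_* constants to build a 32-bit version word.
+ */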
+#define FW_VERSION(chip) ( \
+ FW_HDR_FW_VER_MAJOR_GET(chip##FW_VERSION_MAJOR) | \
+ FW_HDR_FW_VER_MINOR_GET(chip##FW_VERSION_MINOR) | \
+ FW_HDR_FW_VER_MICRO_GET(chip##FW_VERSION_MICRO) | \
+ FW_HDR_FW_VER_BUILD_GET(chip##FW_VERSION_BUILD))
+#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)
+
+struct fw_info {
+ u8 chip;
+ char *fs_name;
+ char *fw_mod_name;
+ struct fw_hdr fw_hdr;
+};
+
struct trace_params {
u32 data[TRACE_LEN / 4];
u32 mask[TRACE_LEN / 4];
@@ -512,25 +570,6 @@ struct sge {
struct l2t_data;
-#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
-#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
-#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
-
-#define CHELSIO_T4 0x4
-#define CHELSIO_T5 0x5
-
-enum chip_type {
- T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0),
- T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
- T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
- T4_FIRST_REV = T4_A1,
- T4_LAST_REV = T4_A3,
-
- T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
- T5_FIRST_REV = T5_A1,
- T5_LAST_REV = T5_A1,
-};
-
#ifdef CONFIG_PCI_IOV
/* T4 supports SRIOV on PF0-3 and T5 on PF0-7. However, the Serial
@@ -715,12 +754,12 @@ enum {
static inline int is_t5(enum chip_type chip)
{
- return (chip >= T5_FIRST_REV && chip <= T5_LAST_REV);
+ return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5;
}
static inline int is_t4(enum chip_type chip)
{
- return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV);
+ return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4;
}
static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
@@ -899,9 +938,14 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable);
int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
unsigned int t4_flash_cfg_addr(struct adapter *adapter);
-int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
-int t4_check_fw_version(struct adapter *adapter);
+int t4_get_fw_version(struct adapter *adapter, u32 *vers);
+int t4_get_tp_version(struct adapter *adapter, u32 *vers);
+int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
+ const u8 *fw_data, unsigned int fw_size,
+ struct fw_hdr *card_fw, enum dev_state state, int *reset);
int t4_prep_adapter(struct adapter *adapter);
+int t4_init_tp_params(struct adapter *adap);
+int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
@@ -934,13 +978,6 @@ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
int t4_early_init(struct adapter *adap, unsigned int mbox);
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
-int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force);
-int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset);
-int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
- const u8 *fw_data, unsigned int size, int force);
-int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
- unsigned int mtype, unsigned int maddr,
- u32 *finiver, u32 *finicsum, u32 *cfcsum);
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
unsigned int cache_line_size);
int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 8b929eeecd2d..fff02ed1295e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -276,9 +276,9 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
{ 0, }
};
-#define FW_FNAME "cxgb4/t4fw.bin"
+#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
-#define FW_CFNAME "cxgb4/t4-config.txt"
+#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
MODULE_DESCRIPTION(DRV_DESC);
@@ -286,7 +286,7 @@ MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
-MODULE_FIRMWARE(FW_FNAME);
+MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
/*
@@ -1071,72 +1071,6 @@ freeout: t4_free_sge_resources(adap);
}
/*
- * Returns 0 if new FW was successfully loaded, a positive errno if a load was
- * started but failed, and a negative errno if flash load couldn't start.
- */
-static int upgrade_fw(struct adapter *adap)
-{
- int ret;
- u32 vers, exp_major;
- const struct fw_hdr *hdr;
- const struct firmware *fw;
- struct device *dev = adap->pdev_dev;
- char *fw_file_name;
-
- switch (CHELSIO_CHIP_VERSION(adap->chip)) {
- case CHELSIO_T4:
- fw_file_name = FW_FNAME;
- exp_major = FW_VERSION_MAJOR;
- break;
- case CHELSIO_T5:
- fw_file_name = FW5_FNAME;
- exp_major = FW_VERSION_MAJOR_T5;
- break;
- default:
- dev_err(dev, "Unsupported chip type, %x\n", adap->chip);
- return -EINVAL;
- }
-
- ret = request_firmware(&fw, fw_file_name, dev);
- if (ret < 0) {
- dev_err(dev, "unable to load firmware image %s, error %d\n",
- fw_file_name, ret);
- return ret;
- }
-
- hdr = (const struct fw_hdr *)fw->data;
- vers = ntohl(hdr->fw_ver);
- if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) {
- ret = -EINVAL; /* wrong major version, won't do */
- goto out;
- }
-
- /*
- * If the flash FW is unusable or we found something newer, load it.
- */
- if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major ||
- vers > adap->params.fw_vers) {
- dev_info(dev, "upgrading firmware ...\n");
- ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
- /*force=*/false);
- if (!ret)
- dev_info(dev,
- "firmware upgraded to version %pI4 from %s\n",
- &hdr->fw_ver, fw_file_name);
- else
- dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
- } else {
- /*
- * Tell our caller that we didn't upgrade the firmware.
- */
- ret = -EINVAL;
- }
-
-out: release_firmware(fw);
- return ret;
-}
-
-/*
* Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
* The allocated memory is cleared.
*/
@@ -1415,7 +1349,7 @@ static int get_sset_count(struct net_device *dev, int sset)
static int get_regs_len(struct net_device *dev)
{
struct adapter *adap = netdev2adap(dev);
- if (is_t4(adap->chip))
+ if (is_t4(adap->params.chip))
return T4_REGMAP_SIZE;
else
return T5_REGMAP_SIZE;
@@ -1499,7 +1433,7 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
data += sizeof(struct port_stats) / sizeof(u64);
collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
data += sizeof(struct queue_port_stats) / sizeof(u64);
- if (!is_t4(adapter->chip)) {
+ if (!is_t4(adapter->params.chip)) {
t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
@@ -1521,8 +1455,8 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
*/
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
- return CHELSIO_CHIP_VERSION(ap->chip) |
- (CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16);
+ return CHELSIO_CHIP_VERSION(ap->params.chip) |
+ (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
}
static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
@@ -2189,7 +2123,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
static const unsigned int *reg_ranges;
int arr_size = 0, buf_size = 0;
- if (is_t4(ap->chip)) {
+ if (is_t4(ap->params.chip)) {
reg_ranges = &t4_reg_ranges[0];
arr_size = ARRAY_SIZE(t4_reg_ranges);
buf_size = T4_REGMAP_SIZE;
@@ -2967,7 +2901,7 @@ static int setup_debugfs(struct adapter *adap)
size = t4_read_reg(adap, MA_EDRAM1_BAR);
add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
}
- if (is_t4(adap->chip)) {
+ if (is_t4(adap->params.chip)) {
size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
if (i & EXT_MEM_ENABLE)
add_debugfs_mem(adap, "mc", MEM_MC,
@@ -3052,7 +2986,14 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
if (stid >= 0) {
t->stid_tab[stid].data = data;
stid += t->stid_base;
- t->stids_in_use++;
+ /* IPv6 requires a maximum of 520 bits or 16 cells in the TCAM.
+ * This is equivalent to 4 TIDs. With CLIP enabled it
+ * needs 2 TIDs.
+ */
+ if (family == PF_INET)
+ t->stids_in_use++;
+ else
+ t->stids_in_use += 4;
}
spin_unlock_bh(&t->stid_lock);
return stid;
@@ -3078,7 +3019,8 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
}
if (stid >= 0) {
t->stid_tab[stid].data = data;
- stid += t->stid_base;
+ stid -= t->nstids;
+ stid += t->sftid_base;
t->stids_in_use++;
}
spin_unlock_bh(&t->stid_lock);
@@ -3090,14 +3032,24 @@ EXPORT_SYMBOL(cxgb4_alloc_sftid);
*/
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
- stid -= t->stid_base;
+ /* Is it a server filter TID? */
+ if (t->nsftids && (stid >= t->sftid_base)) {
+ stid -= t->sftid_base;
+ stid += t->nstids;
+ } else {
+ stid -= t->stid_base;
+ }
+
spin_lock_bh(&t->stid_lock);
if (family == PF_INET)
__clear_bit(stid, t->stid_bmap);
else
bitmap_release_region(t->stid_bmap, stid, 2);
t->stid_tab[stid].data = NULL;
- t->stids_in_use--;
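+ /* mirror the 4-TID accounting for IPv6 done in cxgb4_alloc_stid() */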
+ if (family == PF_INET)
+ t->stids_in_use--;
+ else
+ t->stids_in_use -= 4;
spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
@@ -3200,6 +3152,7 @@ static int tid_init(struct tid_info *t)
size_t size;
unsigned int stid_bmap_size;
unsigned int natids = t->natids;
+ struct adapter *adap = container_of(t, struct adapter, tids);
stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
size = t->ntids * sizeof(*t->tid_tab) +
@@ -3233,6 +3186,11 @@ static int tid_init(struct tid_info *t)
t->afree = t->atid_tab;
}
bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
+ /* Reserve stid 0 for T4/T5 adapters */
+ if (!t->stid_base &&
+ (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
+ __set_bit(0, t->stid_bmap);
+
return 0;
}
@@ -3419,7 +3377,7 @@ unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
- if (is_t4(adap->chip)) {
+ if (is_t4(adap->params.chip)) {
lp_count = G_LP_COUNT(v1);
hp_count = G_HP_COUNT(v1);
} else {
@@ -3588,7 +3546,7 @@ static void drain_db_fifo(struct adapter *adap, int usecs)
do {
v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
- if (is_t4(adap->chip)) {
+ if (is_t4(adap->params.chip)) {
lp_count = G_LP_COUNT(v1);
hp_count = G_HP_COUNT(v1);
} else {
@@ -3708,7 +3666,7 @@ static void process_db_drop(struct work_struct *work)
adap = container_of(work, struct adapter, db_drop_task);
- if (is_t4(adap->chip)) {
+ if (is_t4(adap->params.chip)) {
disable_dbs(adap);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
drain_db_fifo(adap, 1);
@@ -3753,7 +3711,7 @@ static void process_db_drop(struct work_struct *work)
void t4_db_full(struct adapter *adap)
{
- if (is_t4(adap->chip)) {
+ if (is_t4(adap->params.chip)) {
t4_set_reg_field(adap, SGE_INT_ENABLE3,
DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
queue_work(workq, &adap->db_full_task);
@@ -3762,7 +3720,7 @@ void t4_db_full(struct adapter *adap)
void t4_db_dropped(struct adapter *adap)
{
- if (is_t4(adap->chip))
+ if (is_t4(adap->params.chip))
queue_work(workq, &adap->db_drop_task);
}
@@ -3789,7 +3747,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.nchan = adap->params.nports;
lli.nports = adap->params.nports;
lli.wr_cred = adap->params.ofldq_wr_cred;
- lli.adapter_type = adap->params.rev;
+ lli.adapter_type = adap->params.chip;
lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
@@ -3797,7 +3755,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
(adap->fn * 4));
- lli.filt_mode = adap->filter_mode;
+ lli.filt_mode = adap->params.tp.vlan_pri_map;
/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
for (i = 0; i < NCHAN; i++)
lli.tx_modq[i] = i;
@@ -4245,7 +4203,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
adap = netdev2adap(dev);
/* Adjust stid to correct filter index */
- stid -= adap->tids.nstids;
+ stid -= adap->tids.sftid_base;
stid += adap->tids.nftids;
/* Check to make sure the filter requested is writable ...
@@ -4271,12 +4229,17 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
f->fs.val.lip[i] = val[i];
f->fs.mask.lip[i] = ~0;
}
- if (adap->filter_mode & F_PORT) {
+ if (adap->params.tp.vlan_pri_map & F_PORT) {
f->fs.val.iport = port;
f->fs.mask.iport = mask;
}
}
+ if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
+ f->fs.val.proto = IPPROTO_TCP;
+ f->fs.mask.proto = ~0;
+ }
+
f->fs.dirsteer = 1;
f->fs.iq = queue;
/* Mark filter as locked */
@@ -4303,7 +4266,7 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
adap = netdev2adap(dev);
/* Adjust stid to correct filter index */
- stid -= adap->tids.nstids;
+ stid -= adap->tids.sftid_base;
stid += adap->tids.nftids;
f = &adap->tids.ftid_tab[stid];
@@ -4483,7 +4446,7 @@ static void setup_memwin(struct adapter *adap)
u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
- if (is_t4(adap->chip)) {
+ if (is_t4(adap->params.chip)) {
mem_win0_base = bar0 + MEMWIN0_BASE;
mem_win1_base = bar0 + MEMWIN1_BASE;
mem_win2_base = bar0 + MEMWIN2_BASE;
@@ -4668,8 +4631,10 @@ static int adap_init0_config(struct adapter *adapter, int reset)
const struct firmware *cf;
unsigned long mtype = 0, maddr = 0;
u32 finiver, finicsum, cfcsum;
- int ret, using_flash;
+ int ret;
+ int config_issued = 0;
char *fw_config_file, fw_config_file_path[256];
+ char *config_name = NULL;
/*
* Reset device if necessary.
@@ -4686,9 +4651,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
* then use that. Otherwise, use the configuration file stored
* in the adapter flash ...
*/
- switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
+ switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
case CHELSIO_T4:
- fw_config_file = FW_CFNAME;
+ fw_config_file = FW4_CFNAME;
break;
case CHELSIO_T5:
fw_config_file = FW5_CFNAME;
@@ -4702,13 +4667,16 @@ static int adap_init0_config(struct adapter *adapter, int reset)
ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
if (ret < 0) {
- using_flash = 1;
+ config_name = "On FLASH";
mtype = FW_MEMTYPE_CF_FLASH;
maddr = t4_flash_cfg_addr(adapter);
} else {
u32 params[7], val[7];
- using_flash = 0;
+ sprintf(fw_config_file_path,
+ "/lib/firmware/%s", fw_config_file);
+ config_name = fw_config_file_path;
+
if (cf->size >= FLASH_CFG_MAX_SIZE)
ret = -ENOMEM;
else {
@@ -4776,6 +4744,26 @@ static int adap_init0_config(struct adapter *adapter, int reset)
FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
&caps_cmd);
+
+ /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
+ * Configuration File in FLASH), our last gasp effort is to use the
+ * Firmware Configuration File which is embedded in the firmware. A
+ * very few early versions of the firmware didn't have one embedded
+ * but we can ignore those.
+ */
+ if (ret == -ENOENT) {
+ memset(&caps_cmd, 0, sizeof(caps_cmd));
+ caps_cmd.op_to_write =
+ htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ);
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
+ ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
+ sizeof(caps_cmd), &caps_cmd);
+ config_name = "Firmware Default";
+ }
+
+ config_issued = 1;
if (ret < 0)
goto bye;
@@ -4816,7 +4804,6 @@ static int adap_init0_config(struct adapter *adapter, int reset)
if (ret < 0)
goto bye;
- sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file);
/*
* Return successfully and note that we're operating with parameters
* not supplied by the driver, rather than from hard-wired
@@ -4824,11 +4811,8 @@ static int adap_init0_config(struct adapter *adapter, int reset)
*/
adapter->flags |= USING_SOFT_PARAMS;
dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
- "Configuration File %s, version %#x, computed checksum %#x\n",
- (using_flash
- ? "in device FLASH"
- : fw_config_file_path),
- finiver, cfcsum);
+ "Configuration File \"%s\", version %#x, computed checksum %#x\n",
+ config_name, finiver, cfcsum);
return 0;
/*
@@ -4837,9 +4821,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
* want to issue a warning since this is fairly common.)
*/
bye:
- if (ret != -ENOENT)
- dev_warn(adapter->pdev_dev, "Configuration file error %d\n",
- -ret);
+ if (config_issued && ret != -ENOENT)
+ dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
+ config_name, -ret);
return ret;
}
@@ -5086,6 +5070,47 @@ bye:
return ret;
}
+static struct fw_info fw_info_array[] = {
+ {
+ .chip = CHELSIO_T4,
+ .fs_name = FW4_CFNAME,
+ .fw_mod_name = FW4_FNAME,
+ .fw_hdr = {
+ .chip = FW_HDR_CHIP_T4,
+ .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
+ .intfver_nic = FW_INTFVER(T4, NIC),
+ .intfver_vnic = FW_INTFVER(T4, VNIC),
+ .intfver_ri = FW_INTFVER(T4, RI),
+ .intfver_iscsi = FW_INTFVER(T4, ISCSI),
+ .intfver_fcoe = FW_INTFVER(T4, FCOE),
+ },
+ }, {
+ .chip = CHELSIO_T5,
+ .fs_name = FW5_CFNAME,
+ .fw_mod_name = FW5_FNAME,
+ .fw_hdr = {
+ .chip = FW_HDR_CHIP_T5,
+ .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
+ .intfver_nic = FW_INTFVER(T5, NIC),
+ .intfver_vnic = FW_INTFVER(T5, VNIC),
+ .intfver_ri = FW_INTFVER(T5, RI),
+ .intfver_iscsi = FW_INTFVER(T5, ISCSI),
+ .intfver_fcoe = FW_INTFVER(T5, FCOE),
+ },
+ }
+};
+
+static struct fw_info *find_fw_info(int chip)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
+ if (fw_info_array[i].chip == chip)
+ return &fw_info_array[i];
+ }
+ return NULL;
+}
+
/*
* Phase 0 of initialization: contact FW, obtain config, perform basic init.
*/
@@ -5096,7 +5121,7 @@ static int adap_init0(struct adapter *adap)
enum dev_state state;
u32 params[7], val[7];
struct fw_caps_config_cmd caps_cmd;
- int reset = 1, j;
+ int reset = 1;
/*
* Contact FW, advertising Master capability (and potentially forcing
@@ -5123,44 +5148,54 @@ static int adap_init0(struct adapter *adap)
* later reporting and B. to warn if the currently loaded firmware
* is excessively mismatched relative to the driver.)
*/
- ret = t4_check_fw_version(adap);
-
- /* The error code -EFAULT is returned by t4_check_fw_version() if
- * firmware on adapter < supported firmware. If firmware on adapter
- * is too old (not supported by driver) and we're the MASTER_PF set
- * adapter state to DEV_STATE_UNINIT to force firmware upgrade
- * and reinitialization.
- */
- if ((adap->flags & MASTER_PF) && ret == -EFAULT)
- state = DEV_STATE_UNINIT;
+ t4_get_fw_version(adap, &adap->params.fw_vers);
+ t4_get_tp_version(adap, &adap->params.tp_vers);
if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
- if (ret == -EINVAL || ret == -EFAULT || ret > 0) {
- if (upgrade_fw(adap) >= 0) {
- /*
- * Note that the chip was reset as part of the
- * firmware upgrade so we don't reset it again
- * below and grab the new firmware version.
- */
- reset = 0;
- ret = t4_check_fw_version(adap);
- } else
- if (ret == -EFAULT) {
- /*
- * Firmware is old but still might
- * work if we force reinitialization
- * of the adapter. Ignoring FW upgrade
- * failure.
- */
- dev_warn(adap->pdev_dev,
- "Ignoring firmware upgrade "
- "failure, and forcing driver "
- "to reinitialize the "
- "adapter.\n");
- ret = 0;
- }
+ struct fw_info *fw_info;
+ struct fw_hdr *card_fw;
+ const struct firmware *fw;
+ const u8 *fw_data = NULL;
+ unsigned int fw_size = 0;
+
+ /* This is the firmware whose headers the driver was compiled
+ * against
+ */
+ fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
+ if (fw_info == NULL) {
+ dev_err(adap->pdev_dev,
+ "unable to get firmware info for chip %d.\n",
+ CHELSIO_CHIP_VERSION(adap->params.chip));
+ return -EINVAL;
}
+
+ /* allocate memory to read the header of the firmware on the
+ * card
+ */
+ card_fw = t4_alloc_mem(sizeof(*card_fw));
+
+ /* Get FW from /lib/firmware/ */
+ ret = request_firmware(&fw, fw_info->fw_mod_name,
+ adap->pdev_dev);
+ if (ret < 0) {
+ dev_err(adap->pdev_dev,
+ "unable to load firmware image %s, error %d\n",
+ fw_info->fw_mod_name, ret);
+ } else {
+ fw_data = fw->data;
+ fw_size = fw->size;
+ }
+
+ /* upgrade FW logic */
+ ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
+ state, &reset);
+
+ /* Cleaning up */
+ if (fw != NULL)
+ release_firmware(fw);
+ t4_free_mem(card_fw);
+
if (ret < 0)
- return ret;
+ goto bye;
}
/*
@@ -5245,7 +5280,7 @@ static int adap_init0(struct adapter *adap)
if (ret == -ENOENT) {
dev_info(adap->pdev_dev,
"No Configuration File present "
- "on adapter. Using hard-wired "
+ "on adapter. Using hard-wired "
"configuration parameters.\n");
ret = adap_init0_no_config(adap, reset);
}
@@ -5428,21 +5463,11 @@ static int adap_init0(struct adapter *adap)
/*
* These are finalized by FW initialization, load their values now.
*/
- v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
- adap->params.tp.tre = TIMERRESOLUTION_GET(v);
- adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
adap->params.b_wnd);
- /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
- for (j = 0; j < NCHAN; j++)
- adap->params.tp.tx_modq[j] = j;
-
- t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
- &adap->filter_mode, 1,
- TP_VLAN_PRI_MAP);
-
+ t4_init_tp_params(adap);
adap->flags |= FW_OK;
return 0;
@@ -5787,7 +5812,7 @@ static void print_port_info(const struct net_device *dev)
netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
adap->params.vpd.id,
- CHELSIO_CHIP_RELEASE(adap->params.rev), buf,
+ CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
(adap->flags & USING_MSIX) ? " MSI-X" :
(adap->flags & USING_MSI) ? " MSI" : "");
@@ -5910,7 +5935,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto out_unmap_bar0;
- if (!is_t4(adapter->chip)) {
+ if (!is_t4(adapter->params.chip)) {
s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
@@ -6064,7 +6089,7 @@ sriov:
out_free_dev:
free_some_resources(adapter);
out_unmap_bar:
- if (!is_t4(adapter->chip))
+ if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
out_unmap_bar0:
iounmap(adapter->regs);
@@ -6116,7 +6141,7 @@ static void remove_one(struct pci_dev *pdev)
free_some_resources(adapter);
iounmap(adapter->regs);
- if (!is_t4(adapter->chip))
+ if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
kfree(adapter);
pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 6f21f2451c30..4dd0a82533e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -131,7 +131,14 @@ static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
{
- stid -= t->stid_base;
+ /* Is it a server filter TID? */
+ if (t->nsftids && (stid >= t->sftid_base)) {
+ stid -= t->sftid_base;
+ stid += t->nstids;
+ } else {
+ stid -= t->stid_base;
+ }
+
return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL;
}
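Both adjustments exist because regular server TIDs and server-filter TIDs occupy disjoint stid ranges yet share a single stid_tab[], with the filter entries stacked after the nstids regular entries. A minimal user-space sketch of the index math; all sizes below are invented for illustration:

#include <stdio.h>

/* Invented layout; the real values come from the firmware at init time. */
static const unsigned int stid_base = 128, nstids = 64;
static const unsigned int sftid_base = 256, nsftids = 16;

static unsigned int stid_index(unsigned int stid)
{
	if (nsftids && stid >= sftid_base)
		return stid - sftid_base + nstids;	/* server filter TID */
	return stid - stid_base;			/* regular server TID */
}

int main(void)
{
	printf("%u %u\n", stid_index(130), stid_index(260));	/* 2 68 */
	return 0;
}

Both results land inside stid_tab[0 .. nstids + nsftids - 1], so the single bounds check at the end covers the two ranges.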
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 29878098101e..cb05be905def 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -45,6 +45,7 @@
#include "l2t.h"
#include "t4_msg.h"
#include "t4fw_api.h"
+#include "t4_regs.h"
#define VLAN_NONE 0xfff
@@ -411,6 +412,40 @@ done:
}
EXPORT_SYMBOL(cxgb4_l2t_get);
+u64 cxgb4_select_ntuple(struct net_device *dev,
+ const struct l2t_entry *l2t)
+{
+ struct adapter *adap = netdev2adap(dev);
+ struct tp_params *tp = &adap->params.tp;
+ u64 ntuple = 0;
+
+ /* Initialize each of the fields we care about that are present
+ * in the Compressed Filter Tuple.
+ */
+ if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
+ ntuple |= (F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;
+
+ if (tp->port_shift >= 0)
+ ntuple |= (u64)l2t->lport << tp->port_shift;
+
+ if (tp->protocol_shift >= 0)
+ ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
+
+ if (tp->vnic_shift >= 0) {
+ u32 viid = cxgb4_port_viid(dev);
+ u32 vf = FW_VIID_VIN_GET(viid);
+ u32 pf = FW_VIID_PFN_GET(viid);
+ u32 vld = FW_VIID_VIVLD_GET(viid);
+
+ ntuple |= (u64)(V_FT_VNID_ID_VF(vf) |
+ V_FT_VNID_ID_PF(pf) |
+ V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
+ }
+
+ return ntuple;
+}
+EXPORT_SYMBOL(cxgb4_select_ntuple);
+
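To make the tuple packing concrete (all shift values and field contents below are assumed for illustration, not taken from the patch): with a filter mode selecting only PORT, VLAN and PROTOCOL, t4_init_tp_params() (added later in this patch) would yield port_shift = 0, vlan_shift = 3 and protocol_shift = 20, and a TCP flow on lport 1 with VLAN 100 packs roughly like this:

#include <stdint.h>
#include <stdio.h>

#define IPPROTO_TCP	6		/* as in <netinet/in.h> */
#define F_FT_VLAN_VLD	(1U << 16)	/* VLD bit inside the VLAN field */

int main(void)
{
	int vlan_shift = 3, port_shift = 0, protocol_shift = 20; /* assumed */
	uint64_t ntuple = 0;

	ntuple |= (uint64_t)(F_FT_VLAN_VLD | 100) << vlan_shift; /* VLAN 100 */
	ntuple |= (uint64_t)1 << port_shift;			 /* lport 1 */
	ntuple |= (uint64_t)IPPROTO_TCP << protocol_shift;	 /* TCP */

	printf("ntuple = %#llx\n", (unsigned long long)ntuple);	 /* 0x680321 */
	return 0;
}

A shift of -1 (field not selected in the filter mode) simply drops that contribution, which is why every term in cxgb4_select_ntuple() is guarded by a >= 0 test.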
/*
* Called when address resolution fails for an L2T entry to handle packets
* on the arpq head. If a packet specifies a failure handler it is invoked,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 108c0f1fce1c..85eb5c71358d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -98,7 +98,8 @@ int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
const struct net_device *physdev,
unsigned int priority);
-
+u64 cxgb4_select_ntuple(struct net_device *dev,
+ const struct l2t_entry *l2t);
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index ac311f5f3eb9..47ffa64fcf19 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -509,7 +509,7 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
u32 val;
if (q->pend_cred >= 8) {
val = PIDX(q->pend_cred / 8);
- if (!is_t4(adap->chip))
+ if (!is_t4(adap->params.chip))
val |= DBTYPE(1);
wmb();
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) |
@@ -847,7 +847,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
wmb(); /* write descriptors before telling HW */
spin_lock(&q->db_lock);
if (!q->db_disabled) {
- if (is_t4(adap->chip)) {
+ if (is_t4(adap->params.chip)) {
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
QID(q->cntxt_id) | PIDX(n));
} else {
@@ -1596,7 +1596,7 @@ static noinline int handle_trace_pkt(struct adapter *adap,
return 0;
}
- if (is_t4(adap->chip))
+ if (is_t4(adap->params.chip))
__skb_pull(skb, sizeof(struct cpl_trace_pkt));
else
__skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
@@ -1630,7 +1630,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxq->rspq.idx);
if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
- skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
+ skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
+ PKT_HASH_TYPE_L3);
if (unlikely(pkt->vlan_ex)) {
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
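These hunks follow the then-new core helper: instead of writing skb->rxhash (and l4_rxhash) directly, a driver now hands the stack the hash together with a type describing how much of the flow it covers. Shown as comments, since the pattern only makes sense inside a receive path:

/* Before: the driver pokes the skb fields itself.
 *	skb->rxhash = hw_hash;
 *	skb->l4_rxhash = true;		// only when it was a 4-tuple hash
 *
 * After: one call records both the value and its strength.
 *	skb_set_hash(skb, hw_hash, PKT_HASH_TYPE_L3);	// IP addresses only
 *	skb_set_hash(skb, hw_hash, PKT_HASH_TYPE_L4);	// includes L4 ports
 */

The cxgb4 conversions declare the hash as PKT_HASH_TYPE_L3; the enic conversion later in this patch picks L4 when the RSS type indicates a TCP hash.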
@@ -1661,7 +1662,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct cpl_rx_pkt *pkt;
struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
struct sge *s = &q->adap->sge;
- int cpl_trace_pkt = is_t4(q->adap->chip) ?
+ int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
@@ -1686,7 +1687,8 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
skb->protocol = eth_type_trans(skb, q->netdev);
skb_record_rx_queue(skb, q->idx);
if (skb->dev->features & NETIF_F_RXHASH)
- skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
+ skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
+ PKT_HASH_TYPE_L3);
rxq->stats.pkts++;
@@ -2182,7 +2184,7 @@ err:
static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
{
q->cntxt_id = id;
- if (!is_t4(adap->chip)) {
+ if (!is_t4(adap->params.chip)) {
unsigned int s_qpp;
unsigned short udb_density;
unsigned long qpshift;
@@ -2581,7 +2583,7 @@ static int t4_sge_init_soft(struct adapter *adap)
#undef READ_FL_BUF
if (fl_small_pg != PAGE_SIZE ||
- (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg ||
+ (fl_large_pg != 0 && (fl_large_pg < fl_small_pg ||
(fl_large_pg & (fl_large_pg-1)) != 0))) {
dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
fl_small_pg, fl_large_pg);
@@ -2641,7 +2643,7 @@ static int t4_sge_init_hard(struct adapter *adap)
* Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
* and generate an interrupt when this occurs so we can recover.
*/
- if (is_t4(adap->chip)) {
+ if (is_t4(adap->params.chip)) {
t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
V_HP_INT_THRESH(M_HP_INT_THRESH) |
V_LP_INT_THRESH(M_LP_INT_THRESH),
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 4cbb2f9850be..a3964753935c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -38,6 +38,8 @@
#include "t4_regs.h"
#include "t4fw_api.h"
+static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
+ const u8 *fw_data, unsigned int size, int force);
/**
* t4_wait_op_done_val - wait until an operation is completed
* @adapter: the adapter performing the operation
@@ -296,7 +298,7 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
u32 mc_bist_status_rdata, mc_bist_data_pattern;
- if (is_t4(adap->chip)) {
+ if (is_t4(adap->params.chip)) {
mc_bist_cmd = MC_BIST_CMD;
mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
mc_bist_cmd_len = MC_BIST_CMD_LEN;
@@ -349,7 +351,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
- if (is_t4(adap->chip)) {
+ if (is_t4(adap->params.chip)) {
edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
@@ -402,7 +404,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
{
int i;
- u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn);
+ u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
/*
* Setup offset into PCIE memory window. Address must be a
@@ -863,104 +865,169 @@ unlock:
}
/**
- * get_fw_version - read the firmware version
+ * t4_get_fw_version - read the firmware version
* @adapter: the adapter
* @vers: where to place the version
*
* Reads the FW version from flash.
*/
-static int get_fw_version(struct adapter *adapter, u32 *vers)
+int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
- return t4_read_flash(adapter, adapter->params.sf_fw_start +
- offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
+ return t4_read_flash(adapter, FLASH_FW_START +
+ offsetof(struct fw_hdr, fw_ver), 1,
+ vers, 0);
}
/**
- * get_tp_version - read the TP microcode version
+ * t4_get_tp_version - read the TP microcode version
* @adapter: the adapter
* @vers: where to place the version
*
* Reads the TP microcode version from flash.
*/
-static int get_tp_version(struct adapter *adapter, u32 *vers)
+int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
- return t4_read_flash(adapter, adapter->params.sf_fw_start +
+ return t4_read_flash(adapter, FLASH_FW_START +
offsetof(struct fw_hdr, tp_microcode_ver),
1, vers, 0);
}
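Both helpers now read at the fixed FLASH_FW_START instead of the adapter-specific sf_fw_start, using offsetof() to address a single field of the on-flash header. The addressing idiom in isolation, against a stand-in struct (not the real fw_hdr; offsets are whatever the layout dictates):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fw_hdr_model {	/* stand-in for the leading fields of struct fw_hdr */
	uint8_t  ver, chip;
	uint16_t len512;
	uint32_t fw_ver;
	uint32_t tp_microcode_ver;
};

int main(void)
{
	/* Reading one 32-bit word at base + offsetof(...) fetches just fw_ver. */
	printf("fw_ver at +%zu, tp_microcode_ver at +%zu\n",
	       offsetof(struct fw_hdr_model, fw_ver),
	       offsetof(struct fw_hdr_model, tp_microcode_ver));	/* 4, 8 */
	return 0;
}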
-/**
- * t4_check_fw_version - check if the FW is compatible with this driver
- * @adapter: the adapter
- *
- * Checks if an adapter's FW is compatible with the driver. Returns 0
- * if there's exact match, a negative error if the version could not be
- * read or there's a major version mismatch, and a positive value if the
- * expected major version is found but there's a minor version mismatch.
+/* Is the given firmware API compatible with the one the driver was compiled
+ * with?
*/
-int t4_check_fw_version(struct adapter *adapter)
+static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{
- u32 api_vers[2];
- int ret, major, minor, micro;
- int exp_major, exp_minor, exp_micro;
- ret = get_fw_version(adapter, &adapter->params.fw_vers);
- if (!ret)
- ret = get_tp_version(adapter, &adapter->params.tp_vers);
- if (!ret)
- ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
- offsetof(struct fw_hdr, intfver_nic),
- 2, api_vers, 1);
- if (ret)
- return ret;
+ /* short circuit if it's the exact same firmware version */
+ if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
+ return 1;
- major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
- minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
- micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
+#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
+ if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
+ SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
+ return 1;
+#undef SAME_INTF
- switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
- case CHELSIO_T4:
- exp_major = FW_VERSION_MAJOR;
- exp_minor = FW_VERSION_MINOR;
- exp_micro = FW_VERSION_MICRO;
- break;
- case CHELSIO_T5:
- exp_major = FW_VERSION_MAJOR_T5;
- exp_minor = FW_VERSION_MINOR_T5;
- exp_micro = FW_VERSION_MICRO_T5;
- break;
- default:
- dev_err(adapter->pdev_dev, "Unsupported chip type, %x\n",
- adapter->chip);
- return -EINVAL;
- }
+ return 0;
+}
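fw_compatible() deliberately accepts a card firmware whose build version differs from the driver's, as long as the chip and all five interface (API) versions match. A standalone model of the check, with invented version numbers:

#include <stdint.h>
#include <stdio.h>

/* Cut-down model of struct fw_hdr with only the fields the check reads. */
struct hdr {
	uint8_t  chip;
	uint32_t fw_ver;
	uint8_t  intfver_nic, intfver_vnic, intfver_ri,
		 intfver_iscsi, intfver_fcoe;
};

static int compatible(const struct hdr *a, const struct hdr *b)
{
	if (a->chip == b->chip && a->fw_ver == b->fw_ver)
		return 1;				/* exact same firmware */
	return a->chip == b->chip &&			/* same chip and ... */
	       a->intfver_nic == b->intfver_nic &&	/* ... same API */
	       a->intfver_vnic == b->intfver_vnic &&
	       a->intfver_ri == b->intfver_ri &&
	       a->intfver_iscsi == b->intfver_iscsi &&
	       a->intfver_fcoe == b->intfver_fcoe;
}

int main(void)
{
	struct hdr drv  = { .chip = 4, .fw_ver = 0x01060200, .intfver_nic = 1,
			    .intfver_vnic = 1, .intfver_ri = 1,
			    .intfver_iscsi = 1, .intfver_fcoe = 1 };
	struct hdr card = drv;

	card.fw_ver = 0x01060400;	/* different build, same interfaces */
	printf("compatible = %d\n", compatible(&drv, &card));	/* 1 */
	return 0;
}

This is what allows t4_prep_fw() below to keep running with the card's firmware even when it is not the exact build the driver shipped with.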
- memcpy(adapter->params.api_vers, api_vers,
- sizeof(adapter->params.api_vers));
+/* The firmware in the filesystem is usable, but should it be installed?
+ * This routine explains itself in detail if it indicates the filesystem
+ * firmware should be installed.
+ */
+static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
+ int k, int c)
+{
+ const char *reason;
- if (major < exp_major || (major == exp_major && minor < exp_minor) ||
- (major == exp_major && minor == exp_minor && micro < exp_micro)) {
- dev_err(adapter->pdev_dev,
- "Card has firmware version %u.%u.%u, minimum "
- "supported firmware is %u.%u.%u.\n", major, minor,
- micro, exp_major, exp_minor, exp_micro);
- return -EFAULT;
+ if (!card_fw_usable) {
+ reason = "incompatible or unusable";
+ goto install;
}
- if (major != exp_major) { /* major mismatch - fail */
- dev_err(adapter->pdev_dev,
- "card FW has major version %u, driver wants %u\n",
- major, exp_major);
- return -EINVAL;
+ if (k > c) {
+ reason = "older than the version supported with this driver";
+ goto install;
}
- if (minor == exp_minor && micro == exp_micro)
- return 0; /* perfect match */
+ return 0;
+
+install:
+ dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
+ "installing firmware %u.%u.%u.%u on card.\n",
+ FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
+ FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason,
+ FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
+ FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
- /* Minor/micro version mismatch. Report it but often it's OK. */
return 1;
}
+int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
+ const u8 *fw_data, unsigned int fw_size,
+ struct fw_hdr *card_fw, enum dev_state state,
+ int *reset)
+{
+ int ret, card_fw_usable, fs_fw_usable;
+ const struct fw_hdr *fs_fw;
+ const struct fw_hdr *drv_fw;
+
+ drv_fw = &fw_info->fw_hdr;
+
+ /* Read the header of the firmware on the card */
+ ret = -t4_read_flash(adap, FLASH_FW_START,
+ sizeof(*card_fw) / sizeof(uint32_t),
+ (uint32_t *)card_fw, 1);
+ if (ret == 0) {
+ card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
+ } else {
+ dev_err(adap->pdev_dev,
+ "Unable to read card's firmware header: %d\n", ret);
+ card_fw_usable = 0;
+ }
+
+ if (fw_data != NULL) {
+ fs_fw = (const void *)fw_data;
+ fs_fw_usable = fw_compatible(drv_fw, fs_fw);
+ } else {
+ fs_fw = NULL;
+ fs_fw_usable = 0;
+ }
+
+ if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
+ (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
+ /* Common case: the firmware on the card is an exact match and
+ * the filesystem one is an exact match too, or the filesystem
+ * one is absent/incompatible.
+ */
+ } else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
+ should_install_fs_fw(adap, card_fw_usable,
+ be32_to_cpu(fs_fw->fw_ver),
+ be32_to_cpu(card_fw->fw_ver))) {
+ ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
+ fw_size, 0);
+ if (ret != 0) {
+ dev_err(adap->pdev_dev,
+ "failed to install firmware: %d\n", ret);
+ goto bye;
+ }
+
+ /* Installed successfully, update the cached header too. */
+ memcpy(card_fw, fs_fw, sizeof(*card_fw));
+ card_fw_usable = 1;
+ *reset = 0; /* already reset as part of load_fw */
+ }
+
+ if (!card_fw_usable) {
+ uint32_t d, c, k;
+
+ d = be32_to_cpu(drv_fw->fw_ver);
+ c = be32_to_cpu(card_fw->fw_ver);
+ k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
+
+ dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
+ "chip state %d, "
+ "driver compiled with %d.%d.%d.%d, "
+ "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
+ state,
+ FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d),
+ FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d),
+ FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
+ FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c),
+ FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
+ FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
+ ret = EINVAL;
+ goto bye;
+ }
+
+ /* We're using whatever's on the card and it's known to be good. */
+ adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
+ adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
+
+bye:
+ return ret;
+}
+
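Stripped of the mailbox and flash plumbing, t4_prep_fw() makes a three-way choice between the card's firmware, the /lib/firmware image, and failing outright. A compressed user-space restatement of that priority order (illustrative only, not driver code):

#include <stdio.h>

enum fw_choice { USE_CARD_FW, INSTALL_FS_FW, NO_USABLE_FW };

static enum fw_choice pick_fw(int card_usable, int card_is_drv,
			      int fs_usable, int fs_is_drv,
			      int dev_uninit, int fs_newer_than_card)
{
	/* Common case: card matches the driver exactly, nothing to do. */
	if (card_usable && card_is_drv && (!fs_usable || fs_is_drv))
		return USE_CARD_FW;
	/* Only an uninitialized adapter may be reflashed. */
	if (fs_usable && dev_uninit && (!card_usable || fs_newer_than_card))
		return INSTALL_FS_FW;
	return card_usable ? USE_CARD_FW : NO_USABLE_FW;
}

int main(void)
{
	printf("%d\n", pick_fw(1, 1, 1, 1, 1, 0));	/* 0: USE_CARD_FW */
	printf("%d\n", pick_fw(0, 0, 1, 0, 1, 1));	/* 1: INSTALL_FS_FW */
	return 0;
}

NO_USABLE_FW corresponds to the EINVAL exit above, which logs all three versions (driver, card, filesystem) so the mismatch can be diagnosed from the log alone.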
/**
* t4_flash_erase_sectors - erase a range of flash sectors
* @adapter: the adapter
@@ -1005,62 +1072,6 @@ unsigned int t4_flash_cfg_addr(struct adapter *adapter)
}
/**
- * t4_load_cfg - download config file
- * @adap: the adapter
- * @cfg_data: the cfg text file to write
- * @size: text file size
- *
- * Write the supplied config text file to the card's serial flash.
- */
-int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
-{
- int ret, i, n;
- unsigned int addr;
- unsigned int flash_cfg_start_sec;
- unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
-
- addr = t4_flash_cfg_addr(adap);
- flash_cfg_start_sec = addr / SF_SEC_SIZE;
-
- if (size > FLASH_CFG_MAX_SIZE) {
- dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
- FLASH_CFG_MAX_SIZE);
- return -EFBIG;
- }
-
- i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
- sf_sec_size);
- ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
- flash_cfg_start_sec + i - 1);
- /*
- * If size == 0 then we're simply erasing the FLASH sectors associated
- * with the on-adapter Firmware Configuration File.
- */
- if (ret || size == 0)
- goto out;
-
- /* this will write to the flash up to SF_PAGE_SIZE at a time */
- for (i = 0; i < size; i += SF_PAGE_SIZE) {
- if ((size - i) < SF_PAGE_SIZE)
- n = size - i;
- else
- n = SF_PAGE_SIZE;
- ret = t4_write_flash(adap, addr, n, cfg_data);
- if (ret)
- goto out;
-
- addr += SF_PAGE_SIZE;
- cfg_data += SF_PAGE_SIZE;
- }
-
-out:
- if (ret)
- dev_err(adap->pdev_dev, "config file %s failed %d\n",
- (size == 0 ? "clear" : "download"), ret);
- return ret;
-}
-
-/**
* t4_load_fw - download firmware
* @adap: the adapter
* @fw_data: the firmware image to write
@@ -1368,7 +1379,7 @@ static void pcie_intr_handler(struct adapter *adapter)
PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
pcie_port_intr_info) +
t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
- is_t4(adapter->chip) ?
+ is_t4(adapter->params.chip) ?
pcie_intr_info : t5_pcie_intr_info);
if (fat)
@@ -1782,7 +1793,7 @@ static void xgmac_intr_handler(struct adapter *adap, int port)
{
u32 v, int_cause_reg;
- if (is_t4(adap->chip))
+ if (is_t4(adap->params.chip))
int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
else
int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
@@ -2250,7 +2261,7 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
#define GET_STAT(name) \
t4_read_reg64(adap, \
- (is_t4(adap->chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
+ (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
@@ -2332,7 +2343,7 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
{
u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
- if (is_t4(adap->chip)) {
+ if (is_t4(adap->params.chip)) {
mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
@@ -2374,7 +2385,7 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
int i;
u32 port_cfg_reg;
- if (is_t4(adap->chip))
+ if (is_t4(adap->params.chip))
port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
else
port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
@@ -2387,7 +2398,7 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
return -EINVAL;
#define EPIO_REG(name) \
- (is_t4(adap->chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
+ (is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
T5_PORT_REG(port, MAC_PORT_EPIO_##name))
t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
@@ -2474,7 +2485,7 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
{
int i, off;
- u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn);
+ u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
/* Align on a 2KB boundary.
*/
@@ -2745,7 +2756,7 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
* be doing. The only way out of this state is to RESTART the firmware
* ...
*/
-int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
+static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
int ret = 0;
@@ -2810,7 +2821,7 @@ int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
* the chip since older firmware won't recognize the PCIE_FW.HALT
* flag and automatically RESET itself on startup.
*/
-int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
+static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
{
if (reset) {
/*
@@ -2873,8 +2884,8 @@ int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
* positive errno indicates that the adapter is ~probably~ intact, a
* negative errno indicates that things are looking bad ...
*/
-int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
- const u8 *fw_data, unsigned int size, int force)
+static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
+ const u8 *fw_data, unsigned int size, int force)
{
const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
int reset, ret;
@@ -2899,78 +2910,6 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
return t4_fw_restart(adap, mbox, reset);
}
-
-/**
- * t4_fw_config_file - setup an adapter via a Configuration File
- * @adap: the adapter
- * @mbox: mailbox to use for the FW command
- * @mtype: the memory type where the Configuration File is located
- * @maddr: the memory address where the Configuration File is located
- * @finiver: return value for CF [fini] version
- * @finicsum: return value for CF [fini] checksum
- * @cfcsum: return value for CF computed checksum
- *
- * Issue a command to get the firmware to process the Configuration
- * File located at the specified mtype/maddress. If the Configuration
- * File is processed successfully and return value pointers are
- * provided, the Configuration File "[fini] section version and
- * checksum values will be returned along with the computed checksum.
- * It's up to the caller to decide how it wants to respond to the
- * checksums not matching but it recommended that a prominant warning
- * be emitted in order to help people rapidly identify changed or
- * corrupted Configuration Files.
- *
- * Also note that it's possible to modify things like "niccaps",
- * "toecaps",etc. between processing the Configuration File and telling
- * the firmware to use the new configuration. Callers which want to
- * do this will need to "hand-roll" their own CAPS_CONFIGS commands for
- * Configuration Files if they want to do this.
- */
-int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
- unsigned int mtype, unsigned int maddr,
- u32 *finiver, u32 *finicsum, u32 *cfcsum)
-{
- struct fw_caps_config_cmd caps_cmd;
- int ret;
-
- /*
- * Tell the firmware to process the indicated Configuration File.
- * If there are no errors and the caller has provided return value
- * pointers for the [fini] section version, checksum and computed
- * checksum, pass those back to the caller.
- */
- memset(&caps_cmd, 0, sizeof(caps_cmd));
- caps_cmd.op_to_write =
- htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ);
- caps_cmd.cfvalid_to_len16 =
- htonl(FW_CAPS_CONFIG_CMD_CFVALID |
- FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
- FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
- FW_LEN16(caps_cmd));
- ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd);
- if (ret < 0)
- return ret;
-
- if (finiver)
- *finiver = ntohl(caps_cmd.finiver);
- if (finicsum)
- *finicsum = ntohl(caps_cmd.finicsum);
- if (cfcsum)
- *cfcsum = ntohl(caps_cmd.cfcsum);
-
- /*
- * And now tell the firmware to use the configuration we just loaded.
- */
- caps_cmd.op_to_write =
- htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE);
- caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
- return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL);
-}
-
/**
* t4_fixup_host_params - fix up host-dependent parameters
* @adap: the adapter
@@ -3306,7 +3245,7 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
int i, ret;
struct fw_vi_mac_cmd c;
struct fw_vi_mac_exact *p;
- unsigned int max_naddr = is_t4(adap->chip) ?
+ unsigned int max_naddr = is_t4(adap->params.chip) ?
NUM_MPS_CLS_SRAM_L_INSTANCES :
NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
@@ -3368,7 +3307,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
int ret, mode;
struct fw_vi_mac_cmd c;
struct fw_vi_mac_exact *p = c.u.exact;
- unsigned int max_mac_addr = is_t4(adap->chip) ?
+ unsigned int max_mac_addr = is_t4(adap->params.chip) ?
NUM_MPS_CLS_SRAM_L_INSTANCES :
NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
@@ -3699,13 +3638,14 @@ int t4_prep_adapter(struct adapter *adapter)
{
int ret, ver;
uint16_t device_id;
+ u32 pl_rev;
ret = t4_wait_dev_ready(adapter);
if (ret < 0)
return ret;
get_pci_mode(adapter, &adapter->params.pci);
- adapter->params.rev = t4_read_reg(adapter, PL_REV);
+ pl_rev = G_REV(t4_read_reg(adapter, PL_REV));
ret = get_flash_params(adapter);
if (ret < 0) {
@@ -3717,14 +3657,13 @@ int t4_prep_adapter(struct adapter *adapter)
*/
pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
ver = device_id >> 12;
+ adapter->params.chip = 0;
switch (ver) {
case CHELSIO_T4:
- adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4,
- adapter->params.rev);
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
break;
case CHELSIO_T5:
- adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5,
- adapter->params.rev);
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
break;
default:
dev_err(adapter->pdev_dev, "Device %d is not supported\n",
@@ -3732,9 +3671,6 @@ int t4_prep_adapter(struct adapter *adapter)
return -EINVAL;
}
- /* Reassign the updated revision field */
- adapter->params.rev = adapter->chip;
-
init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
/*
@@ -3746,6 +3682,109 @@ int t4_prep_adapter(struct adapter *adapter)
return 0;
}
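With the revision now read from PL_REV, the packed chip code is simply a version/revision nibble pair. A round-trip check using the exact macros from this patch:

#include <assert.h>

#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
#define CHELSIO_T5 0x5

int main(void)
{
	int code = CHELSIO_CHIP_CODE(CHELSIO_T5, 1);	/* 0x51 */

	assert(CHELSIO_CHIP_VERSION(code) == 0x5);
	assert(CHELSIO_CHIP_RELEASE(code) == 0x1);
	return 0;
}

Masking with 0xf keeps the two nibbles cleanly separable when decoding.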
+/**
+ * t4_init_tp_params - initialize adap->params.tp
+ * @adap: the adapter
+ *
+ * Initialize various fields of the adapter's TP Parameters structure.
+ */
+int t4_init_tp_params(struct adapter *adap)
+{
+ int chan;
+ u32 v;
+
+ v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
+ adap->params.tp.tre = TIMERRESOLUTION_GET(v);
+ adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
+
+ /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
+ for (chan = 0; chan < NCHAN; chan++)
+ adap->params.tp.tx_modq[chan] = chan;
+
+ /* Cache the adapter's Compressed Filter Mode and global Ingress
+ * Configuration.
+ */
+ t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ &adap->params.tp.vlan_pri_map, 1,
+ TP_VLAN_PRI_MAP);
+ t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ &adap->params.tp.ingress_config, 1,
+ TP_INGRESS_CONFIG);
+
+ /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
+ * shift positions of several elements of the Compressed Filter Tuple
+ * for this adapter which we need frequently ...
+ */
+ adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
+ adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
+ adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
+ adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
+ F_PROTOCOL);
+
+ /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
+ * represents the presence of an Outer VLAN instead of a VNIC ID.
+ */
+ if ((adap->params.tp.ingress_config & F_VNIC) == 0)
+ adap->params.tp.vnic_shift = -1;
+
+ return 0;
+}
+
+/**
+ * t4_filter_field_shift - calculate filter field shift
+ * @adap: the adapter
+ * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
+ *
+ * Return the shift position of a filter field within the Compressed
+ * Filter Tuple. The filter field is specified via its selection bit
+ * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
+ */
+int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
+{
+ unsigned int filter_mode = adap->params.tp.vlan_pri_map;
+ unsigned int sel;
+ int field_shift;
+
+ if ((filter_mode & filter_sel) == 0)
+ return -1;
+
+ for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
+ switch (filter_mode & sel) {
+ case F_FCOE:
+ field_shift += W_FT_FCOE;
+ break;
+ case F_PORT:
+ field_shift += W_FT_PORT;
+ break;
+ case F_VNIC_ID:
+ field_shift += W_FT_VNIC_ID;
+ break;
+ case F_VLAN:
+ field_shift += W_FT_VLAN;
+ break;
+ case F_TOS:
+ field_shift += W_FT_TOS;
+ break;
+ case F_PROTOCOL:
+ field_shift += W_FT_PROTOCOL;
+ break;
+ case F_ETHERTYPE:
+ field_shift += W_FT_ETHERTYPE;
+ break;
+ case F_MACMATCH:
+ field_shift += W_FT_MACMATCH;
+ break;
+ case F_MPSHITTYPE:
+ field_shift += W_FT_MPSHITTYPE;
+ break;
+ case F_FRAGMENTATION:
+ field_shift += W_FT_FRAGMENTATION;
+ break;
+ }
+ }
+ return field_shift;
+}
+
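The shift of a field is just the summed widths of every enabled field below it in selection-bit order (S_FCOE = 0 up through S_FRAGMENTATION = 9). A minimal user-space sketch of the same walk; the filter mode chosen below is hypothetical:

#include <stdio.h>

/* Selection bits and widths for the low fields, mirroring the
 * S_* and W_FT_* values this patch adds to t4_regs.h. */
#define F_FCOE		(1U << 0)
#define F_PORT		(1U << 1)
#define F_VNIC_ID	(1U << 2)
#define F_VLAN		(1U << 3)
#define F_TOS		(1U << 4)
#define F_PROTOCOL	(1U << 5)

static const int width[] = { 1, 3, 17, 17, 8, 8 };	/* FCOE..PROTOCOL */

static int field_shift(unsigned int mode, unsigned int sel)
{
	int bit, shift = 0;

	if (!(mode & sel))
		return -1;		/* field absent from the tuple */
	for (bit = 0; (1U << bit) < sel; bit++)
		if (mode & (1U << bit))
			shift += width[bit];
	return shift;
}

int main(void)
{
	unsigned int mode = F_PORT | F_VLAN | F_PROTOCOL;	/* hypothetical */

	printf("port=%d vlan=%d proto=%d vnic=%d\n",
	       field_shift(mode, F_PORT),	/* 0 */
	       field_shift(mode, F_VLAN),	/* 3 = W_FT_PORT */
	       field_shift(mode, F_PROTOCOL),	/* 20 = 3 + 17 */
	       field_shift(mode, F_VNIC_ID));	/* -1: not selected */
	return 0;
}

These match the shifts assumed in the ntuple-packing sketch after cxgb4_select_ntuple() above.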
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
u8 addr[6];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index ef146c0ba481..4082522d8140 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1092,6 +1092,11 @@
#define PL_REV 0x1943c
+#define S_REV 0
+#define M_REV 0xfU
+#define V_REV(x) ((x) << S_REV)
+#define G_REV(x) (((x) >> S_REV) & M_REV)
+
#define LE_DB_CONFIG 0x19c04
#define HASHEN 0x00100000U
@@ -1166,10 +1171,50 @@
#define A_TP_TX_SCHED_PCMD 0x25
+#define S_VNIC 11
+#define V_VNIC(x) ((x) << S_VNIC)
+#define F_VNIC V_VNIC(1U)
+
+#define S_FRAGMENTATION 9
+#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION)
+#define F_FRAGMENTATION V_FRAGMENTATION(1U)
+
+#define S_MPSHITTYPE 8
+#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE)
+#define F_MPSHITTYPE V_MPSHITTYPE(1U)
+
+#define S_MACMATCH 7
+#define V_MACMATCH(x) ((x) << S_MACMATCH)
+#define F_MACMATCH V_MACMATCH(1U)
+
+#define S_ETHERTYPE 6
+#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE)
+#define F_ETHERTYPE V_ETHERTYPE(1U)
+
+#define S_PROTOCOL 5
+#define V_PROTOCOL(x) ((x) << S_PROTOCOL)
+#define F_PROTOCOL V_PROTOCOL(1U)
+
+#define S_TOS 4
+#define V_TOS(x) ((x) << S_TOS)
+#define F_TOS V_TOS(1U)
+
+#define S_VLAN 3
+#define V_VLAN(x) ((x) << S_VLAN)
+#define F_VLAN V_VLAN(1U)
+
+#define S_VNIC_ID 2
+#define V_VNIC_ID(x) ((x) << S_VNIC_ID)
+#define F_VNIC_ID V_VNIC_ID(1U)
+
#define S_PORT 1
#define V_PORT(x) ((x) << S_PORT)
#define F_PORT V_PORT(1U)
+#define S_FCOE 0
+#define V_FCOE(x) ((x) << S_FCOE)
+#define F_FCOE V_FCOE(1U)
+
#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
@@ -1199,4 +1244,46 @@
#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
+#define A_PL_VF_REV 0x4
+#define A_PL_VF_WHOAMI 0x0
+#define A_PL_VF_REVISION 0x8
+
+#define S_CHIPID 4
+#define M_CHIPID 0xfU
+#define V_CHIPID(x) ((x) << S_CHIPID)
+#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID)
+
+/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
+ * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP
+ * selects for a particular field being present. These fields, when present
+ * in the Compressed Filter Tuple, have the following widths in bits.
+ */
+#define W_FT_FCOE 1
+#define W_FT_PORT 3
+#define W_FT_VNIC_ID 17
+#define W_FT_VLAN 17
+#define W_FT_TOS 8
+#define W_FT_PROTOCOL 8
+#define W_FT_ETHERTYPE 16
+#define W_FT_MACMATCH 9
+#define W_FT_MPSHITTYPE 3
+#define W_FT_FRAGMENTATION 1
+
+/* Some of the Compressed Filter Tuple fields have internal structure. These
+ * bit shifts/masks describe those structures. All shifts are relative to the
+ * base position of the fields within the Compressed Filter Tuple
+ */
+#define S_FT_VLAN_VLD 16
+#define V_FT_VLAN_VLD(x) ((x) << S_FT_VLAN_VLD)
+#define F_FT_VLAN_VLD V_FT_VLAN_VLD(1U)
+
+#define S_FT_VNID_ID_VF 0
+#define V_FT_VNID_ID_VF(x) ((x) << S_FT_VNID_ID_VF)
+
+#define S_FT_VNID_ID_PF 7
+#define V_FT_VNID_ID_PF(x) ((x) << S_FT_VNID_ID_PF)
+
+#define S_FT_VNID_ID_VLD 16
+#define V_FT_VNID_ID_VLD(x) ((x) << S_FT_VNID_ID_VLD)
+
#endif /* __T4_REGS_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 6f77ac487743..74fea74ce0aa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -2157,7 +2157,7 @@ struct fw_debug_cmd {
struct fw_hdr {
u8 ver;
- u8 reserved1;
+ u8 chip; /* terminator chip type */
__be16 len512; /* bin length in units of 512-bytes */
__be32 fw_ver; /* firmware version */
__be32 tp_microcode_ver;
@@ -2176,6 +2176,11 @@ struct fw_hdr {
__be32 reserved6[23];
};
+enum fw_hdr_chip {
+ FW_HDR_CHIP_T4,
+ FW_HDR_CHIP_T5
+};
+
#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff)
#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff)
#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index be5c7ef6ca93..68eaa9c88c7d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -344,7 +344,6 @@ struct adapter {
unsigned long registered_device_map;
unsigned long open_device_map;
unsigned long flags;
- enum chip_type chip;
struct adapter_params params;
/* queue and interrupt resources */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 5f90ec5f7519..0899c0983594 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1064,7 +1064,7 @@ static inline unsigned int mk_adap_vers(const struct adapter *adapter)
/*
* Chip version 4, revision 0x3f (cxgb4vf).
*/
- return CHELSIO_CHIP_VERSION(adapter->chip) | (0x3f << 10);
+ return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10);
}
/*
@@ -1551,9 +1551,13 @@ static void cxgb4vf_get_regs(struct net_device *dev,
reg_block_dump(adapter, regbuf,
T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);
+
+ /* T5 adds new registers in the PL Register map.
+ */
reg_block_dump(adapter, regbuf,
T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
- T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_LAST);
+ T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
+ ? A_PL_VF_WHOAMI : A_PL_VF_REVISION));
reg_block_dump(adapter, regbuf,
T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
@@ -2087,6 +2091,7 @@ static int adap_init0(struct adapter *adapter)
unsigned int ethqsets;
int err;
u32 param, val = 0;
+ unsigned int chipid;
/*
* Wait for the device to become ready before proceeding ...
@@ -2114,12 +2119,14 @@ static int adap_init0(struct adapter *adapter)
return err;
}
+ adapter->params.chip = 0;
switch (adapter->pdev->device >> 12) {
case CHELSIO_T4:
- adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
+ adapter->params.chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
break;
case CHELSIO_T5:
- adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5, 0);
+ chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV));
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
break;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 8475c4cda9e4..0a89963c48ce 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -537,7 +537,7 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
*/
if (fl->pend_cred >= FL_PER_EQ_UNIT) {
val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT);
- if (!is_t4(adapter->chip))
+ if (!is_t4(adapter->params.chip))
val |= DBTYPE(1);
wmb();
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 53cbfed21d0b..f412d0fa0850 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -39,21 +39,28 @@
#include "../cxgb4/t4fw_api.h"
#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
-#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
+#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
+/* All T4 and later chips have their PCI-E Device IDs encoded as 0xVFPP where:
+ *
+ * V = "4" for T4; "5" for T5, etc. or
+ * = "a" for T4 FPGA; "b" for T4 FPGA, etc.
+ * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs
+ * PP = adapter product designation
+ */
#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5
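Decoding a couple of invented IDs under this scheme: 0x4001 would be a T4 PF (function nibble 0) for product 0x01, and 0x5801 the matching T5 VF. Only the top nibble matters to the probe code, which is why it switches on device_id >> 12; a small decode sketch:

#include <stdio.h>

static void decode(unsigned int device_id)
{
	unsigned int ver  = (device_id >> 12) & 0xf;	/* chip version */
	unsigned int func = (device_id >> 8) & 0xf;	/* 8 means VF */
	unsigned int prod = device_id & 0xff;		/* product code */

	printf("id %#06x: T%u %s, product %#04x\n",
	       device_id, ver, func == 8 ? "VF" : "PF", prod);
}

int main(void)
{
	decode(0x4001);		/* T4 PF, product 0x01 (invented) */
	decode(0x5801);		/* T5 VF, product 0x01 (invented) */
	return 0;
}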
enum chip_type {
- T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0),
- T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
- T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
+ T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
+ T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
T4_FIRST_REV = T4_A1,
- T4_LAST_REV = T4_A3,
+ T4_LAST_REV = T4_A2,
- T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
- T5_FIRST_REV = T5_A1,
+ T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
+ T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
+ T5_FIRST_REV = T5_A0,
T5_LAST_REV = T5_A1,
};
@@ -203,6 +210,7 @@ struct adapter_params {
struct vpd_params vpd; /* Vital Product Data */
struct rss_params rss; /* Receive Side Scaling */
struct vf_resources vfres; /* Virtual Function Resource limits */
+ enum chip_type chip; /* chip code */
u8 nports; /* # of Ethernet "ports" */
};
@@ -253,14 +261,13 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
static inline int is_t4(enum chip_type chip)
{
- return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV);
+ return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4;
}
int t4vf_wait_dev_ready(struct adapter *);
int t4vf_port_init(struct adapter *, int);
int t4vf_fw_reset(struct adapter *);
-int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
int t4vf_get_sge_params(struct adapter *);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 9f96dc3bb112..25dfeb8f28ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -363,8 +363,8 @@ int t4vf_fw_reset(struct adapter *adapter)
* Reads the values of firmware or device parameters. Up to 7 parameters
* can be queried at once.
*/
-int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
- const u32 *params, u32 *vals)
+static int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
+ const u32 *params, u32 *vals)
{
int i, ret;
struct fw_params_cmd cmd, rpl;
@@ -1027,7 +1027,7 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
unsigned nfilters = 0;
unsigned int rem = naddr;
struct fw_vi_mac_cmd cmd, rpl;
- unsigned int max_naddr = is_t4(adapter->chip) ?
+ unsigned int max_naddr = is_t4(adapter->params.chip) ?
NUM_MPS_CLS_SRAM_L_INSTANCES :
NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
@@ -1121,7 +1121,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
struct fw_vi_mac_exact *p = &cmd.u.exact[0];
size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
u.exact[1]), 16);
- unsigned int max_naddr = is_t4(adapter->chip) ?
+ unsigned int max_naddr = is_t4(adapter->params.chip) ?
NUM_MPS_CLS_SRAM_L_INSTANCES :
NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index ff78dfaec508..b740bfce72ef 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1036,11 +1036,12 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
skb->protocol = eth_type_trans(skb, netdev);
skb_record_rx_queue(skb, q_number);
if (netdev->features & NETIF_F_RXHASH) {
- skb->rxhash = rss_hash;
- if (rss_type & (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
- NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
- NIC_CFG_RSS_HASH_TYPE_TCP_IPV4))
- skb->l4_rxhash = true;
+ skb_set_hash(skb, rss_hash,
+ (rss_type &
+ (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
+ NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
+ NIC_CFG_RSS_HASH_TYPE_TCP_IPV4)) ?
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c
index 43464f0a4f99..e6a83198c3dd 100644
--- a/drivers/net/ethernet/cisco/enic/enic_pp.c
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.c
@@ -162,7 +162,7 @@ static int enic_are_pp_different(struct enic_port_profile *pp1,
return strcmp(pp1->name, pp2->name) | !!memcmp(pp1->instance_uuid,
pp2->instance_uuid, PORT_UUID_MAX) |
!!memcmp(pp1->host_uuid, pp2->host_uuid, PORT_UUID_MAX) |
- !!memcmp(pp1->mac_addr, pp2->mac_addr, ETH_ALEN);
+ !ether_addr_equal(pp1->mac_addr, pp2->mac_addr);
}
static int enic_pp_preassociate(struct enic *enic, int vf,
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index a5397b130724..aa4ee385091f 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -1192,9 +1192,6 @@ static int uli526x_suspend(struct pci_dev *pdev, pm_message_t state)
ULI526X_DBUG(0, "uli526x_suspend", 0);
- if (!netdev_priv(dev))
- return 0;
-
pci_save_state(pdev);
if (!netif_running(dev))
@@ -1228,9 +1225,6 @@ static int uli526x_resume(struct pci_dev *pdev)
ULI526X_DBUG(0, "uli526x_resume", 0);
- if (!netdev_priv(dev))
- return 0;
-
pci_restore_state(pdev);
if (!netif_running(dev))
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index f4825db5d179..4ccaf9af6fc9 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -104,6 +104,7 @@ static inline char *nic_name(struct pci_dev *pdev)
#define BE3_MAX_RSS_QS 16
#define BE3_MAX_TX_QS 16
#define BE3_MAX_EVT_QS 16
+#define BE3_SRIOV_MAX_EVT_QS 8
#define MAX_RX_QS 32
#define MAX_EVT_QS 32
@@ -480,7 +481,7 @@ struct be_adapter {
struct list_head entry;
u32 flash_status;
- struct completion flash_compl;
+ struct completion et_cmd_compl;
struct be_resources res; /* resources available for the func */
u16 num_vfs; /* Number of VFs provisioned by PF */
@@ -503,6 +504,7 @@ struct be_adapter {
};
#define be_physfn(adapter) (!adapter->virtfn)
+#define be_virtfn(adapter) (adapter->virtfn)
#define sriov_enabled(adapter) (adapter->num_vfs > 0)
#define sriov_want(adapter) (be_physfn(adapter) && \
(num_vfs || pci_num_vf(adapter->pdev)))
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index dbcd5262c016..94c35c8d799d 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -141,11 +141,17 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
subsystem = resp_hdr->subsystem;
}
+ if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
+ subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
+ complete(&adapter->et_cmd_compl);
+ return 0;
+ }
+
if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
(opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
(subsystem == CMD_SUBSYSTEM_COMMON)) {
adapter->flash_status = compl_status;
- complete(&adapter->flash_compl);
+ complete(&adapter->et_cmd_compl);
}
if (compl_status == MCC_STATUS_SUCCESS) {
@@ -1032,6 +1038,13 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
} else {
req->hdr.version = 2;
req->page_size = 1; /* 1 for 4K */
+
+ /* coalesce-wm field in this cmd is not relevant to Lancer.
+ * Lancer uses COMMON_MODIFY_CQ to set this field
+ */
+ if (!lancer_chip(adapter))
+ AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
+ ctxt, coalesce_wm);
AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
no_delay);
AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
@@ -2010,6 +2023,9 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
0x3ea83c02, 0x4a110304};
int status;
+ if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
+ return 0;
+
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -2153,7 +2169,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
be_mcc_notify(adapter);
spin_unlock_bh(&adapter->mcc_lock);
- if (!wait_for_completion_timeout(&adapter->flash_compl,
+ if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(60000)))
status = -1;
else
@@ -2248,8 +2264,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
be_mcc_notify(adapter);
spin_unlock_bh(&adapter->mcc_lock);
- if (!wait_for_completion_timeout(&adapter->flash_compl,
- msecs_to_jiffies(40000)))
+ if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
+ msecs_to_jiffies(40000)))
status = -1;
else
status = adapter->flash_status;
@@ -2360,6 +2376,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_loopback_test *req;
+ struct be_cmd_resp_loopback_test *resp;
int status;
spin_lock_bh(&adapter->mcc_lock);
@@ -2374,8 +2391,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
- req->hdr.timeout = cpu_to_le32(4);
+ req->hdr.timeout = cpu_to_le32(15);
req->pattern = cpu_to_le64(pattern);
req->src_port = cpu_to_le32(port_num);
req->dest_port = cpu_to_le32(port_num);
@@ -2383,12 +2400,15 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
req->num_pkts = cpu_to_le32(num_pkts);
req->loopback_type = cpu_to_le32(loopback_type);
- status = be_mcc_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
- status = le32_to_cpu(resp->status);
- }
+ be_mcc_notify(adapter);
+
+ spin_unlock_bh(&adapter->mcc_lock);
+
+ wait_for_completion(&adapter->et_cmd_compl);
+ resp = embedded_payload(wrb);
+ status = le32_to_cpu(resp->status);
+ return status;
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 3e2162121601..dc88782185f2 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -64,6 +64,9 @@
#define SLIPORT_ERROR_NO_RESOURCE1 0x2
#define SLIPORT_ERROR_NO_RESOURCE2 0x9
+#define SLIPORT_ERROR_FW_RESET1 0x2
+#define SLIPORT_ERROR_FW_RESET2 0x0
+
/********* Memory BAR register ************/
#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index abde97471636..3acf137b5784 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -287,7 +287,7 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
/* The MAC change did not happen, either due to lack of privilege
* or PF didn't pre-provision.
*/
- if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
+ if (!ether_addr_equal(addr->sa_data, mac)) {
status = -EPERM;
goto err;
}
@@ -1581,7 +1581,7 @@ static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
skb->protocol = eth_type_trans(skb, netdev);
skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
if (netdev->features & NETIF_F_RXHASH)
- skb->rxhash = rxcp->rss_hash;
+ skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
skb_mark_napi_id(skb, napi);
if (rxcp->vlanf)
@@ -1639,7 +1639,7 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
if (adapter->netdev->features & NETIF_F_RXHASH)
- skb->rxhash = rxcp->rss_hash;
+ skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
skb_mark_napi_id(skb, napi);
if (rxcp->vlanf)
@@ -2464,8 +2464,16 @@ void be_detect_error(struct be_adapter *adapter)
*/
if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
adapter->hw_error = true;
- dev_err(&adapter->pdev->dev,
- "Error detected in the card\n");
+ /* Do not log error messages if it's a FW reset */
+ if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
+ sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
+ dev_info(&adapter->pdev->dev,
+ "Firmware update in progress\n");
+ return;
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "Error detected in the card\n");
+ }
}
if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
@@ -2658,8 +2666,8 @@ static int be_close(struct net_device *netdev)
be_roce_dev_close(adapter);
- for_all_evt_queues(adapter, eqo, i) {
- if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
+ if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
+ for_all_evt_queues(adapter, eqo, i) {
napi_disable(&eqo->napi);
be_disable_busy_poll(eqo);
}
@@ -2736,13 +2744,16 @@ static int be_rx_qs_create(struct be_adapter *adapter)
if (!BEx_chip(adapter))
adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
RSS_ENABLE_UDP_IPV6;
+ } else {
+ /* Disable RSS if only the default RX queue is created */
+ adapter->rss_flags = RSS_ENABLE_NONE;
+ }
- rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
- 128);
- if (rc) {
- adapter->rss_flags = 0;
- return rc;
- }
+ rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
+ 128);
+ if (rc) {
+ adapter->rss_flags = RSS_ENABLE_NONE;
+ return rc;
}
/* First time posting */
@@ -2932,28 +2943,35 @@ static void be_cancel_worker(struct be_adapter *adapter)
}
}
-static int be_clear(struct be_adapter *adapter)
+static void be_mac_clear(struct be_adapter *adapter)
{
int i;
+ if (adapter->pmac_id) {
+ for (i = 0; i < (adapter->uc_macs + 1); i++)
+ be_cmd_pmac_del(adapter, adapter->if_handle,
+ adapter->pmac_id[i], 0);
+ adapter->uc_macs = 0;
+
+ kfree(adapter->pmac_id);
+ adapter->pmac_id = NULL;
+ }
+}
+
+static int be_clear(struct be_adapter *adapter)
+{
be_cancel_worker(adapter);
if (sriov_enabled(adapter))
be_vf_clear(adapter);
/* delete the primary mac along with the uc-mac list */
- for (i = 0; i < (adapter->uc_macs + 1); i++)
- be_cmd_pmac_del(adapter, adapter->if_handle,
- adapter->pmac_id[i], 0);
- adapter->uc_macs = 0;
+ be_mac_clear(adapter);
be_cmd_if_destroy(adapter, adapter->if_handle, 0);
be_clear_queues(adapter);
- kfree(adapter->pmac_id);
- adapter->pmac_id = NULL;
-
be_msix_disable(adapter);
return 0;
}
@@ -3109,11 +3127,11 @@ static void BEx_get_resources(struct be_adapter *adapter,
{
struct pci_dev *pdev = adapter->pdev;
bool use_sriov = false;
+ int max_vfs;
- if (BE3_chip(adapter) && sriov_want(adapter)) {
- int max_vfs;
+ max_vfs = pci_sriov_get_totalvfs(pdev);
- max_vfs = pci_sriov_get_totalvfs(pdev);
+ if (BE3_chip(adapter) && sriov_want(adapter)) {
res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
use_sriov = res->max_vfs;
}
@@ -3144,7 +3162,11 @@ static void BEx_get_resources(struct be_adapter *adapter,
BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
res->max_rx_qs = res->max_rss_qs + 1;
- res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;
+ if (be_physfn(adapter))
+ res->max_evt_qs = (max_vfs > 0) ?
+ BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
+ else
+ res->max_evt_qs = 1;
res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
@@ -3253,12 +3275,10 @@ static int be_mac_setup(struct be_adapter *adapter)
memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
}
- /* On BE3 VFs this cmd may fail due to lack of privilege.
- * Ignore the failure as in this case pmac_id is fetched
- * in the IFACE_CREATE cmd.
- */
- be_cmd_pmac_add(adapter, mac, adapter->if_handle,
- &adapter->pmac_id[0], 0);
+ /* For BE3-R VFs, the PF programs the initial MAC address */
+ if (!(BEx_chip(adapter) && be_virtfn(adapter)))
+ be_cmd_pmac_add(adapter, mac, adapter->if_handle,
+ &adapter->pmac_id[0], 0);
return 0;
}
@@ -3814,6 +3834,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
}
if (change_status == LANCER_FW_RESET_NEEDED) {
+ dev_info(&adapter->pdev->dev,
+ "Resetting adapter to activate new FW\n");
status = lancer_physdev_ctrl(adapter,
PHYSDEV_CONTROL_FW_RESET_MASK);
if (status) {
@@ -4190,7 +4212,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
- init_completion(&adapter->flash_compl);
+ init_completion(&adapter->et_cmd_compl);
pci_save_state(adapter->pdev);
return 0;
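The rename above generalizes a completion that was previously flash-specific. For readers unfamiliar with the API, a generic sketch of the wait/signal pattern (names are illustrative, not be2net code):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static DECLARE_COMPLETION(cmd_done);	/* hypothetical completion */

static int issue_cmd_and_wait(void)
{
	reinit_completion(&cmd_done);
	/* ... kick the command off to the hardware here ... */
	if (!wait_for_completion_timeout(&cmd_done, msecs_to_jiffies(4000)))
		return -ETIMEDOUT;
	return 0;
}

/* called from the command-completion interrupt path */
static void cmd_done_irq(void)
{
	complete(&cmd_done);
}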
@@ -4365,13 +4387,13 @@ static int lancer_recover_func(struct be_adapter *adapter)
goto err;
}
- dev_err(dev, "Error recovery successful\n");
+ dev_err(dev, "Adapter recovery successful\n");
return 0;
err:
if (status == -EAGAIN)
dev_err(dev, "Waiting for resource provisioning\n");
else
- dev_err(dev, "Error recovery failed\n");
+ dev_err(dev, "Adapter recovery failed\n");
return status;
}
@@ -4599,6 +4621,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
if (adapter->wol)
be_setup_wol(adapter, true);
+ be_intr_set(adapter, false);
cancel_delayed_work_sync(&adapter->func_recovery_work);
netif_device_detach(netdev);
@@ -4634,6 +4657,7 @@ static int be_resume(struct pci_dev *pdev)
if (status)
return status;
+ be_intr_set(adapter, true);
/* tell fw we're ready to fire cmds */
status = be_cmd_fw_init(adapter);
if (status)
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 0120217a16dd..3b8d6d19ff05 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -339,7 +339,8 @@ struct fec_enet_private {
void fec_ptp_init(struct platform_device *pdev);
void fec_ptp_start_cyclecounter(struct net_device *ndev);
-int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd);
+int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
+int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
/****************************************************************************/
#endif /* FEC_H */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 4cbebf3d80eb..6530177d53e7 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -98,10 +98,6 @@ static void set_multicast_list(struct net_device *ndev);
* detected as not set during a prior frame transmission, then the
* ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
* were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
- * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
- * detected as not set during a prior frame transmission, then the
- * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
- * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
* frames not being transmitted until there is a 0-to-1 transition on
* ENET_TDAR[TDAR].
*/
@@ -385,7 +381,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
* data.
*/
bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
- FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
+ skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
bdp->cbd_bufaddr = 0;
fep->tx_skbuff[index] = NULL;
@@ -432,6 +428,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* If this was the last BD in the ring, start at the beginning again. */
bdp = fec_enet_get_nextdesc(bdp, fep);
+ skb_tx_timestamp(skb);
+
fep->cur_tx = bdp;
if (fep->cur_tx == fep->dirty_tx)
@@ -440,8 +438,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* Trigger transmission start */
writel(0, fep->hwp + FEC_X_DES_ACTIVE);
- skb_tx_timestamp(skb);
-
return NETDEV_TX_OK;
}
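The two hunks above move skb_tx_timestamp() ahead of the doorbell write: once FEC_X_DES_ACTIVE is written the hardware may complete the frame and the cleanup path may free the skb, so the timestamp has to be taken first. The ordering in isolation (a sketch, not the driver's exact code):

#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Timestamp before triggering transmission; after the doorbell write
 * the TX completion path may free the skb at any moment.
 */
static netdev_tx_t xmit_tail(struct sk_buff *skb, void __iomem *doorbell)
{
	skb_tx_timestamp(skb);
	writel(0, doorbell);	/* start transmission */
	return NETDEV_TX_OK;
}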
@@ -779,11 +775,10 @@ fec_enet_tx(struct net_device *ndev)
else
index = bdp - fep->tx_bd_base;
- dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
- FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
- bdp->cbd_bufaddr = 0;
-
skb = fep->tx_skbuff[index];
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len,
+ DMA_TO_DEVICE);
+ bdp->cbd_bufaddr = 0;
/* Check for errors. */
if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
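Both hunks enforce the DMA API's pairing rule: dma_unmap_single() must be given the same size that was mapped, and only the bytes the driver actually owns (skb->len) should be mapped rather than the fixed FEC_ENET_TX_FRSIZE. A sketch of the pairing (helper names are illustrative):

#include <linux/dma-mapping.h>

static dma_addr_t map_tx_buf(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	return dma_mapping_error(dev, handle) ? 0 : handle;	/* 0: caller drops frame */
}

static void unmap_tx_buf(struct device *dev, dma_addr_t handle, size_t len)
{
	/* must be the same len that was passed to dma_map_single() */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}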
@@ -1684,8 +1679,12 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
if (!phydev)
return -ENODEV;
- if (cmd == SIOCSHWTSTAMP && fep->bufdesc_ex)
- return fec_ptp_ioctl(ndev, rq, cmd);
+ if (fep->bufdesc_ex) {
+ if (cmd == SIOCSHWTSTAMP)
+ return fec_ptp_set(ndev, rq);
+ if (cmd == SIOCGHWTSTAMP)
+ return fec_ptp_get(ndev, rq);
+ }
return phy_mii_ioctl(phydev, rq, cmd);
}
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 5007e4f9fff9..3a74ea48fd40 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -274,7 +274,7 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
* @ifreq: ioctl data
* @cmd: particular ioctl requested
*/
-int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -321,6 +321,20 @@ int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
-EFAULT : 0;
}
+int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ config.tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config.rx_filter = (fep->hwts_rx_en ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
/**
* fec_time_keep - call timecounter_read every second to avoid timer overrun
* because ENET only supports a 32-bit counter, which would overflow in 4s
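fec_ptp_get() above is the new SIOCGHWTSTAMP handler: it reports the current timestamping state without changing it, mirroring the SIOCSHWTSTAMP setter. From userspace the query looks roughly like this (the interface name is an example):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	memset(&cfg, 0, sizeof(cfg));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example interface */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) < 0)		/* read-only query */
		perror("SIOCGHWTSTAMP");
	else
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	close(fd);
	return 0;
}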
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index b14d7904a075..365342d293e8 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -795,8 +795,7 @@ err_grp_init:
return err;
}
-static int gfar_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd)
+static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
struct hwtstamp_config config;
struct gfar_private *priv = netdev_priv(netdev);
@@ -845,7 +844,20 @@ static int gfar_hwtstamp_ioctl(struct net_device *netdev,
-EFAULT : 0;
}
-/* Ioctl MII Interface */
+static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ struct gfar_private *priv = netdev_priv(netdev);
+
+ config.flags = 0;
+ config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config.rx_filter = (priv->hwts_rx_en ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct gfar_private *priv = netdev_priv(dev);
@@ -854,7 +866,9 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return -EINVAL;
if (cmd == SIOCSHWTSTAMP)
- return gfar_hwtstamp_ioctl(dev, rq, cmd);
+ return gfar_hwtstamp_set(dev, rq);
+ if (cmd == SIOCGHWTSTAMP)
+ return gfar_hwtstamp_get(dev, rq);
if (!priv->phydev)
return -ENODEV;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 5548b6d00c31..72291a8904a9 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -435,11 +435,6 @@ static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
QE_CR_PROTOCOL_ETHERNET, 0);
}
-static inline int compare_addr(u8 **addr1, u8 **addr2)
-{
- return memcmp(addr1, addr2, ETH_ALEN);
-}
-
#ifdef DEBUG
static void get_statistics(struct ucc_geth_private *ugeth,
struct ucc_geth_tx_firmware_statistics *
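The removed helper took u8 ** arguments and passed them straight to memcmp(), so it compared the pointer storage itself, not the six address bytes; its removal with no caller updates suggests it was dead code anyway. Had such a helper been wanted, the byte-wise comparison would look like this (illustrative):

#include <linux/etherdevice.h>

static inline bool same_ether_addr(const u8 *addr1, const u8 *addr2)
{
	return ether_addr_equal(addr1, addr2);	/* compares the bytes, not the pointers */
}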
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 2d1c6bdd3618..7628e0fd8455 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3033,7 +3033,7 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
dev->hw_features = NETIF_F_SG | NETIF_F_TSO |
NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX;
- dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO |
+ dev->features = NETIF_F_SG | NETIF_F_TSO |
NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 952d795230a4..cde0fd941f0c 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Copyright (C) IBM Corporation, 2003, 2010
*
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 84066bafe057..451ba7949e15 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Copyright (C) IBM Corporation, 2003, 2010
*
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 149ac85b5f9e..9fb2eb8cf152 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -220,12 +220,12 @@ config IXGBE_DCB
If unsure, say N.
config IXGBEVF
- tristate "Intel(R) 82599 Virtual Function Ethernet support"
+ tristate "Intel(R) 10GbE PCI Express Virtual Function Ethernet support"
depends on PCI_MSI
---help---
- This driver supports Intel(R) 82599 virtual functions. For more
- information on how to identify your adapter, go to the Adapter &
- Driver ID Guide at:
+ This driver supports Intel(R) PCI Express virtual functions for the
+ Intel(R) ixgbe driver. For more information on how to identify your
+ adapter, go to the Adapter & Driver ID Guide at:
<http://support.intel.com/support/network/sb/CS-008441.htm>
@@ -259,4 +259,36 @@ config I40E
To compile this driver as a module, choose M here. The module
will be called i40e.
+config I40E_VXLAN
+ bool "Virtual eXtensible Local Area Network Support"
+ default n
+ depends on I40E && VXLAN && !(I40E=y && VXLAN=m)
+ ---help---
+ This allows one to create VXLAN virtual interfaces that provide
+ Layer 2 Networks over Layer 3 Networks. VXLAN is often used
+ to tunnel virtual network infrastructure in virtualized environments.
+ Say Y here if you want to use Virtual eXtensible Local Area Network
+ (VXLAN) in the driver.
+
+ If unsure, say N.
+
+config I40EVF
+ tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
+ depends on PCI_MSI
+ ---help---
+ This driver supports Intel(R) XL710 and X710 virtual functions.
+ For more information on how to identify your adapter, go to the
+ Adapter & Driver ID Guide at:
+
+ <http://support.intel.com/support/network/sb/CS-008441.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ To compile this driver as a module, choose M here. The module
+ will be called i40evf. MSI-X interrupt support is required
+ for this driver to work correctly.
+
endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index 5bae933efc7c..cdbbca8a3755 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_IXGBE) += ixgbe/
obj-$(CONFIG_IXGBEVF) += ixgbevf/
obj-$(CONFIG_I40E) += i40e/
obj-$(CONFIG_IXGB) += ixgb/
+obj-$(CONFIG_I40EVF) += i40evf/
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 58c147271a36..f9313b36c887 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -83,6 +83,11 @@ struct e1000_adapter;
#define E1000_MAX_INTR 10
+/*
+ * Count for polling __E1000_RESET condition every 10-20msec.
+ */
+#define E1000_CHECK_RESET_COUNT 50
+
/* TX/RX descriptor defines */
#define E1000_DEFAULT_TXD 256
#define E1000_MAX_TXD 256
@@ -312,8 +317,6 @@ struct e1000_adapter {
struct delayed_work watchdog_task;
struct delayed_work fifo_stall_task;
struct delayed_work phy_info_task;
-
- struct mutex mutex;
};
enum e1000_state_t {
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index e38622825fa7..46e6544ed1b7 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -494,13 +494,20 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
set_bit(__E1000_DOWN, &adapter->flags);
- /* Only kill reset task if adapter is not resetting */
- if (!test_bit(__E1000_RESETTING, &adapter->flags))
- cancel_work_sync(&adapter->reset_task);
-
cancel_delayed_work_sync(&adapter->watchdog_task);
+
+ /*
+ * Since the watchdog task can reschedule other tasks, we should cancel
+ * it first; otherwise we can end up with a work item that is
+ * still running after the adapter has been turned down.
+ */
+
cancel_delayed_work_sync(&adapter->phy_info_task);
cancel_delayed_work_sync(&adapter->fifo_stall_task);
+
+ /* Only kill reset task if adapter is not resetting */
+ if (!test_bit(__E1000_RESETTING, &adapter->flags))
+ cancel_work_sync(&adapter->reset_task);
}
void e1000_down(struct e1000_adapter *adapter)
@@ -544,21 +551,8 @@ void e1000_down(struct e1000_adapter *adapter)
e1000_clean_all_rx_rings(adapter);
}
-static void e1000_reinit_safe(struct e1000_adapter *adapter)
-{
- while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
- msleep(1);
- mutex_lock(&adapter->mutex);
- e1000_down(adapter);
- e1000_up(adapter);
- mutex_unlock(&adapter->mutex);
- clear_bit(__E1000_RESETTING, &adapter->flags);
-}
-
void e1000_reinit_locked(struct e1000_adapter *adapter)
{
- /* if rtnl_lock is not held the call path is bogus */
- ASSERT_RTNL();
WARN_ON(in_interrupt());
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);
@@ -1316,7 +1310,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
e1000_irq_disable(adapter);
spin_lock_init(&adapter->stats_lock);
- mutex_init(&adapter->mutex);
set_bit(__E1000_DOWN, &adapter->flags);
@@ -1440,6 +1433,10 @@ static int e1000_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ int count = E1000_CHECK_RESET_COUNT;
+
+ while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
+ usleep_range(10000, 20000);
WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
e1000_down(adapter);
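e1000_close() here and __e1000_shutdown() below now wait out an in-flight reset with the same bounded poll before tearing the device down. The idiom in isolation (a sketch; the flag bit is the driver's __E1000_RESETTING):

#include <linux/bitops.h>
#include <linux/delay.h>

static void wait_for_reset_to_finish(unsigned long *flags, int resetting_bit)
{
	int count = 50;	/* E1000_CHECK_RESET_COUNT: ~0.5-1s in total */

	/* usleep_range() sleeps rather than busy-waits, so this is
	 * only safe in process context.
	 */
	while (test_bit(resetting_bit, flags) && count--)
		usleep_range(10000, 20000);
}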
@@ -2325,11 +2322,8 @@ static void e1000_update_phy_info_task(struct work_struct *work)
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter,
phy_info_task.work);
- if (test_bit(__E1000_DOWN, &adapter->flags))
- return;
- mutex_lock(&adapter->mutex);
+
e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
- mutex_unlock(&adapter->mutex);
}
/**
@@ -2345,9 +2339,6 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
struct net_device *netdev = adapter->netdev;
u32 tctl;
- if (test_bit(__E1000_DOWN, &adapter->flags))
- return;
- mutex_lock(&adapter->mutex);
if (atomic_read(&adapter->tx_fifo_stall)) {
if ((er32(TDT) == er32(TDH)) &&
(er32(TDFT) == er32(TDFH)) &&
@@ -2368,7 +2359,6 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
schedule_delayed_work(&adapter->fifo_stall_task, 1);
}
}
- mutex_unlock(&adapter->mutex);
}
bool e1000_has_link(struct e1000_adapter *adapter)
@@ -2422,10 +2412,6 @@ static void e1000_watchdog(struct work_struct *work)
struct e1000_tx_ring *txdr = adapter->tx_ring;
u32 link, tctl;
- if (test_bit(__E1000_DOWN, &adapter->flags))
- return;
-
- mutex_lock(&adapter->mutex);
link = e1000_has_link(adapter);
if ((netif_carrier_ok(netdev)) && link)
goto link_up;
@@ -2516,7 +2502,7 @@ link_up:
adapter->tx_timeout_count++;
schedule_work(&adapter->reset_task);
/* exit immediately since reset is imminent */
- goto unlock;
+ return;
}
}
@@ -2544,9 +2530,6 @@ link_up:
/* Reschedule the task */
if (!test_bit(__E1000_DOWN, &adapter->flags))
schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
-
-unlock:
- mutex_unlock(&adapter->mutex);
}
enum latency_range {
@@ -3495,10 +3478,8 @@ static void e1000_reset_task(struct work_struct *work)
struct e1000_adapter *adapter =
container_of(work, struct e1000_adapter, reset_task);
- if (test_bit(__E1000_DOWN, &adapter->flags))
- return;
e_err(drv, "Reset adapter\n");
- e1000_reinit_safe(adapter);
+ e1000_reinit_locked(adapter);
}
/**
@@ -4963,6 +4944,11 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
netif_device_detach(netdev);
if (netif_running(netdev)) {
+ int count = E1000_CHECK_RESET_COUNT;
+
+ while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
+ usleep_range(10000, 20000);
+
WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
e1000_down(adapter);
}
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 895450e9bb3c..ff2d806eaef7 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -718,8 +718,11 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
e1000_release_phy_80003es2lan(hw);
/* Disable IBIST slave mode (far-end loopback) */
- e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- &kum_reg_data);
+ ret_val =
+ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ &kum_reg_data);
+ if (ret_val)
+ return ret_val;
kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
kum_reg_data);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 8d3945ab7334..d6570b2d5a6b 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5790,7 +5790,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
* specified. Matching the kind of event packet is not supported, with the
* exception of "all V2 events regardless of level 2 or 4".
**/
-static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
+static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct hwtstamp_config config;
@@ -5825,6 +5825,14 @@ static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
sizeof(config)) ? -EFAULT : 0;
}
+static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
+ sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
+}
+
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -5833,7 +5841,9 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCSMIIREG:
return e1000_mii_ioctl(netdev, ifr, cmd);
case SIOCSHWTSTAMP:
- return e1000e_hwtstamp_ioctl(netdev, ifr);
+ return e1000e_hwtstamp_set(netdev, ifr);
+ case SIOCGHWTSTAMP:
+ return e1000e_hwtstamp_get(netdev, ifr);
default:
return -EOPNOTSUPP;
}
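Unlike fec and gianfar above, e1000e caches the last accepted config in adapter->hwtstamp_config, so its SIOCGHWTSTAMP path is a plain copy_to_user(). A sketch of that style of getter (field and function names are illustrative):

#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/uaccess.h>

static int hwtstamp_get_cached(struct hwtstamp_config *cached_cfg,
			       struct ifreq *ifr)
{
	return copy_to_user(ifr->ifr_data, cached_cfg, sizeof(*cached_cfg)) ?
	       -EFAULT : 0;
}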
@@ -6174,7 +6184,7 @@ static int __e1000_resume(struct pci_dev *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
static int e1000_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -6193,7 +6203,7 @@ static int e1000_resume(struct device *dev)
return __e1000_resume(pdev);
}
-#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
#ifdef CONFIG_PM_RUNTIME
static int e1000_runtime_suspend(struct device *dev)
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index da2be59505c0..20e71f4ca426 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1757,19 +1757,23 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
* it across the board.
*/
ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
- if (ret_val)
+ if (ret_val) {
/* If the first read fails, another entity may have
* ownership of the resources, wait and try again to
* see if they have relinquished the resources yet.
*/
- udelay(usec_interval);
+ if (usec_interval >= 1000)
+ msleep(usec_interval / 1000);
+ else
+ udelay(usec_interval);
+ }
ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
if (ret_val)
break;
if (phy_status & BMSR_LSTATUS)
break;
if (usec_interval >= 1000)
- mdelay(usec_interval / 1000);
+ msleep(usec_interval / 1000);
else
udelay(usec_interval);
}
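Both substitutions matter because mdelay()/udelay() busy-wait while msleep() yields the CPU; for waits of a millisecond or more in a context that may sleep, msleep() is the right tool. The dispatch rule the code now uses, in isolation:

#include <linux/delay.h>
#include <linux/types.h>

static void interval_wait(u32 usec_interval)
{
	if (usec_interval >= 1000)
		msleep(usec_interval / 1000);	/* sleeps, >= 1 ms */
	else
		udelay(usec_interval);		/* busy-waits, sub-ms */
}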
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index 479b2c4e552d..6ec1a795f184 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel Ethernet Controller XL710 Family Linux Driver
-# Copyright(c) 2013 Intel Corporation.
+# Copyright(c) 2013 - 2014 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 1ca9834cdfda..4d4cdbf6fad7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -29,6 +28,7 @@
#define _I40E_H_
#include <net/tcp.h>
+#include <net/udp.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
@@ -61,9 +61,10 @@
#define I40E_BASE_VSI_SEID 512
#define I40E_BASE_VEB_SEID 288
#define I40E_MAX_VEB 16
+#define I40E_MAX_NPAR_QPS 32
#define I40E_MAX_NUM_DESCRIPTORS 4096
-#define I40E_MAX_REGISTER 0x0038FFFF
+#define I40E_MAX_REGISTER 0x800000
#define I40E_DEFAULT_NUM_DESCRIPTORS 512
#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
#define I40E_MIN_NUM_DESCRIPTORS 64
@@ -81,11 +82,14 @@
#define I40E_DEFAULT_MSG_ENABLE 4
#define I40E_NVM_VERSION_LO_SHIFT 0
-#define I40E_NVM_VERSION_LO_MASK (0xf << I40E_NVM_VERSION_LO_SHIFT)
-#define I40E_NVM_VERSION_MID_SHIFT 4
-#define I40E_NVM_VERSION_MID_MASK (0xff << I40E_NVM_VERSION_MID_SHIFT)
-#define I40E_NVM_VERSION_HI_SHIFT 12
-#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
+#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
+#define I40E_NVM_VERSION_HI_SHIFT 8
+#define I40E_NVM_VERSION_HI_MASK (0xff << I40E_NVM_VERSION_HI_SHIFT)
+
+/* The values in here are decimal coded as hex, as is the case in the NVM map */
+#define I40E_CURRENT_NVM_VERSION_HI 0x2
+#define I40E_CURRENT_NVM_VERSION_LO 0x1
+
/* magic for getting defines into strings */
#define STRINGIFY(foo) #foo
@@ -127,7 +131,9 @@ enum i40e_state_t {
__I40E_PF_RESET_REQUESTED,
__I40E_CORE_RESET_REQUESTED,
__I40E_GLOBAL_RESET_REQUESTED,
+ __I40E_EMP_RESET_REQUESTED,
__I40E_FILTER_OVERFLOW_PROMISC,
+ __I40E_SUSPENDED,
};
enum i40e_interrupt_policy {
@@ -194,11 +200,18 @@ struct i40e_pf {
u16 num_tc_qps; /* num queue pairs per TC */
u16 num_lan_qps; /* num lan queues this pf has set up */
u16 num_lan_msix; /* num queue vectors for the base pf vsi */
+ int queues_left; /* queues left unclaimed */
u16 rss_size; /* num queues in the RSS array */
u16 rss_size_max; /* HW defined max RSS queues */
u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
u8 atr_sample_rate;
+ bool wol_en;
+
+#ifdef CONFIG_I40E_VXLAN
+ __be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
+ u16 pending_vxlan_bitmap;
+#endif
enum i40e_interrupt_policy int_policy;
u16 rx_itr_default;
u16 tx_itr_default;
@@ -216,24 +229,23 @@ struct i40e_pf {
#define I40E_FLAG_RX_1BUF_ENABLED (u64)(1 << 4)
#define I40E_FLAG_RX_PS_ENABLED (u64)(1 << 5)
#define I40E_FLAG_RSS_ENABLED (u64)(1 << 6)
-#define I40E_FLAG_MQ_ENABLED (u64)(1 << 7)
-#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 8)
-#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 9)
-#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 10)
-#define I40E_FLAG_IN_NETPOLL (u64)(1 << 13)
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 14)
-#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 15)
-#define I40E_FLAG_FILTER_SYNC (u64)(1 << 16)
-#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 18)
-#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 19)
-#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 20)
-#define I40E_FLAG_DCB_ENABLED (u64)(1 << 21)
-#define I40E_FLAG_FDIR_ENABLED (u64)(1 << 22)
-#define I40E_FLAG_FDIR_ATR_ENABLED (u64)(1 << 23)
-#define I40E_FLAG_MFP_ENABLED (u64)(1 << 27)
-
- u16 num_tx_queues;
- u16 num_rx_queues;
+#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 7)
+#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 8)
+#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 9)
+#define I40E_FLAG_IN_NETPOLL (u64)(1 << 12)
+#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 13)
+#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 14)
+#define I40E_FLAG_FILTER_SYNC (u64)(1 << 15)
+#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 17)
+#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 18)
+#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 19)
+#define I40E_FLAG_DCB_ENABLED (u64)(1 << 20)
+#define I40E_FLAG_FDIR_ENABLED (u64)(1 << 21)
+#define I40E_FLAG_FDIR_ATR_ENABLED (u64)(1 << 22)
+#define I40E_FLAG_MFP_ENABLED (u64)(1 << 26)
+#ifdef CONFIG_I40E_VXLAN
+#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27)
+#endif
bool stat_offsets_loaded;
struct i40e_hw_port_stats stats;
@@ -247,6 +259,7 @@ struct i40e_pf {
u16 globr_count; /* Global reset count */
u16 empr_count; /* EMP reset count */
u16 pfr_count; /* PF reset count */
+ u16 sw_int_count; /* SW interrupt count */
struct mutex switch_mutex;
u16 lan_vsi; /* our default LAN VSI */
@@ -270,6 +283,8 @@ struct i40e_pf {
struct dentry *i40e_dbg_pf;
#endif /* CONFIG_DEBUG_FS */
+ u16 instance; /* A unique number per i40e_pf instance in the system */
+
/* sr-iov config info */
struct i40e_vf *vf;
int num_alloc_vfs; /* actual number of VFs allocated */
@@ -441,13 +456,11 @@ static inline char *i40e_fw_version_str(struct i40e_hw *hw)
static char buf[32];
snprintf(buf, sizeof(buf),
- "f%d.%d a%d.%d n%02d.%02d.%02d e%08x",
+ "f%d.%d a%d.%d n%02x.%02x e%08x",
hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
hw->aq.api_maj_ver, hw->aq.api_min_ver,
(hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
>> I40E_NVM_VERSION_HI_SHIFT,
- (hw->nvm.version & I40E_NVM_VERSION_MID_MASK)
- >> I40E_NVM_VERSION_MID_SHIFT,
(hw->nvm.version & I40E_NVM_VERSION_LO_MASK)
>> I40E_NVM_VERSION_LO_SHIFT,
hw->nvm.eetrack);
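With the reworked masks the 16-bit NVM word is split 8/8 and printed as hex, so the format string drops the old middle field. A sketch of the decode under the new defines (which come from the i40e.h hunk above):

#include <linux/types.h>

static void decode_nvm_version(u16 version, u8 *hi, u8 *lo)
{
	/* e.g. version == 0x0201 decodes to hi=0x02, lo=0x01 ("n02.01") */
	*hi = (version & I40E_NVM_VERSION_HI_MASK) >> I40E_NVM_VERSION_HI_SHIFT;
	*lo = (version & I40E_NVM_VERSION_LO_MASK) >> I40E_NVM_VERSION_LO_SHIFT;
}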
@@ -495,6 +508,7 @@ int i40e_up(struct i40e_vsi *vsi);
void i40e_down(struct i40e_vsi *vsi);
extern const char i40e_driver_name[];
extern const char i40e_driver_version_str[];
+void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
void i40e_update_stats(struct i40e_vsi *vsi);
void i40e_update_eth_stats(struct i40e_vsi *vsi);
@@ -502,13 +516,6 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
int i40e_fetch_switch_configuration(struct i40e_pf *pf,
bool printconfig);
-/* needed by i40e_main.c */
-void i40e_add_fdir_filter(struct i40e_fdir_data fdir_data,
- struct i40e_ring *tx_ring);
-void i40e_add_remove_filter(struct i40e_fdir_data fdir_data,
- struct i40e_ring *tx_ring);
-void i40e_update_fdir_filter(struct i40e_fdir_data fdir_data,
- struct i40e_ring *tx_ring);
int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
struct i40e_pf *pf, bool add);
@@ -524,6 +531,8 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
int i40e_vsi_release(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type,
struct i40e_vsi *start_vsi);
+int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable);
+int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc);
void i40e_veb_release(struct i40e_veb *veb);
@@ -544,6 +553,7 @@ static inline void i40e_dbg_init(void) {}
static inline void i40e_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS*/
void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index cfef7fc32cdd..c75aa2d07491 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -31,6 +30,8 @@
#include "i40e_adminq.h"
#include "i40e_prototype.h"
+static void i40e_resume_aq(struct i40e_hw *hw);
+
/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
@@ -43,13 +44,17 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
if (hw->mac.type == I40E_MAC_VF) {
hw->aq.asq.tail = I40E_VF_ATQT1;
hw->aq.asq.head = I40E_VF_ATQH1;
+ hw->aq.asq.len = I40E_VF_ATQLEN1;
hw->aq.arq.tail = I40E_VF_ARQT1;
hw->aq.arq.head = I40E_VF_ARQH1;
+ hw->aq.arq.len = I40E_VF_ARQLEN1;
} else {
hw->aq.asq.tail = I40E_PF_ATQT;
hw->aq.asq.head = I40E_PF_ATQH;
+ hw->aq.asq.len = I40E_PF_ATQLEN;
hw->aq.arq.tail = I40E_PF_ARQT;
hw->aq.arq.head = I40E_PF_ARQH;
+ hw->aq.arq.len = I40E_PF_ARQLEN;
}
}
@@ -60,9 +65,8 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
i40e_status ret_code;
- struct i40e_virt_mem mem;
- ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq_mem,
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
i40e_mem_atq_ring,
(hw->aq.num_asq_entries *
sizeof(struct i40e_aq_desc)),
@@ -70,21 +74,14 @@ static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
if (ret_code)
return ret_code;
- hw->aq.asq.desc = hw->aq.asq_mem.va;
- hw->aq.asq.dma_addr = hw->aq.asq_mem.pa;
-
- ret_code = i40e_allocate_virt_mem(hw, &mem,
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
(hw->aq.num_asq_entries *
sizeof(struct i40e_asq_cmd_details)));
if (ret_code) {
- i40e_free_dma_mem(hw, &hw->aq.asq_mem);
- hw->aq.asq_mem.va = NULL;
- hw->aq.asq_mem.pa = 0;
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
return ret_code;
}
- hw->aq.asq.details = mem.va;
-
return ret_code;
}
@@ -96,16 +93,11 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
i40e_status ret_code;
- ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq_mem,
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
i40e_mem_arq_ring,
(hw->aq.num_arq_entries *
sizeof(struct i40e_aq_desc)),
I40E_ADMINQ_DESC_ALIGNMENT);
- if (ret_code)
- return ret_code;
-
- hw->aq.arq.desc = hw->aq.arq_mem.va;
- hw->aq.arq.dma_addr = hw->aq.arq_mem.pa;
return ret_code;
}
@@ -119,14 +111,7 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
**/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
- struct i40e_virt_mem mem;
-
- i40e_free_dma_mem(hw, &hw->aq.asq_mem);
- hw->aq.asq_mem.va = NULL;
- hw->aq.asq_mem.pa = 0;
- mem.va = hw->aq.asq.details;
- i40e_free_virt_mem(hw, &mem);
- hw->aq.asq.details = NULL;
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
/**
@@ -138,9 +123,7 @@ static void i40e_free_adminq_asq(struct i40e_hw *hw)
**/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
- i40e_free_dma_mem(hw, &hw->aq.arq_mem);
- hw->aq.arq_mem.va = NULL;
- hw->aq.arq_mem.pa = 0;
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
/**
@@ -151,7 +134,6 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
i40e_status ret_code;
struct i40e_aq_desc *desc;
- struct i40e_virt_mem mem;
struct i40e_dma_mem *bi;
int i;
@@ -160,11 +142,11 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
*/
/* buffer_info structures do not need alignment */
- ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_arq_entries *
- sizeof(struct i40e_dma_mem)));
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+ (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
if (ret_code)
goto alloc_arq_bufs;
- hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)mem.va;
+ hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
/* allocate the mapped buffers */
for (i = 0; i < hw->aq.num_arq_entries; i++) {
@@ -206,8 +188,7 @@ unwind_alloc_arq_bufs:
i--;
for (; i >= 0; i--)
i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
- mem.va = hw->aq.arq.r.arq_bi;
- i40e_free_virt_mem(hw, &mem);
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
return ret_code;
}
@@ -219,16 +200,15 @@ unwind_alloc_arq_bufs:
static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
i40e_status ret_code;
- struct i40e_virt_mem mem;
struct i40e_dma_mem *bi;
int i;
/* No mapped memory needed yet, just the buffer info structures */
- ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_asq_entries *
- sizeof(struct i40e_dma_mem)));
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+ (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
if (ret_code)
goto alloc_asq_bufs;
- hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)mem.va;
+ hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
/* allocate the mapped buffers */
for (i = 0; i < hw->aq.num_asq_entries; i++) {
@@ -248,8 +228,7 @@ unwind_alloc_asq_bufs:
i--;
for (; i >= 0; i--)
i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
- mem.va = hw->aq.asq.r.asq_bi;
- i40e_free_virt_mem(hw, &mem);
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
return ret_code;
}
@@ -260,14 +239,17 @@ unwind_alloc_asq_bufs:
**/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
- struct i40e_virt_mem mem;
int i;
+ /* free descriptors */
for (i = 0; i < hw->aq.num_arq_entries; i++)
i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
- mem.va = hw->aq.arq.r.arq_bi;
- i40e_free_virt_mem(hw, &mem);
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}
/**
@@ -276,7 +258,6 @@ static void i40e_free_arq_bufs(struct i40e_hw *hw)
**/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
- struct i40e_virt_mem mem;
int i;
/* only unmap if the address is non-NULL */
@@ -284,9 +265,14 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
if (hw->aq.asq.r.asq_bi[i].pa)
i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
- /* now free the buffer info list */
- mem.va = hw->aq.asq.r.asq_bi;
- i40e_free_virt_mem(hw, &mem);
+ /* free the buffer info list */
+ i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}
/**
@@ -299,14 +285,18 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
{
if (hw->mac.type == I40E_MAC_VF) {
/* configure the transmit queue */
- wr32(hw, I40E_VF_ATQBAH1, upper_32_bits(hw->aq.asq.dma_addr));
- wr32(hw, I40E_VF_ATQBAL1, lower_32_bits(hw->aq.asq.dma_addr));
+ wr32(hw, I40E_VF_ATQBAH1,
+ upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_VF_ATQBAL1,
+ lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
I40E_VF_ATQLEN1_ATQENABLE_MASK));
} else {
/* configure the transmit queue */
- wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.dma_addr));
- wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.dma_addr));
+ wr32(hw, I40E_PF_ATQBAH,
+ upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_PF_ATQBAL,
+ lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
I40E_PF_ATQLEN_ATQENABLE_MASK));
}
@@ -322,14 +312,18 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
{
if (hw->mac.type == I40E_MAC_VF) {
/* configure the receive queue */
- wr32(hw, I40E_VF_ARQBAH1, upper_32_bits(hw->aq.arq.dma_addr));
- wr32(hw, I40E_VF_ARQBAL1, lower_32_bits(hw->aq.arq.dma_addr));
+ wr32(hw, I40E_VF_ARQBAH1,
+ upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_VF_ARQBAL1,
+ lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
I40E_VF_ARQLEN1_ARQENABLE_MASK));
} else {
/* configure the receive queue */
- wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.dma_addr));
- wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.dma_addr));
+ wr32(hw, I40E_PF_ARQBAH,
+ upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_PF_ARQBAL,
+ lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
I40E_PF_ARQLEN_ARQENABLE_MASK));
}
@@ -466,10 +460,9 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
return I40E_ERR_NOT_READY;
/* Stop firmware AdminQ processing */
- if (hw->mac.type == I40E_MAC_VF)
- wr32(hw, I40E_VF_ATQLEN1, 0);
- else
- wr32(hw, I40E_PF_ATQLEN, 0);
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, hw->aq.asq.len, 0);
/* make sure lock is available */
mutex_lock(&hw->aq.asq_mutex);
@@ -478,8 +471,6 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
/* free ring buffers */
i40e_free_asq_bufs(hw);
- /* free the ring descriptors */
- i40e_free_adminq_asq(hw);
mutex_unlock(&hw->aq.asq_mutex);
@@ -500,10 +491,9 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
return I40E_ERR_NOT_READY;
/* Stop firmware AdminQ processing */
- if (hw->mac.type == I40E_MAC_VF)
- wr32(hw, I40E_VF_ARQLEN1, 0);
- else
- wr32(hw, I40E_PF_ARQLEN, 0);
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, hw->aq.arq.len, 0);
/* make sure lock is available */
mutex_lock(&hw->aq.arq_mutex);
@@ -512,8 +502,6 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
/* free ring buffers */
i40e_free_arq_bufs(hw);
- /* free the ring descriptors */
- i40e_free_adminq_arq(hw);
mutex_unlock(&hw->aq.arq_mutex);
@@ -533,8 +521,9 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
**/
i40e_status i40e_init_adminq(struct i40e_hw *hw)
{
- u16 eetrack_lo, eetrack_hi;
i40e_status ret_code;
+ u16 eetrack_lo, eetrack_hi;
+ int retry = 0;
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
@@ -562,23 +551,41 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
if (ret_code)
goto init_adminq_free_asq;
- ret_code = i40e_aq_get_firmware_version(hw,
- &hw->aq.fw_maj_ver, &hw->aq.fw_min_ver,
- &hw->aq.api_maj_ver, &hw->aq.api_min_ver,
- NULL);
- if (ret_code)
+ /* There are some cases where the firmware may not be quite ready
+ * for AdminQ operations, so we retry the AdminQ setup a few times
+ * if we see timeouts in this first AQ call.
+ */
+ do {
+ ret_code = i40e_aq_get_firmware_version(hw,
+ &hw->aq.fw_maj_ver,
+ &hw->aq.fw_min_ver,
+ &hw->aq.api_maj_ver,
+ &hw->aq.api_min_ver,
+ NULL);
+ if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+ break;
+ retry++;
+ msleep(100);
+ i40e_resume_aq(hw);
+ } while (retry < 10);
+ if (ret_code != I40E_SUCCESS)
goto init_adminq_free_arq;
- if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
- hw->aq.api_min_ver != I40E_FW_API_VERSION_MINOR) {
- ret_code = I40E_ERR_FIRMWARE_API_VERSION;
- goto init_adminq_free_arq;
- }
+ /* get the NVM version info */
i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+ if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
+ hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) {
+ ret_code = I40E_ERR_FIRMWARE_API_VERSION;
+ goto init_adminq_free_arq;
+ }
+
+ /* pre-emptive resource lock release */
+ i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+
ret_code = i40e_aq_set_hmc_resource_profile(hw,
I40E_HMC_PROFILE_DEFAULT,
0,
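The init path above now tolerates firmware that is still settling: the first AQ call is retried up to ten times, 100 ms apart, re-arming the queues between attempts, and only the timeout error triggers a retry. The control flow in isolation (do_call() and re_arm() are hypothetical stand-ins for the firmware-version query and i40e_resume_aq()):

#include <linux/delay.h>
#include <linux/errno.h>

static int bounded_retry(int (*do_call)(void), void (*re_arm)(void))
{
	int retry = 0;
	int ret;

	do {
		ret = do_call();
		if (ret != -EAGAIN)	/* stand-in for the AQ timeout code */
			break;
		retry++;
		msleep(100);
		re_arm();
	} while (retry < 10);

	return ret;
}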
@@ -606,6 +613,9 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
+ if (i40e_check_asq_alive(hw))
+ i40e_aq_queue_shutdown(hw, true);
+
i40e_shutdown_asq(hw);
i40e_shutdown_arq(hw);
@@ -659,12 +669,12 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
* Returns true if the firmware has processed all descriptors on the
* admin send queue. Returns false if there are still requests pending.
**/
-bool i40e_asq_done(struct i40e_hw *hw)
+static bool i40e_asq_done(struct i40e_hw *hw)
{
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
- return (rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use);
+ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}
@@ -674,7 +684,7 @@ bool i40e_asq_done(struct i40e_hw *hw)
* @desc: prefilled descriptor describing the command (non DMA mem)
* @buff: buffer to use for indirect commands
* @buff_size: size of buffer for indirect commands
- * @opaque: pointer to info to be used in async cleanup
+ * @cmd_details: pointer to command details structure
*
* This is the main send command driver routine for the Admin Queue send
* queue. It runs the queue, cleans the queue, etc
@@ -854,7 +864,7 @@ void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
/* zero out the desc */
memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
desc->opcode = cpu_to_le16(opcode);
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_EI | I40E_AQ_FLAG_SI);
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
}
/**
@@ -925,6 +935,11 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
* size
*/
bi = &hw->aq.arq.r.arq_bi[ntc];
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
desc->datalen = cpu_to_le16((u16)bi->size);
desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
@@ -947,36 +962,16 @@ clean_arq_element_out:
return ret_code;
}
-void i40e_resume_aq(struct i40e_hw *hw)
+static void i40e_resume_aq(struct i40e_hw *hw)
{
- u32 reg = 0;
-
/* Registers are reset after PF reset */
hw->aq.asq.next_to_use = 0;
hw->aq.asq.next_to_clean = 0;
i40e_config_asq_regs(hw);
- reg = hw->aq.num_asq_entries;
-
- if (hw->mac.type == I40E_MAC_VF) {
- reg |= I40E_VF_ATQLEN_ATQENABLE_MASK;
- wr32(hw, I40E_VF_ATQLEN1, reg);
- } else {
- reg |= I40E_PF_ATQLEN_ATQENABLE_MASK;
- wr32(hw, I40E_PF_ATQLEN, reg);
- }
hw->aq.arq.next_to_use = 0;
hw->aq.arq.next_to_clean = 0;
i40e_config_arq_regs(hw);
- reg = hw->aq.num_arq_entries;
-
- if (hw->mac.type == I40E_MAC_VF) {
- reg |= I40E_VF_ATQLEN_ATQENABLE_MASK;
- wr32(hw, I40E_VF_ARQLEN1, reg);
- } else {
- reg |= I40E_PF_ATQLEN_ATQENABLE_MASK;
- wr32(hw, I40E_PF_ARQLEN, reg);
- }
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 22e5ed683e47..993f7685a911 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -32,20 +31,20 @@
#include "i40e_adminq_cmd.h"
#define I40E_ADMINQ_DESC(R, i) \
- (&(((struct i40e_aq_desc *)((R).desc))[i]))
+ (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
#define I40E_ADMINQ_DESC_ALIGNMENT 4096
struct i40e_adminq_ring {
- void *desc; /* Descriptor ring memory */
- void *details; /* ASQ details */
+ struct i40e_virt_mem dma_head; /* space for dma structures */
+ struct i40e_dma_mem desc_buf; /* descriptor ring memory */
+ struct i40e_virt_mem cmd_buf; /* command buffer memory */
union {
struct i40e_dma_mem *asq_bi;
struct i40e_dma_mem *arq_bi;
} r;
- u64 dma_addr; /* Physical address of the ring */
u16 count; /* Number of descriptors */
u16 rx_buf_len; /* Admin Receive Queue buffer length */
@@ -56,6 +55,7 @@ struct i40e_adminq_ring {
/* used for queue tracking */
u32 head;
u32 tail;
+ u32 len;
};
/* ASQ transaction details */
@@ -69,7 +69,7 @@ struct i40e_asq_cmd_details {
};
#define I40E_ADMINQ_DETAILS(R, i) \
- (&(((struct i40e_asq_cmd_details *)((R).details))[i]))
+ (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
/* ARQ event information */
struct i40e_arq_event_info {
@@ -94,9 +94,6 @@ struct i40e_adminq_info {
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
- struct i40e_dma_mem asq_mem; /* send queue dynamic memory */
- struct i40e_dma_mem arq_mem; /* receive queue dynamic memory */
-
/* last status values on send and receive queues */
enum i40e_admin_queue_err asq_last_status;
enum i40e_admin_queue_err arq_last_status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index e61ebdd5a5f9..c009eb47058a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -35,7 +34,7 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0000
+#define I40E_FW_API_VERSION_MINOR 0x0001
struct i40e_aq_desc {
__le16 flags;
@@ -137,10 +136,13 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
/* LAA */
- i40e_aqc_opc_mng_laa = 0x0106,
+ i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
+ /* PXE */
+ i40e_aqc_opc_clear_pxe_mode = 0x0110,
+
/* internal switch commands */
i40e_aqc_opc_get_switch_config = 0x0200,
i40e_aqc_opc_add_statistics = 0x0201,
@@ -317,13 +319,15 @@ struct i40e_aqc_get_version {
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
-/* Send driver version (direct 0x0002) */
+/* Send driver version (indirect 0x0002) */
struct i40e_aqc_driver_version {
u8 driver_major_ver;
u8 driver_minor_ver;
u8 driver_build_ver;
u8 driver_subbuild_ver;
- u8 reserved[12];
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
@@ -479,7 +483,7 @@ struct i40e_aqc_mng_laa {
u8 reserved2[6];
};
-/* Manage MAC Address Read Command (0x0107) */
+/* Manage MAC Address Read Command (indirect 0x0107) */
struct i40e_aqc_mac_address_read {
__le16 command_flags;
#define I40E_AQC_LAN_ADDR_VALID 0x10
@@ -517,6 +521,16 @@ struct i40e_aqc_mac_address_write {
I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
+/* PXE commands (0x011x) */
+
+/* Clear PXE Command and response (direct 0x0110) */
+struct i40e_aqc_clear_pxe {
+ u8 rx_cnt;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+
/* Switch configuration commands (0x02xx) */
/* Used by many indirect commands that only pass an seid and a buffer in the
@@ -639,13 +653,15 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
u8 reserved2[6];
};
-/* Add VSI (indirect 0x210)
+/* Add VSI (indirect 0x0210)
* this indirect command uses struct i40e_aqc_vsi_properties_data
* as the indirect buffer (128 bytes)
*
- * Update VSI (indirect 0x211) Get VSI (indirect 0x0212)
- * use the generic i40e_aqc_switch_seid descriptor format
- * use the same completion and data structure as Add VSI
+ * Update VSI (indirect 0x0211)
+ * uses the same data structure as Add VSI
+ *
+ * Get VSI (indirect 0x0212)
+ * uses the same completion and data structure as Add VSI
*/
struct i40e_aqc_add_get_update_vsi {
__le16 uplink_seid;
@@ -1185,27 +1201,40 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
+/* 0x0000 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
-#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE 0x0002
+/* 0x0002 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE 0x0004
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
+/* 0x0005 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL 0x0007
+/* 0x0007 reserved */
/* 0x0008 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+
#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
- __le32 key_low;
- __le32 key_high;
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
+
+ __le32 tenant_id;
+ u8 reserved[4];
__le16 queue_number;
#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
- u8 reserved[14];
+ u8 reserved2[14];
/* response section */
u8 allocation_result;
#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
@@ -1548,7 +1577,7 @@ struct i40e_aqc_module_desc {
struct i40e_aq_get_phy_abilities_resp {
__le32 phy_type; /* bitmap using the above enum for offsets */
- u8 link_speed; /* bitmap using the above enum */
+ u8 link_speed; /* bitmap using the above enum bit patterns */
u8 abilities;
#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
@@ -1582,6 +1611,10 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */
__le32 phy_type;
u8 link_speed;
u8 abilities;
+/* bits 0-2 use the values from get_phy_abilities_resp */
+#define I40E_AQ_PHY_ENABLE_LINK 0x08
+#define I40E_AQ_PHY_ENABLE_AN 0x10
+#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
__le16 eee_capability;
__le32 eeer;
u8 low_power_ctrl;
@@ -1915,21 +1948,39 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
struct i40e_aqc_add_udp_tunnel {
__le16 udp_port;
u8 header_len; /* in DWords, 1 to 15 */
- u8 protocol_index;
-#define I40E_AQC_TUNNEL_TYPE_MAC 0x0
-#define I40E_AQC_TUNNEL_TYPE_UDP 0x1
- u8 reserved[12];
+ u8 protocol_type;
+#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x0
+#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x2
+#define I40E_AQC_TUNNEL_TYPE_NGE 0x3
+ u8 variable_udp_length;
+#define I40E_AQC_TUNNEL_FIXED_UDP_LENGTH 0x0
+#define I40E_AQC_TUNNEL_VARIABLE_UDP_LENGTH 0x1
+ u8 udp_key_index;
+#define I40E_AQC_TUNNEL_KEY_INDEX_VXLAN 0x0
+#define I40E_AQC_TUNNEL_KEY_INDEX_NGE 0x1
+#define I40E_AQC_TUNNEL_KEY_INDEX_PROPRIETARY_UDP 0x2
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
+struct i40e_aqc_add_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 filter_entry_index;
+ u8 multiple_pfs;
+#define I40E_AQC_SINGLE_PF 0x0
+#define I40E_AQC_MULTIPLE_PFS 0x1
+ u8 total_filters;
+ u8 reserved[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
+
/* remove UDP Tunnel command (0x0B01) */
struct i40e_aqc_remove_udp_tunnel {
u8 reserved[2];
u8 index; /* 0 to 15 */
- u8 pf_filters;
- u8 total_filters;
- u8 reserved2[11];
+ u8 reserved2[13];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
@@ -1937,28 +1988,32 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
struct i40e_aqc_del_udp_tunnel_completion {
__le16 udp_port;
u8 index; /* 0 to 15 */
- u8 multiple_entries;
- u8 tunnels_used;
- u8 reserved;
- u8 tunnels_free;
- u8 reserved1[9];
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved1[11];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
/* tunnel key structure 0x0B10 */
+
struct i40e_aqc_tunnel_key_structure {
- __le16 key1_off;
- __le16 key1_len;
- __le16 key2_off;
- __le16 key2_len;
- __le16 flags;
+ u8 key1_off;
+ u8 key2_off;
+ u8 key1_len; /* 0 to 15 */
+ u8 key2_len; /* 0 to 15 */
+ u8 flags;
#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
/* response flags */
#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
- u8 resreved[6];
+ u8 network_key_index;
+#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
+#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
+#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2
+#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
@@ -2052,6 +2107,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
#define I40E_AQ_CLUSTER_ID_DCB 8
#define I40E_AQ_CLUSTER_ID_EMP_MEM 9
#define I40E_AQ_CLUSTER_ID_PKT_BUF 10
+#define I40E_AQ_CLUSTER_ID_ALTRAM 11
struct i40e_aqc_debug_dump_internals {
u8 cluster_id;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
index 3b1cc214f9dc..926811ad44ac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 1e4ea134975a..807312bb62a2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -126,6 +125,44 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
}
/**
+ * i40e_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the Admin Send Queue is enabled, else false.
+ **/
+bool i40e_check_asq_alive(struct i40e_hw *hw)
+{
+ return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
+}
+
+/**
+ * i40e_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_queue_shutdown *cmd =
+ (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
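Taken together, the two new helpers suggest a natural teardown pattern: check that the send queue is still alive before telling the firmware it is going away. A minimal sketch, assuming a hypothetical caller name:

	/* hypothetical teardown helper: only issue the shutdown AQ
	 * command if the Admin Send Queue is still enabled
	 */
	static void example_adminq_teardown(struct i40e_hw *hw)
	{
		if (i40e_check_asq_alive(hw))
			i40e_aq_queue_shutdown(hw, true);
	}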
+
+
+/**
* i40e_init_shared_code - Initialize the shared code
* @hw: pointer to hardware structure
*
@@ -142,14 +179,6 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
i40e_status status = 0;
u32 reg;
- hw->phy.get_link_info = true;
-
- /* Determine port number */
- reg = rd32(hw, I40E_PFGEN_PORTNUM);
- reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >>
- I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
- hw->port = (u8)reg;
-
i40e_set_mac_type(hw);
switch (hw->mac.type) {
@@ -160,6 +189,21 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
break;
}
+ hw->phy.get_link_info = true;
+
+ /* Determine port number */
+ reg = rd32(hw, I40E_PFGEN_PORTNUM);
+ reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >>
+ I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
+ hw->port = (u8)reg;
+
+ /* Determine the PF number based on the PCI fn */
+ reg = rd32(hw, I40E_GLPCI_CAPSUP);
+ if (reg & I40E_GLPCI_CAPSUP_ARI_EN_MASK)
+ hw->pf_id = (u8)((hw->bus.device << 3) | hw->bus.func);
+ else
+ hw->pf_id = (u8)hw->bus.func;
+
status = i40e_init_nvm(hw);
return status;
}
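A worked example of the ARI-based PF-number derivation above (the device and function values are illustrative, not taken from the patch):

	/* with ARI enabled the function space widens, so the PCI
	 * device number folds into the upper bits of the PF id:
	 *   device = 2, function = 1  ->  pf_id = (2 << 3) | 1 = 17
	 * without ARI, pf_id is simply the PCI function number
	 */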
@@ -210,8 +254,11 @@ i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_mac_address_write);
cmd_data->command_flags = cpu_to_le16(flags);
- memcpy(&cmd_data->mac_sal, &mac_addr[0], 4);
- memcpy(&cmd_data->mac_sah, &mac_addr[4], 2);
+ cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
+ cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
+ ((u32)mac_addr[3] << 16) |
+ ((u32)mac_addr[4] << 8) |
+ mac_addr[5]);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
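The new packing replaces the raw memcpy with explicit byte arithmetic; a worked example for an illustrative address:

	/* MAC 00:1b:21:aa:bb:cc packs as
	 *   mac_sah = 0x001b      (bytes 0-1, upper word)
	 *   mac_sal = 0x21aabbcc  (bytes 2-5, lower dword)
	 * both converted to little-endian register order by cpu_to_le*()
	 */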
@@ -240,32 +287,53 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
}
/**
- * i40e_validate_mac_addr - Validate MAC address
- * @mac_addr: pointer to MAC address
- *
- * Tests a MAC address to ensure it is a valid Individual Address
+ * i40e_get_media_type - Gets media type
+ * @hw: pointer to the hardware structure
**/
-i40e_status i40e_validate_mac_addr(u8 *mac_addr)
+static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
{
- i40e_status status = 0;
-
- /* Make sure it is not a multicast address */
- if (I40E_IS_MULTICAST(mac_addr)) {
- hw_dbg(hw, "MAC address is multicast\n");
- status = I40E_ERR_INVALID_MAC_ADDR;
- /* Not a broadcast address */
- } else if (I40E_IS_BROADCAST(mac_addr)) {
- hw_dbg(hw, "MAC address is broadcast\n");
- status = I40E_ERR_INVALID_MAC_ADDR;
- /* Reject the zero address */
- } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
- mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
- hw_dbg(hw, "MAC address is all zeros\n");
- status = I40E_ERR_INVALID_MAC_ADDR;
+ enum i40e_media_type media;
+
+ switch (hw->phy.link_info.phy_type) {
+ case I40E_PHY_TYPE_10GBASE_SR:
+ case I40E_PHY_TYPE_10GBASE_LR:
+ case I40E_PHY_TYPE_40GBASE_SR4:
+ case I40E_PHY_TYPE_40GBASE_LR4:
+ media = I40E_MEDIA_TYPE_FIBER;
+ break;
+ case I40E_PHY_TYPE_100BASE_TX:
+ case I40E_PHY_TYPE_1000BASE_T:
+ case I40E_PHY_TYPE_10GBASE_T:
+ media = I40E_MEDIA_TYPE_BASET;
+ break;
+ case I40E_PHY_TYPE_10GBASE_CR1_CU:
+ case I40E_PHY_TYPE_40GBASE_CR4_CU:
+ case I40E_PHY_TYPE_10GBASE_CR1:
+ case I40E_PHY_TYPE_40GBASE_CR4:
+ case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+ media = I40E_MEDIA_TYPE_DA;
+ break;
+ case I40E_PHY_TYPE_1000BASE_KX:
+ case I40E_PHY_TYPE_10GBASE_KX4:
+ case I40E_PHY_TYPE_10GBASE_KR:
+ case I40E_PHY_TYPE_40GBASE_KR4:
+ media = I40E_MEDIA_TYPE_BACKPLANE;
+ break;
+ case I40E_PHY_TYPE_SGMII:
+ case I40E_PHY_TYPE_XAUI:
+ case I40E_PHY_TYPE_XFI:
+ case I40E_PHY_TYPE_XLAUI:
+ case I40E_PHY_TYPE_XLPPI:
+ default:
+ media = I40E_MEDIA_TYPE_UNKNOWN;
+ break;
}
- return status;
+
+ return media;
}
+#define I40E_PF_RESET_WAIT_COUNT_A0 200
+#define I40E_PF_RESET_WAIT_COUNT 10
/**
* i40e_pf_reset - Reset the PF
* @hw: pointer to the hardware structure
@@ -275,7 +343,8 @@ i40e_status i40e_validate_mac_addr(u8 *mac_addr)
**/
i40e_status i40e_pf_reset(struct i40e_hw *hw)
{
- u32 wait_cnt = 0;
+ u32 cnt = 0;
+ u32 cnt1 = 0;
u32 reg = 0;
u32 grst_del;
@@ -285,7 +354,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
*/
grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK
>> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
- for (wait_cnt = 0; wait_cnt < grst_del + 2; wait_cnt++) {
+ for (cnt = 0; cnt < grst_del + 2; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
break;
@@ -296,17 +365,37 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
return I40E_ERR_RESET_FAILED;
}
- /* Determine the PF number based on the PCI fn */
- hw->pf_id = (u8)hw->bus.func;
+ /* Now wait for the FW to be ready */
+ for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
+ reg = rd32(hw, I40E_GLNVM_ULD);
+ reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
+ if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
+ hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
+ break;
+ }
+ usleep_range(10000, 20000);
+ }
+ if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
+ hw_dbg(hw, "wait for FW reset complete timed out\n");
+ hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
+ return I40E_ERR_RESET_FAILED;
+ }
/* If there was a Global Reset in progress when we got here,
* we don't need to do the PF Reset
*/
- if (!wait_cnt) {
+ if (!cnt) {
+ if (hw->revision_id == 0)
+ cnt = I40E_PF_RESET_WAIT_COUNT_A0;
+ else
+ cnt = I40E_PF_RESET_WAIT_COUNT;
reg = rd32(hw, I40E_PFGEN_CTRL);
wr32(hw, I40E_PFGEN_CTRL,
(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
- for (wait_cnt = 0; wait_cnt < 10; wait_cnt++) {
+ for (; cnt; cnt--) {
reg = rd32(hw, I40E_PFGEN_CTRL);
if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
break;
@@ -319,6 +408,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
}
i40e_clear_pxe_mode(hw);
+
return 0;
}
@@ -335,9 +425,47 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw)
/* Clear single descriptor fetch/write-back mode */
reg = rd32(hw, I40E_GLLAN_RCTL_0);
- wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
+
+ if (hw->revision_id == 0) {
+ /* As a workaround, clear PXE_MODE instead of setting it */
+ wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
+ } else {
+ wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
+ }
+}
+
+/**
+ * i40e_led_is_mine - helper to find matching led
+ * @hw: pointer to the hw struct
+ * @idx: index into GPIO registers
+ *
+ * returns: 0 if no match, otherwise the value of the GPIO_CTL register
+ **/
+static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
+{
+ u32 gpio_val = 0;
+ u32 port;
+
+ if (!hw->func_caps.led[idx])
+ return 0;
+
+ gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
+ port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
+ I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+
+ /* if PRT_NUM_NA is 1 then this LED is not port specific; if it
+ * belongs to some other port, ignore it
+ */
+ if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
+ (port != hw->port))
+ return 0;
+
+ return gpio_val;
}
+#define I40E_LED0 22
+#define I40E_LINK_ACTIVITY 0xC
+
/**
* i40e_led_get - return current on/off mode
* @hw: pointer to the hw struct
@@ -349,24 +477,20 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw)
**/
u32 i40e_led_get(struct i40e_hw *hw)
{
- u32 gpio_val = 0;
u32 mode = 0;
- u32 port;
int i;
- for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) {
- if (!hw->func_caps.led[i])
- continue;
-
- gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i));
- port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK)
- >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+ /* as per the documentation GPIO 22-29 are the LED
+ * GPIO pins named LED0..LED7
+ */
+ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+ u32 gpio_val = i40e_led_is_mine(hw, i);
- if (port != hw->port)
+ if (!gpio_val)
continue;
- mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
- >> I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT;
+ mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
+ I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
break;
}
@@ -376,60 +500,45 @@ u32 i40e_led_get(struct i40e_hw *hw)
/**
* i40e_led_set - set new on/off mode
* @hw: pointer to the hw struct
- * @mode: 0=off, else on (see EAS for mode details)
+ * @mode: 0=off, 0xf=on (else see manual for mode details)
+ * @blink: true if the LED should blink when on, false if steady
+ *
+ * If this function is used to turn on the blink, it should also
+ * be used to disable the blink when restoring the original state.
**/
-void i40e_led_set(struct i40e_hw *hw, u32 mode)
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
- u32 gpio_val = 0;
- u32 led_mode = 0;
- u32 port;
int i;
- for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) {
- if (!hw->func_caps.led[i])
- continue;
+ if (mode & 0xfffffff0)
+ hw_dbg(hw, "invalid mode passed in %X\n", mode);
- gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i));
- port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK)
- >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+ /* as per the documentation GPIO 22-29 are the LED
+ * GPIO pins named LED0..LED7
+ */
+ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+ u32 gpio_val = i40e_led_is_mine(hw, i);
- if (port != hw->port)
+ if (!gpio_val)
continue;
- led_mode = (mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
- I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
- gpio_val |= led_mode;
+ /* this & is a bit of paranoia, but serves as a range check */
+ gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
+ I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
+
+ if (mode == I40E_LINK_ACTIVITY)
+ blink = false;
+
+ gpio_val |= (blink ? 1 : 0) <<
+ I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT;
+
wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
+ break;
}
}
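The kernel-doc note above implies a save/blink/restore cycle; a hedged sketch of an "identify port" helper built on these two calls (the helper name and the delay are assumptions):

	static void example_identify_port(struct i40e_hw *hw)
	{
		u32 saved_mode = i40e_led_get(hw);

		i40e_led_set(hw, 0xf, true);	/* full on, blinking */
		msleep(3000);			/* long enough to spot */
		i40e_led_set(hw, saved_mode, false); /* restore, blink off */
	}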
/* Admin command wrappers */
-/**
- * i40e_aq_queue_shutdown
- * @hw: pointer to the hw struct
- * @unloading: is the driver unloading itself
- *
- * Tell the Firmware that we're shutting down the AdminQ and whether
- * or not the driver is unloading as well.
- **/
-i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
- bool unloading)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_queue_shutdown *cmd =
- (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
- i40e_status status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_queue_shutdown);
-
- if (unloading)
- cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
- status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
-
- return status;
-}
/**
* i40e_aq_set_link_restart_an
@@ -495,10 +604,12 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
/* update link status */
hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
+ hw->phy.media_type = i40e_get_media_type(hw);
hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
hw_link_info->link_info = resp->link_info;
hw_link_info->an_info = resp->an_info;
hw_link_info->ext_info = resp->ext_info;
+ hw_link_info->loopback = resp->loopback;
if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))
hw_link_info->lse_enable = true;
@@ -673,8 +784,8 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- struct i40e_aqc_switch_seid *cmd =
- (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
struct i40e_aqc_add_get_update_vsi_completion *resp =
(struct i40e_aqc_add_get_update_vsi_completion *)
&desc.params.raw;
@@ -683,7 +794,7 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_vsi_parameters);
- cmd->seid = cpu_to_le16(vsi_ctx->seid);
+ cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
@@ -717,13 +828,13 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- struct i40e_aqc_switch_seid *cmd =
- (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
i40e_status status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_update_vsi_parameters);
- cmd->seid = cpu_to_le16(vsi_ctx->seid);
+ cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
@@ -873,6 +984,7 @@ i40e_get_link_status_exit:
* @downlink_seid: the VSI SEID
* @enabled_tc: bitmap of TCs to be enabled
* @default_port: true for default port VSI, false for control port
+ * @enable_l2_filtering: true to add L2 filter table rules to regular forwarding rules for cloud support
* @veb_seid: pointer to where to put the resulting VEB SEID
* @cmd_details: pointer to command details structure or NULL
*
@@ -881,7 +993,8 @@ i40e_get_link_status_exit:
**/
i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc,
- bool default_port, u16 *veb_seid,
+ bool default_port, bool enable_l2_filtering,
+ u16 *veb_seid,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -907,6 +1020,10 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
else
veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
+
+ if (enable_l2_filtering)
+ veb_flags |= I40E_AQC_ADD_VEB_ENABLE_L2_FILTER;
+
cmd->veb_flags = cpu_to_le16(veb_flags);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -1059,86 +1176,6 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
}
/**
- * i40e_aq_add_vlan - Add VLAN ids to the HW filtering
- * @hw: pointer to the hw struct
- * @seid: VSI for the vlan filters
- * @v_list: list of vlan filters to be added
- * @count: length of the list
- * @cmd_details: pointer to command details structure or NULL
- **/
-i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_add_remove_vlan_element_data *v_list,
- u8 count, struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_macvlan *cmd =
- (struct i40e_aqc_macvlan *)&desc.params.raw;
- i40e_status status;
- u16 buf_size;
-
- if (count == 0 || !v_list || !hw)
- return I40E_ERR_PARAM;
-
- buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
-
- /* prep the rest of the request */
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan);
- cmd->num_addresses = cpu_to_le16(count);
- cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
- cmd->seid[1] = 0;
- cmd->seid[2] = 0;
-
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
-
- status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
- cmd_details);
-
- return status;
-}
-
-/**
- * i40e_aq_remove_vlan - Remove VLANs from the HW filtering
- * @hw: pointer to the hw struct
- * @seid: VSI for the vlan filters
- * @v_list: list of macvlans to be removed
- * @count: length of the list
- * @cmd_details: pointer to command details structure or NULL
- **/
-i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_add_remove_vlan_element_data *v_list,
- u8 count, struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_macvlan *cmd =
- (struct i40e_aqc_macvlan *)&desc.params.raw;
- i40e_status status;
- u16 buf_size;
-
- if (count == 0 || !v_list || !hw)
- return I40E_ERR_PARAM;
-
- buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
-
- /* prep the rest of the request */
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan);
- cmd->num_addresses = cpu_to_le16(count);
- cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
- cmd->seid[1] = 0;
- cmd->seid[2] = 0;
-
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
-
- status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
- cmd_details);
-
- return status;
-}
-
-/**
* i40e_aq_send_msg_to_vf
* @hw: pointer to the hardware structure
* @vfid: vf id to send msg
@@ -1681,6 +1718,63 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
}
/**
+ * i40e_aq_add_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @udp_port: the UDP port to add
+ * @header_len: length of the tunneling header in DWords
+ * @protocol_index: protocol index type
+ * @filter_index: pointer to where the new filter index is returned
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 header_len,
+ u8 protocol_index, u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_udp_tunnel *cmd =
+ (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
+ struct i40e_aqc_del_udp_tunnel_completion *resp =
+ (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
+
+ cmd->udp_port = cpu_to_le16(udp_port);
+ cmd->header_len = header_len;
+ cmd->protocol_type = protocol_index;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status)
+ *filter_index = resp->index;
+
+ return status;
+}
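A minimal sketch of registering a VXLAN port through this wrapper; the port number is the IANA default and the header length is an assumption, not a value taken from this patch:

	static i40e_status example_add_vxlan(struct i40e_hw *hw, u8 *idx)
	{
		/* 4789 = IANA VXLAN port; a header_len of 2 DWords is
		 * illustrative only
		 */
		return i40e_aq_add_udp_tunnel(hw, 4789, 2,
					      I40E_AQC_TUNNEL_TYPE_VXLAN,
					      idx, NULL);
	}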
+
+/**
+ * i40e_aq_del_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @index: filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_remove_udp_tunnel *cmd =
+ (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
+
+ cmd->index = index;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_delete_element - Delete switch element
* @hw: pointer to the hw struct
* @seid: the SEID to delete from the switch
@@ -2039,3 +2133,47 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw,
return 0;
}
+/**
+ * i40e_set_pci_config_data - store PCI bus info
+ * @hw: pointer to hardware structure
+ * @link_status: the link status word from PCI config space
+ *
+ * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
+ **/
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
+{
+ hw->bus.type = i40e_bus_type_pci_express;
+
+ switch (link_status & PCI_EXP_LNKSTA_NLW) {
+ case PCI_EXP_LNKSTA_NLW_X1:
+ hw->bus.width = i40e_bus_width_pcie_x1;
+ break;
+ case PCI_EXP_LNKSTA_NLW_X2:
+ hw->bus.width = i40e_bus_width_pcie_x2;
+ break;
+ case PCI_EXP_LNKSTA_NLW_X4:
+ hw->bus.width = i40e_bus_width_pcie_x4;
+ break;
+ case PCI_EXP_LNKSTA_NLW_X8:
+ hw->bus.width = i40e_bus_width_pcie_x8;
+ break;
+ default:
+ hw->bus.width = i40e_bus_width_unknown;
+ break;
+ }
+
+ switch (link_status & PCI_EXP_LNKSTA_CLS) {
+ case PCI_EXP_LNKSTA_CLS_2_5GB:
+ hw->bus.speed = i40e_bus_speed_2500;
+ break;
+ case PCI_EXP_LNKSTA_CLS_5_0GB:
+ hw->bus.speed = i40e_bus_speed_5000;
+ break;
+ case PCI_EXP_LNKSTA_CLS_8_0GB:
+ hw->bus.speed = i40e_bus_speed_8000;
+ break;
+ default:
+ hw->bus.speed = i40e_bus_speed_unknown;
+ break;
+ }
+}
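A worked decode of a typical link_status word, using the standard PCIe field encodings:

	/* e.g. a x8, 8.0 GT/s link:
	 *   link_status & PCI_EXP_LNKSTA_NLW == PCI_EXP_LNKSTA_NLW_X8
	 *     -> hw->bus.width = i40e_bus_width_pcie_x8
	 *   link_status & PCI_EXP_LNKSTA_CLS == PCI_EXP_LNKSTA_CLS_8_0GB
	 *     -> hw->bus.speed = i40e_bus_speed_8000
	 */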
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index ef4cb1cf31f2..0220b18b2b18 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -192,12 +191,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
len = (sizeof(struct i40e_aq_desc)
* pf->hw.aq.num_asq_entries);
- memcpy(p, pf->hw.aq.asq.desc, len);
+ memcpy(p, pf->hw.aq.asq.desc_buf.va, len);
p += len;
len = (sizeof(struct i40e_aq_desc)
* pf->hw.aq.num_arq_entries);
- memcpy(p, pf->hw.aq.arq.desc, len);
+ memcpy(p, pf->hw.aq.arq.desc_buf.va, len);
p += len;
i40e_dbg_dump_data_len = buflen;
@@ -362,7 +361,7 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
}
/**
- * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into pokem datum
+ * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
* @pf: the i40e_pf created in command write
* @seid: the seid the user put in
**/
@@ -516,10 +515,10 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
rx_ring->stats.bytes,
rx_ring->rx_stats.non_eop_descs);
dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
+ " rx_rings[%i]: rx_stats: alloc_page_failed = %lld, alloc_buff_failed = %lld\n",
i,
- rx_ring->rx_stats.alloc_rx_page_failed,
- rx_ring->rx_stats.alloc_rx_buff_failed);
+ rx_ring->rx_stats.alloc_page_failed,
+ rx_ring->rx_stats.alloc_buff_failed);
dev_info(&pf->pdev->dev,
" rx_rings[%i]: size = %i, dma = 0x%08lx\n",
i, rx_ring->size,
@@ -533,6 +532,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
if (!tx_ring)
continue;
+
dev_info(&pf->pdev->dev,
" tx_rings[%i]: desc = %p\n",
i, tx_ring->desc);
@@ -707,8 +707,13 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
{
struct i40e_adminq_ring *ring;
struct i40e_hw *hw = &pf->hw;
+ char hdr[32];
int i;
+ snprintf(hdr, sizeof(hdr), "%s %s: ",
+ dev_driver_string(&pf->pdev->dev),
+ dev_name(&pf->pdev->dev));
+
/* first the send (command) ring, then the receive (event) ring */
dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
ring = &(hw->aq.asq);
@@ -718,14 +723,8 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
" at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
i, d->flags, d->opcode, d->datalen, d->retval,
d->cookie_high, d->cookie_low);
- dev_info(&pf->pdev->dev,
- " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- d->params.raw[0], d->params.raw[1], d->params.raw[2],
- d->params.raw[3], d->params.raw[4], d->params.raw[5],
- d->params.raw[6], d->params.raw[7], d->params.raw[8],
- d->params.raw[9], d->params.raw[10], d->params.raw[11],
- d->params.raw[12], d->params.raw[13],
- d->params.raw[14], d->params.raw[15]);
+ print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
+ 16, 1, d->params.raw, 16, 0);
}
dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
@@ -736,14 +735,8 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
" ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
i, d->flags, d->opcode, d->datalen, d->retval,
d->cookie_high, d->cookie_low);
- dev_info(&pf->pdev->dev,
- " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- d->params.raw[0], d->params.raw[1], d->params.raw[2],
- d->params.raw[3], d->params.raw[4], d->params.raw[5],
- d->params.raw[6], d->params.raw[7], d->params.raw[8],
- d->params.raw[9], d->params.raw[10], d->params.raw[11],
- d->params.raw[12], d->params.raw[13],
- d->params.raw[14], d->params.raw[15]);
+ print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
+ 16, 1, d->params.raw, 16, 0);
}
}
@@ -766,20 +759,17 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
vsi = i40e_dbg_find_vsi(pf, vsi_seid);
if (!vsi) {
- dev_info(&pf->pdev->dev,
- "vsi %d not found\n", vsi_seid);
- if (is_rx_ring)
- dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
- else
- dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
return;
}
if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
- if (is_rx_ring)
- dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
- else
- dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ return;
+ }
+ if (!vsi->tx_rings) {
+ dev_info(&pf->pdev->dev,
+ "descriptor rings have not been allocated for vsi %d\n",
+ vsi_seid);
return;
}
if (is_rx_ring)
@@ -830,10 +820,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
desc_n, ds->read.pkt_addr, ds->read.hdr_addr,
ds->read.rsvd1, ds->read.rsvd2);
} else {
- if (is_rx_ring)
- dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
- else
- dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
}
}
@@ -979,8 +966,7 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
veb = i40e_dbg_find_veb(pf, seid);
if (!veb) {
- dev_info(&pf->pdev->dev,
- "%d: can't find veb\n", seid);
+ dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
return;
}
dev_info(&pf->pdev->dev,
@@ -1022,8 +1008,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
char *cmd_buf, *cmd_buf_tmp;
int bytes_not_copied;
struct i40e_vsi *vsi;
- u8 *print_buf_start;
- u8 *print_buf;
int vsi_seid;
int veb_seid;
int cnt;
@@ -1048,11 +1032,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
count = cmd_buf_tmp - cmd_buf + 1;
}
- print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL);
- if (!print_buf_start)
- goto command_write_done;
- print_buf = print_buf_start;
-
if (strncmp(cmd_buf, "add vsi", 7) == 0) {
vsi_seid = -1;
cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
@@ -1462,20 +1441,24 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
} else if (strncmp(cmd_buf, "pfr", 3) == 0) {
dev_info(&pf->pdev->dev, "forcing PFR\n");
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "corer", 5) == 0) {
dev_info(&pf->pdev->dev, "forcing CoreR\n");
- i40e_do_reset(pf, (1 << __I40E_CORE_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "globr", 5) == 0) {
dev_info(&pf->pdev->dev, "forcing GlobR\n");
- i40e_do_reset(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+
+ } else if (strncmp(cmd_buf, "empr", 4) == 0) {
+ dev_info(&pf->pdev->dev, "forcing EMPR\n");
+ i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "read", 4) == 0) {
u32 address;
u32 value;
- cnt = sscanf(&cmd_buf[4], "%x", &address);
+ cnt = sscanf(&cmd_buf[4], "%i", &address);
if (cnt != 1) {
dev_info(&pf->pdev->dev, "read <reg>\n");
goto command_write_done;
@@ -1494,7 +1477,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else if (strncmp(cmd_buf, "write", 5) == 0) {
u32 address, value;
- cnt = sscanf(&cmd_buf[5], "%x %x", &address, &value);
+ cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
if (cnt != 2) {
dev_info(&pf->pdev->dev, "write <reg> <value>\n");
goto command_write_done;
@@ -1512,7 +1495,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
address, value);
} else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
- cnt = sscanf(&cmd_buf[15], "%d", &vsi_seid);
+ cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
if (cnt == 0) {
int i;
for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
@@ -1539,6 +1522,118 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else {
dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats pf\n");
}
+ } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
+ struct i40e_aq_desc *desc;
+ i40e_status ret;
+
+ desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+ if (!desc)
+ goto command_write_done;
+ cnt = sscanf(&cmd_buf[11],
+ "%hx %hx %hx %hx %x %x %x %x %x %x",
+ &desc->flags,
+ &desc->opcode, &desc->datalen, &desc->retval,
+ &desc->cookie_high, &desc->cookie_low,
+ &desc->params.internal.param0,
+ &desc->params.internal.param1,
+ &desc->params.internal.param2,
+ &desc->params.internal.param3);
+ if (cnt != 10) {
+ dev_info(&pf->pdev->dev,
+ "send aq_cmd: bad command string, cnt=%d\n",
+ cnt);
+ kfree(desc);
+ desc = NULL;
+ goto command_write_done;
+ }
+ ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL);
+ if (!ret) {
+ dev_info(&pf->pdev->dev, "AQ command sent, Status: Success\n");
+ } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) {
+ dev_info(&pf->pdev->dev,
+ "AQ command send failed Opcode %x AQ Error: %d\n",
+ desc->opcode, pf->hw.aq.asq_last_status);
+ } else {
+ dev_info(&pf->pdev->dev,
+ "AQ command send failed Opcode %x Status: %d\n",
+ desc->opcode, ret);
+ }
+ dev_info(&pf->pdev->dev,
+ "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ desc->flags, desc->opcode, desc->datalen, desc->retval,
+ desc->cookie_high, desc->cookie_low,
+ desc->params.internal.param0,
+ desc->params.internal.param1,
+ desc->params.internal.param2,
+ desc->params.internal.param3);
+ kfree(desc);
+ desc = NULL;
+ } else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
+ struct i40e_aq_desc *desc;
+ i40e_status ret;
+ u16 buffer_len;
+ u8 *buff;
+
+ desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+ if (!desc)
+ goto command_write_done;
+ cnt = sscanf(&cmd_buf[20],
+ "%hx %hx %hx %hx %x %x %x %x %x %x %hd",
+ &desc->flags,
+ &desc->opcode, &desc->datalen, &desc->retval,
+ &desc->cookie_high, &desc->cookie_low,
+ &desc->params.internal.param0,
+ &desc->params.internal.param1,
+ &desc->params.internal.param2,
+ &desc->params.internal.param3,
+ &buffer_len);
+ if (cnt != 11) {
+ dev_info(&pf->pdev->dev,
+ "send indirect aq_cmd: bad command string, cnt=%d\n",
+ cnt);
+ kfree(desc);
+ desc = NULL;
+ goto command_write_done;
+ }
+ /* Just stub a buffer big enough in case the user messed up */
+ if (buffer_len == 0)
+ buffer_len = 1280;
+
+ buff = kzalloc(buffer_len, GFP_KERNEL);
+ if (!buff) {
+ kfree(desc);
+ desc = NULL;
+ goto command_write_done;
+ }
+ desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ ret = i40e_asq_send_command(&pf->hw, desc, buff,
+ buffer_len, NULL);
+ if (!ret) {
+ dev_info(&pf->pdev->dev, "AQ command sent, Status: Success\n");
+ } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) {
+ dev_info(&pf->pdev->dev,
+ "AQ command send failed Opcode %x AQ Error: %d\n",
+ desc->opcode, pf->hw.aq.asq_last_status);
+ } else {
+ dev_info(&pf->pdev->dev,
+ "AQ command send failed Opcode %x Status: %d\n",
+ desc->opcode, ret);
+ }
+ dev_info(&pf->pdev->dev,
+ "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ desc->flags, desc->opcode, desc->datalen, desc->retval,
+ desc->cookie_high, desc->cookie_low,
+ desc->params.internal.param0,
+ desc->params.internal.param1,
+ desc->params.internal.param2,
+ desc->params.internal.param3);
+ print_hex_dump(KERN_INFO, "AQ buffer WB: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buff, buffer_len, true);
+ kfree(buff);
+ buff = NULL;
+ kfree(desc);
+ desc = NULL;
} else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
(strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
struct i40e_fdir_data fd_data;
@@ -1564,7 +1659,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
if (strncmp(cmd_buf, "add", 3) == 0)
add = true;
cnt = sscanf(&cmd_buf[13],
- "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %512s",
+ "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s",
&fd_data.q_index,
&fd_data.flex_off, &fd_data.pctype,
&fd_data.dest_vsi, &fd_data.dest_ctl,
@@ -1588,19 +1683,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
packet_len = min_t(u16,
packet_len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP);
- dev_info(&pf->pdev->dev, "FD raw packet:\n");
for (i = 0; i < packet_len; i++) {
sscanf(&asc_packet[j], "%2hhx ",
&fd_data.raw_packet[i]);
j += 3;
- snprintf(print_buf, 3, "%02x ", fd_data.raw_packet[i]);
- print_buf += 3;
- if ((i % 16) == 15) {
- snprintf(print_buf, 1, "\n");
- print_buf++;
- }
}
- dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ dev_info(&pf->pdev->dev, "FD raw packet dump\n");
+ print_hex_dump(KERN_INFO, "FD raw packet: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ fd_data.raw_packet, packet_len, true);
ret = i40e_program_fdir_filter(&fd_data, pf, add);
if (!ret) {
dev_info(&pf->pdev->dev, "Filter command sent, Status: Success\n");
@@ -1634,7 +1725,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else if (strncmp(&cmd_buf[5],
"get local", 9) == 0) {
u16 llen, rlen;
- int ret, i;
+ int ret;
u8 *buff;
buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
if (!buff)
@@ -1652,22 +1743,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
buff = NULL;
goto command_write_done;
}
- dev_info(&pf->pdev->dev,
- "Get LLDP MIB (local) AQ buffer written back:\n");
- for (i = 0; i < I40E_LLDPDU_SIZE; i++) {
- snprintf(print_buf, 3, "%02x ", buff[i]);
- print_buf += 3;
- if ((i % 16) == 15) {
- snprintf(print_buf, 1, "\n");
- print_buf++;
- }
- }
- dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ dev_info(&pf->pdev->dev, "LLDP MIB (local)\n");
+ print_hex_dump(KERN_INFO, "LLDP MIB (local): ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buff, I40E_LLDPDU_SIZE, true);
kfree(buff);
buff = NULL;
} else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
u16 llen, rlen;
- int ret, i;
+ int ret;
u8 *buff;
buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
if (!buff)
@@ -1686,17 +1770,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
buff = NULL;
goto command_write_done;
}
- dev_info(&pf->pdev->dev,
- "Get LLDP MIB (remote) AQ buffer written back:\n");
- for (i = 0; i < I40E_LLDPDU_SIZE; i++) {
- snprintf(print_buf, 3, "%02x ", buff[i]);
- print_buf += 3;
- if ((i % 16) == 15) {
- snprintf(print_buf, 1, "\n");
- print_buf++;
- }
- }
- dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n");
+ print_hex_dump(KERN_INFO, "LLDP MIB (remote): ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buff, I40E_LLDPDU_SIZE, true);
kfree(buff);
buff = NULL;
} else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
@@ -1721,7 +1798,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
}
} else if (strncmp(cmd_buf, "nvm read", 8) == 0) {
- u16 buffer_len, i, bytes;
+ u16 buffer_len, bytes;
u16 module;
u32 offset;
u16 *buff;
@@ -1775,16 +1852,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev,
"Read NVM module=0x%x offset=0x%x words=%d\n",
module, offset, buffer_len);
- for (i = 0; i < buffer_len; i++) {
- if ((i % 16) == 0) {
- snprintf(print_buf, 11, "\n0x%08x: ",
- offset + i);
- print_buf += 11;
- }
- snprintf(print_buf, 5, "%04x ", buff[i]);
- print_buf += 5;
- }
- dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ if (bytes)
+ print_hex_dump(KERN_INFO, "NVM Dump: ",
+ DUMP_PREFIX_OFFSET, 16, 2,
+ buff, bytes, true);
}
kfree(buff);
buff = NULL;
@@ -1814,6 +1885,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " pfr\n");
dev_info(&pf->pdev->dev, " corer\n");
dev_info(&pf->pdev->dev, " globr\n");
+ dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
+ dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
dev_info(&pf->pdev->dev, " add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
dev_info(&pf->pdev->dev, " lldp start\n");
@@ -1828,9 +1901,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
command_write_done:
kfree(cmd_buf);
cmd_buf = NULL;
- kfree(print_buf_start);
- print_buf = NULL;
- print_buf_start = NULL;
return count;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index de255143bde6..b2380daef8c1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -68,16 +67,16 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
struct i40e_diag_reg_test_info i40e_reg_list[] = {
/* offset mask elements stride */
- {I40E_QTX_CTL(0), 0x0000FFBF, 64, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
+ {I40E_QTX_CTL(0), 0x0000FFBF, 4, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
{I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
- {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
- {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
- {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
+ {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
+ {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
+ {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
{I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
{I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
- {I40E_PFINT_LNKLSTN(0), 0x000007FF, 511, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
- {I40E_QINT_TQCTL(0), 0x000000FF, I40E_QINT_TQCTL_MAX_INDEX + 1, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
- {I40E_QINT_RQCTL(0), 0x000000FF, I40E_QINT_RQCTL_MAX_INDEX + 1, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
+ {I40E_PFINT_LNKLSTN(0), 0x000007FF, 64, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
+ {I40E_QINT_TQCTL(0), 0x000000FF, 64, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
+ {I40E_QINT_RQCTL(0), 0x000000FF, 64, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
{I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
{ 0 }
};
@@ -119,7 +118,7 @@ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
/* read NVM control word and if NVM valid, validate EEPROM checksum*/
ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
- if ((!ret_code) &&
+ if (!ret_code &&
((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
(0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
ret_code = i40e_validate_nvm_checksum(hw, NULL);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
index 3d98277f4526..0b5911652084 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -31,10 +30,10 @@
#include "i40e_type.h"
enum i40e_lb_mode {
- I40E_LB_MODE_NONE = 0,
- I40E_LB_MODE_PHY_LOCAL,
- I40E_LB_MODE_PHY_REMOTE,
- I40E_LB_MODE_MAC_LOCAL,
+ I40E_LB_MODE_NONE = 0x0,
+ I40E_LB_MODE_PHY_LOCAL = I40E_AQ_LB_PHY_LOCAL,
+ I40E_LB_MODE_PHY_REMOTE = I40E_AQ_LB_PHY_REMOTE,
+ I40E_LB_MODE_MAC_LOCAL = I40E_AQ_LB_MAC_LOCAL,
};
struct i40e_diag_reg_test_info {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 1b86138fa9e1..b886ee5bd99d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -193,28 +192,48 @@ static int i40e_get_settings(struct net_device *netdev,
ecmd->supported = SUPPORTED_10000baseKR_Full;
ecmd->advertising = ADVERTISED_10000baseKR_Full;
break;
- case I40E_PHY_TYPE_10GBASE_T:
default:
- ecmd->supported = SUPPORTED_10000baseT_Full;
- ecmd->advertising = ADVERTISED_10000baseT_Full;
+ if (i40e_is_40G_device(hw->device_id)) {
+ ecmd->supported = SUPPORTED_40000baseSR4_Full;
+ ecmd->advertising = ADVERTISED_40000baseSR4_Full;
+ } else {
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
+ }
break;
}
- /* for now just say autoneg all the time */
ecmd->supported |= SUPPORTED_Autoneg;
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE);
- if (hw->phy.media_type == I40E_MEDIA_TYPE_BACKPLANE) {
+ switch (hw->phy.media_type) {
+ case I40E_MEDIA_TYPE_BACKPLANE:
ecmd->supported |= SUPPORTED_Backplane;
ecmd->advertising |= ADVERTISED_Backplane;
ecmd->port = PORT_NONE;
- } else if (hw->phy.media_type == I40E_MEDIA_TYPE_BASET) {
+ break;
+ case I40E_MEDIA_TYPE_BASET:
ecmd->supported |= SUPPORTED_TP;
ecmd->advertising |= ADVERTISED_TP;
ecmd->port = PORT_TP;
- } else {
+ break;
+ case I40E_MEDIA_TYPE_DA:
+ case I40E_MEDIA_TYPE_CX4:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_DA;
+ break;
+ case I40E_MEDIA_TYPE_FIBER:
ecmd->supported |= SUPPORTED_FIBRE;
ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_FIBRE;
+ break;
+ case I40E_MEDIA_TYPE_UNKNOWN:
+ default:
+ ecmd->port = PORT_OTHER;
+ break;
}
ecmd->transceiver = XCVR_EXTERNAL;
@@ -256,12 +275,14 @@ static void i40e_get_pauseparam(struct net_device *netdev,
((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
- pause->rx_pause = 0;
- pause->tx_pause = 0;
- if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_RX)
+ if (hw->fc.current_mode == I40E_FC_RX_PAUSE) {
+ pause->rx_pause = 1;
+ } else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) {
+ pause->tx_pause = 1;
+ } else if (hw->fc.current_mode == I40E_FC_FULL) {
pause->rx_pause = 1;
- if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_TX)
pause->tx_pause = 1;
+ }
}
static u32 i40e_get_msglevel(struct net_device *netdev)
@@ -329,38 +350,56 @@ static int i40e_get_eeprom(struct net_device *netdev,
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
- int first_word, last_word;
- u16 i, eeprom_len;
- u16 *eeprom_buff;
- int ret_val = 0;
-
+ struct i40e_pf *pf = np->vsi->back;
+ int ret_val = 0, len;
+ u8 *eeprom_buff;
+ u16 i, sectors;
+ bool last;
+#define I40E_NVM_SECTOR_SIZE 4096
if (eeprom->len == 0)
return -EINVAL;
eeprom->magic = hw->vendor_id | (hw->device_id << 16);
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- eeprom_len = last_word - first_word + 1;
-
- eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
+ eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
if (!eeprom_buff)
return -ENOMEM;
- ret_val = i40e_read_nvm_buffer(hw, first_word, &eeprom_len,
- eeprom_buff);
- if (eeprom_len == 0) {
- kfree(eeprom_buff);
- return -EACCES;
+ ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret_val) {
+ dev_info(&pf->pdev->dev,
+ "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
+ ret_val, hw->aq.asq_last_status);
+ goto free_buff;
}
- /* Device's eeprom is always little-endian, word addressable */
- for (i = 0; i < eeprom_len; i++)
- le16_to_cpus(&eeprom_buff[i]);
+ sectors = eeprom->len / I40E_NVM_SECTOR_SIZE;
+ sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;
+ len = I40E_NVM_SECTOR_SIZE;
+ last = false;
+ for (i = 0; i < sectors; i++) {
+ if (i == (sectors - 1)) {
+ len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
+ last = true;
+ }
+ ret_val = i40e_aq_read_nvm(hw, 0x0,
+ eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
+ len,
+ (u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
+ last, NULL);
+ if (ret_val) {
+ dev_info(&pf->pdev->dev,
+ "read NVM failed err=%d status=0x%x\n",
+ ret_val, hw->aq.asq_last_status);
+ goto release_nvm;
+ }
+ }
- memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+release_nvm:
+ i40e_release_nvm(hw);
+ memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
+free_buff:
kfree(eeprom_buff);
-
return ret_val;
}
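The new sector loop splits the request into 4 KB reads with a short tail; for example:

	/* eeprom->len = 10000 bytes:
	 *   sectors = 10000 / 4096 = 2, plus a remainder -> 3 total
	 *   reads of 4096, 4096 and 1808 bytes; 'last' set on the third
	 */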
@@ -368,8 +407,14 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
-
- return hw->nvm.sr_size * 2;
+ u32 val;
+
+ val = (rd32(hw, I40E_GLPCI_LBARCTRL)
+ & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
+ >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
+ /* register returns value in power of 2, 64Kbyte chunks. */
+ val = (64 * 1024) * (1 << val);
+ return val;
}
static void i40e_get_drvinfo(struct net_device *netdev,
@@ -418,15 +463,19 @@ static int i40e_set_ringparam(struct net_device *netdev,
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- new_tx_count = clamp_t(u32, ring->tx_pending,
- I40E_MIN_NUM_DESCRIPTORS,
- I40E_MAX_NUM_DESCRIPTORS);
- new_tx_count = ALIGN(new_tx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
+ if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
+ ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
+ ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
+ ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
+ netdev_info(netdev,
+ "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
+ ring->tx_pending, ring->rx_pending,
+ I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
+ return -EINVAL;
+ }
- new_rx_count = clamp_t(u32, ring->rx_pending,
- I40E_MIN_NUM_DESCRIPTORS,
- I40E_MAX_NUM_DESCRIPTORS);
- new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
+ new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
+ new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
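ALIGN rounds each validated request up to the descriptor multiple; assuming the usual multiple of 32 (an assumption, the macro value is not shown in this hunk):

	/* ring->tx_pending = 1000 -> ALIGN(1000, 32) = 1024 */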
/* if nothing to do return success */
if ((new_tx_count == vsi->tx_rings[0]->count) &&
@@ -702,8 +751,12 @@ static int i40e_get_ts_info(struct net_device *dev,
return ethtool_op_get_ts_info(dev, info);
}
-static int i40e_link_test(struct i40e_pf *pf, u64 *data)
+static int i40e_link_test(struct net_device *netdev, u64 *data)
{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ netif_info(pf, hw, netdev, "link test\n");
if (i40e_get_link_status(&pf->hw))
*data = 0;
else
@@ -712,36 +765,51 @@ static int i40e_link_test(struct i40e_pf *pf, u64 *data)
return *data;
}
-static int i40e_reg_test(struct i40e_pf *pf, u64 *data)
+static int i40e_reg_test(struct net_device *netdev, u64 *data)
{
- i40e_status ret;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
- ret = i40e_diag_reg_test(&pf->hw);
- *data = ret;
+ netif_info(pf, hw, netdev, "register test\n");
+ *data = i40e_diag_reg_test(&pf->hw);
- return ret;
+ return *data;
}
-static int i40e_eeprom_test(struct i40e_pf *pf, u64 *data)
+static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
{
- i40e_status ret;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
- ret = i40e_diag_eeprom_test(&pf->hw);
- *data = ret;
+ netif_info(pf, hw, netdev, "eeprom test\n");
+ *data = i40e_diag_eeprom_test(&pf->hw);
- return ret;
+ return *data;
}
-static int i40e_intr_test(struct i40e_pf *pf, u64 *data)
+static int i40e_intr_test(struct net_device *netdev, u64 *data)
{
- *data = -ENOSYS;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ u16 swc_old = pf->sw_int_count;
+
+ netif_info(pf, hw, netdev, "interrupt test\n");
+ wr32(&pf->hw, I40E_PFINT_DYN_CTL0,
+ (I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
+ usleep_range(1000, 2000);
+ *data = (swc_old == pf->sw_int_count);
return *data;
}
-static int i40e_loopback_test(struct i40e_pf *pf, u64 *data)
+static int i40e_loopback_test(struct net_device *netdev, u64 *data)
{
- *data = -ENOSYS;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ netif_info(pf, hw, netdev, "loopback test not implemented\n");
+ *data = 0;
return *data;
}
@@ -752,42 +820,38 @@ static void i40e_diag_test(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
- set_bit(__I40E_TESTING, &pf->state);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
+ netif_info(pf, drv, netdev, "offline testing starting\n");
- netdev_info(netdev, "offline testing starting\n");
+ set_bit(__I40E_TESTING, &pf->state);
/* Link test performed before hardware reset
* so autoneg doesn't interfere with test result
*/
- netdev_info(netdev, "link test starting\n");
- if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK]))
+ if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- netdev_info(netdev, "register test starting\n");
- if (i40e_reg_test(pf, &data[I40E_ETH_TEST_REG]))
+ if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
- netdev_info(netdev, "eeprom test starting\n");
- if (i40e_eeprom_test(pf, &data[I40E_ETH_TEST_EEPROM]))
+ if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
- netdev_info(netdev, "interrupt test starting\n");
- if (i40e_intr_test(pf, &data[I40E_ETH_TEST_INTR]))
+ if (i40e_loopback_test(netdev, &data[I40E_ETH_TEST_LOOPBACK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
- netdev_info(netdev, "loopback test starting\n");
- if (i40e_loopback_test(pf, &data[I40E_ETH_TEST_LOOPBACK]))
+ /* run reg test last, a reset is required after it */
+ if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
eth_test->flags |= ETH_TEST_FL_FAILED;
+ clear_bit(__I40E_TESTING, &pf->state);
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
} else {
- netdev_info(netdev, "online test starting\n");
/* Online tests */
- if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK]))
+ netif_info(pf, drv, netdev, "online testing starting\n");
+
+ if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
/* Offline only tests, not run in online; pass by default */
@@ -795,16 +859,53 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_EEPROM] = 0;
data[I40E_ETH_TEST_INTR] = 0;
data[I40E_ETH_TEST_LOOPBACK] = 0;
-
- clear_bit(__I40E_TESTING, &pf->state);
}
+
+ netif_info(pf, drv, netdev, "testing finished\n");
}
static void i40e_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
- wol->supported = 0;
- wol->wolopts = 0;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u16 wol_nvm_bits;
+
+ /* NVM bit on means WoL disabled for the port */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
+ if ((1 << hw->port) & wol_nvm_bits) {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ } else {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0);
+ }
+}
+
+static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u16 wol_nvm_bits;
+
+ /* NVM bit on means WoL disabled for the port */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
+ if (((1 << hw->port) & wol_nvm_bits))
+ return -EOPNOTSUPP;
+
+ /* only magic packet is supported */
+ if (wol->wolopts && (wol->wolopts != WAKE_MAGIC))
+ return -EOPNOTSUPP;
+
+ /* is this a new value? */
+ if (pf->wol_en != !!wol->wolopts) {
+ pf->wol_en = !!wol->wolopts;
+ device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
+ }
+
+ return 0;
}
static int i40e_nway_reset(struct net_device *netdev)
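
Both WoL handlers above share the same NVM gating; a small sketch of that check
(the helper name is hypothetical):

    /* A set bit in the wake-on-LAN NVM word disables WoL for that port;
     * both i40e_get_wol() and i40e_set_wol() test it this way.
     */
    static bool port_wol_disabled(u16 wol_nvm_bits, u8 port)
    {
            return ((1 << port) & wol_nvm_bits) != 0;
    }
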
@@ -838,13 +939,13 @@ static int i40e_set_phys_id(struct net_device *netdev,
pf->led_status = i40e_led_get(hw);
return blink_freq;
case ETHTOOL_ID_ON:
- i40e_led_set(hw, 0xF);
+ i40e_led_set(hw, 0xF, false);
break;
case ETHTOOL_ID_OFF:
- i40e_led_set(hw, 0x0);
+ i40e_led_set(hw, 0x0, false);
break;
case ETHTOOL_ID_INACTIVE:
- i40e_led_set(hw, pf->led_status);
+ i40e_led_set(hw, pf->led_status, false);
break;
}
@@ -1142,6 +1243,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
}
#define IP_HEADER_OFFSET 14
+#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
* i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 Flow Director filters for
* a specific flow spec
@@ -1162,6 +1264,12 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
bool err = false;
int ret;
int i;
+ char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+ 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+
+ memcpy(fd_data->raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
udp = (struct udphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
@@ -1192,6 +1300,7 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
return err ? -EOPNOTSUPP : 0;
}
+#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
* i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 Flow Director filters for
* a specific flow spec
@@ -1211,6 +1320,14 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
struct iphdr *ip;
bool err = false;
int ret;
+ /* Dummy packet */
+ char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+ 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0x80, 0x11, 0x0, 0x72, 0, 0, 0, 0};
+
+ memcpy(fd_data->raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
tcp = (struct tcphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
@@ -1218,6 +1335,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
tcp->dest = fsp->h_u.tcp_ip4_spec.pdst;
+ ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
+ tcp->source = fsp->h_u.tcp_ip4_spec.psrc;
fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
ret = i40e_program_fdir_filter(fd_data, pf, add);
@@ -1232,9 +1351,6 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
fd_data->pctype, ret);
}
- ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
- tcp->source = fsp->h_u.tcp_ip4_spec.psrc;
-
fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
ret = i40e_program_fdir_filter(fd_data, pf, add);
@@ -1268,6 +1384,7 @@ static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
return -EOPNOTSUPP;
}
+#define I40E_IP_DUMMY_PACKET_LEN 34
/**
* i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
* a specific flow spec
@@ -1287,7 +1404,11 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
bool err = false;
int ret;
int i;
+ char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+ 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ memcpy(fd_data->raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
ip->saddr = fsp->h_u.usr_ip4_spec.ip4src;
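
For reference, the dummy packets the three Flow Director helpers memcpy() into
raw_packet share one layout; the byte map below is reconstructed from the
arrays above:

    /* Byte map of the dummy Flow Director packets (reconstructed):
     *   0..11   zeroed dst/src MAC addresses
     *   12..13  0x08 0x00 -- IPv4 ethertype, hence IP_HEADER_OFFSET == 14
     *   14..    IPv4 header starting 0x45 (version 4, IHL 5); the
     *           protocol byte is 0x11 (UDP, 42-byte packet), 0x06
     *           (TCP, 54-byte packet) or a placeholder for raw IPv4
     *           (34-byte packet)
     */
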
@@ -1431,6 +1552,94 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
return ret;
}
+/**
+ * i40e_max_channels - get Max number of combined channels supported
+ * @vsi: vsi pointer
+ **/
+static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
+{
+ /* TODO: This code assumes DCB and FD are disabled for now. */
+ return vsi->alloc_queue_pairs;
+}
+
+/**
+ * i40e_get_channels - Get the current and maximum supported channel counts
+ * @netdev: network interface device structure
+ * @ch: ethtool channels structure
+ *
+ * We don't support separate tx and rx queues as channels. The other count
+ * represents how many queues are being used for control. max_combined counts
+ * how many queue pairs we can support. They may not be mapped 1 to 1 with
+ * q_vectors since we support a lot more queue pairs than q_vectors.
+ **/
+static void i40e_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+
+ /* report maximum channels */
+ ch->max_combined = i40e_max_channels(vsi);
+
+ /* report info for other vector */
+ ch->other_count = (pf->flags & I40E_FLAG_FDIR_ENABLED) ? 1 : 0;
+ ch->max_other = ch->other_count;
+
+ /* Note: This code assumes DCB is disabled for now. */
+ ch->combined_count = vsi->num_queue_pairs;
+}
+
+/**
+ * i40e_set_channels - Set the new channel count.
+ * @netdev: network interface device structure
+ * @ch: ethtool channels structure
+ *
+ * The new channel count may differ from the one requested by the user,
+ * since it is rounded down to a power-of-two value.
+ **/
+static int i40e_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ unsigned int count = ch->combined_count;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int new_count;
+
+ /* We do not support setting channels for any other VSI at present */
+ if (vsi->type != I40E_VSI_MAIN)
+ return -EINVAL;
+
+ /* verify they are not requesting separate vectors */
+ if (!count || ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ /* verify other_count has not changed */
+ if (ch->other_count != ((pf->flags & I40E_FLAG_FDIR_ENABLED) ? 1 : 0))
+ return -EINVAL;
+
+ /* verify the number of channels does not exceed hardware limits */
+ if (count > i40e_max_channels(vsi))
+ return -EINVAL;
+
+ /* update feature limits from largest to smallest supported values */
+ /* TODO: Flow director limit, DCB etc */
+
+ /* cap RSS limit */
+ if (count > pf->rss_size_max)
+ count = pf->rss_size_max;
+
+ /* use rss_reconfig to rebuild with new queue count and update traffic
+ * class queue mapping
+ */
+ new_count = i40e_reconfig_rss_queues(pf, count);
+ if (new_count > 1)
+ return 0;
+ else
+ return -EINVAL;
+}
+
static const struct ethtool_ops i40e_ethtool_ops = {
.get_settings = i40e_get_settings,
.get_drvinfo = i40e_get_drvinfo,
@@ -1439,6 +1648,7 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.nway_reset = i40e_nway_reset,
.get_link = ethtool_op_get_link,
.get_wol = i40e_get_wol,
+ .set_wol = i40e_set_wol,
.get_eeprom_len = i40e_get_eeprom_len,
.get_eeprom = i40e_get_eeprom,
.get_ringparam = i40e_get_ringparam,
@@ -1455,6 +1665,8 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.get_ethtool_stats = i40e_get_ethtool_stats,
.get_coalesce = i40e_get_coalesce,
.set_coalesce = i40e_set_coalesce,
+ .get_channels = i40e_get_channels,
+ .set_channels = i40e_set_channels,
.get_ts_info = i40e_get_ts_info,
};
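
A condensed sketch of the guard rails i40e_set_channels() applies before
rebuilding (the helper is hypothetical; the path is typically reached via
"ethtool -L <iface> combined N"):

    /* Condensed validation from i40e_set_channels(): combined queues
     * only, other_count immutable, request capped by hardware limits.
     */
    static int validate_channel_request(struct ethtool_channels *ch,
                                        unsigned int max_combined,
                                        unsigned int cur_other)
    {
            if (!ch->combined_count || ch->rx_count || ch->tx_count)
                    return -EINVAL; /* no separate rx/tx vectors */
            if (ch->other_count != cur_other)
                    return -EINVAL; /* can't change the FD vector here */
            if (ch->combined_count > max_combined)
                    return -EINVAL; /* beyond hardware limits */
            return 0;
    }
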
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 901804af8b0e..76dfef3e0104 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -47,10 +46,10 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
u64 direct_mode_sz)
{
enum i40e_memory_type mem_type __attribute__((unused));
- i40e_status ret_code = 0;
struct i40e_hmc_sd_entry *sd_entry;
bool dma_mem_alloc_done = false;
struct i40e_dma_mem mem;
+ i40e_status ret_code;
u64 alloc_len;
if (NULL == hmc_info->sd_table.sd_entry) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index aacd42a261e9..72bf30aba150 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index a695b91c9c79..101ed4107312 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
index 00ff35006077..341de925a298 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -113,8 +112,8 @@ enum i40e_hmc_lan_object_size {
#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
#define I40E_HMC_OBJ_SIZE_TXQ 128
#define I40E_HMC_OBJ_SIZE_RXQ 32
-#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128
-#define I40E_HMC_OBJ_SIZE_FCOE_FILT 32
+#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 64
+#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64
enum i40e_hmc_lan_rsrc_type {
I40E_HMC_LAN_FULL = 0,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index be15938ba213..1b792ee592d0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -27,6 +26,9 @@
/* Local includes */
#include "i40e.h"
+#ifdef CONFIG_I40E_VXLAN
+#include <net/vxlan.h>
+#endif
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
@@ -36,7 +38,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 11
+#define DRV_VERSION_BUILD 25
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -48,7 +50,7 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
-static int i40e_setup_pf_switch(struct i40e_pf *pf);
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
@@ -354,6 +356,13 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
int i;
+
+ if (test_bit(__I40E_DOWN, &vsi->state))
+ return stats;
+
+ if (!vsi->tx_rings)
+ return stats;
+
rcu_read_lock();
for (i = 0; i < vsi->num_queue_pairs; i++) {
struct i40e_ring *tx_ring, *rx_ring;
@@ -413,7 +422,7 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
- if (vsi->rx_rings)
+ if (vsi->rx_rings && vsi->rx_rings[0]) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
memset(&vsi->rx_rings[i]->stats, 0 ,
sizeof(vsi->rx_rings[i]->stats));
@@ -424,6 +433,7 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
memset(&vsi->tx_rings[i]->tx_stats, 0,
sizeof(vsi->tx_rings[i]->tx_stats));
}
+ }
vsi->stat_offsets_loaded = false;
}
@@ -574,10 +584,11 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
veb->stat_offsets_loaded,
&oes->tx_discards, &es->tx_discards);
- i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
- veb->stat_offsets_loaded,
- &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
-
+ if (hw->revision_id > 0)
+ i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_unknown_protocol,
+ &es->rx_unknown_protocol);
i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
veb->stat_offsets_loaded,
&oes->rx_bytes, &es->rx_bytes);
@@ -775,8 +786,8 @@ void i40e_update_stats(struct i40e_vsi *vsi)
} while (u64_stats_fetch_retry_bh(&p->syncp, start));
rx_b += bytes;
rx_p += packets;
- rx_buf += p->rx_stats.alloc_rx_buff_failed;
- rx_page += p->rx_stats.alloc_rx_page_failed;
+ rx_buf += p->rx_stats.alloc_buff_failed;
+ rx_page += p->rx_stats.alloc_page_failed;
}
rcu_read_unlock();
vsi->tx_restart = tx_restart;
@@ -1204,6 +1215,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
return 0;
+ if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ return -EADDRNOTAVAIL;
+
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
ret = i40e_aq_mac_address_write(&vsi->back->hw,
@@ -1494,11 +1509,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cpu_to_le16((u16)(f->vlan ==
I40E_VLAN_ANY ? 0 : f->vlan));
- /* vlan0 as wild card to allow packets from all vlans */
- if (f->vlan == I40E_VLAN_ANY ||
- (vsi->netdev && !(vsi->netdev->features &
- NETIF_F_HW_VLAN_CTAG_FILTER)))
- cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
del_list[num_del].flags = cmd_flags;
num_del++;
@@ -1564,12 +1574,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
add_list[num_add].queue_number = 0;
cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
-
- /* vlan0 as wild card to allow packets from all vlans */
- if (f->vlan == I40E_VLAN_ANY || (vsi->netdev &&
- !(vsi->netdev->features &
- NETIF_F_HW_VLAN_CTAG_FILTER)))
- cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
add_list[num_add].flags = cpu_to_le16(cmd_flags);
num_add++;
@@ -1635,6 +1639,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"set uni promisc failed, err %d, aq_err %d\n",
aq_ret, pf->hw.aq.asq_last_status);
+ aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
+ vsi->seid,
+ cur_promisc, NULL);
+ if (aq_ret)
+ dev_info(&pf->pdev->dev,
+ "set brdcast promisc failed, err %d, aq_err %d\n",
+ aq_ret, pf->hw.aq.asq_last_status);
}
clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
@@ -1768,7 +1779,6 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
struct i40e_mac_filter *f, *add_f;
bool is_netdev, is_vf;
- int ret;
is_vf = (vsi->type == I40E_VSI_SRIOV);
is_netdev = !!(vsi->netdev);
@@ -1794,13 +1804,6 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
- ret = i40e_sync_vsi_filters(vsi);
- if (ret) {
- dev_info(&vsi->back->pdev->dev,
- "Could not sync filters for vid %d\n", vid);
- return ret;
- }
-
/* Now if we add a vlan tag, make sure to check if it is the first
* tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
* with 0, so we now accept untagged and specified tagged traffic
@@ -1837,10 +1840,13 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
}
- ret = i40e_sync_vsi_filters(vsi);
}
- return ret;
+ if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ return 0;
+
+ return i40e_sync_vsi_filters(vsi);
}
/**
@@ -1856,7 +1862,6 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
struct i40e_mac_filter *f, *add_f;
bool is_vf, is_netdev;
int filter_count = 0;
- int ret;
is_vf = (vsi->type == I40E_VSI_SRIOV);
is_netdev = !!(netdev);
@@ -1867,12 +1872,6 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
list_for_each_entry(f, &vsi->mac_filter_list, list)
i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
- ret = i40e_sync_vsi_filters(vsi);
- if (ret) {
- dev_info(&vsi->back->pdev->dev, "Could not sync filters\n");
- return ret;
- }
-
/* go through all the filters for this VSI and if there is only
* vid == 0 it means there are no other filters, so vid 0 must
* be replaced with -1. This signifies that we should from now
@@ -1915,6 +1914,10 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
+ if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ return 0;
+
return i40e_sync_vsi_filters(vsi);
}
@@ -2005,8 +2008,9 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
vsi->info.pvid = cpu_to_le16(vid);
- vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
- vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+ vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
+ I40E_AQ_VSI_PVLAN_INSERT_PVID |
+ I40E_AQ_VSI_PVLAN_EMOD_STR;
ctxt.seid = vsi->seid;
memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
@@ -2029,8 +2033,9 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
**/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
+ i40e_vlan_stripping_disable(vsi);
+
vsi->info.pvid = 0;
- i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
}
/**
@@ -2063,8 +2068,11 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
{
int i;
+ if (!vsi->tx_rings)
+ return;
+
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->tx_rings[i]->desc)
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
i40e_free_tx_resources(vsi->tx_rings[i]);
}
@@ -2097,8 +2105,11 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
{
int i;
+ if (!vsi->rx_rings)
+ return;
+
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->rx_rings[i]->desc)
+ if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
i40e_free_rx_resources(vsi->rx_rings[i]);
}
@@ -2240,7 +2251,10 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.tphwdesc_ena = 1;
rx_ctx.tphdata_ena = 1;
rx_ctx.tphhead_ena = 1;
- rx_ctx.lrxqthresh = 2;
+ if (hw->revision_id == 0)
+ rx_ctx.lrxqthresh = 0;
+ else
+ rx_ctx.lrxqthresh = 2;
rx_ctx.crcstrip = 1;
rx_ctx.l2tsel = 1;
rx_ctx.showiv = 1;
@@ -2482,8 +2496,8 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
wr32(hw, I40E_PFINT_ICR0_ENA, val);
/* SW_ITR_IDX = 0, but don't change INTENA */
- wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
- I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
+ wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
/* OTHER_ITR_IDX = 0 */
wr32(hw, I40E_PFINT_STAT_CTL0, 0);
@@ -2529,6 +2543,19 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
}
/**
+ * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
+ * @pf: board private structure
+ **/
+void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+
+ wr32(hw, I40E_PFINT_DYN_CTL0,
+ I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ i40e_flush(hw);
+}
+
+/**
* i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
* @pf: board private structure
**/
@@ -2737,20 +2764,21 @@ static irqreturn_t i40e_intr(int irq, void *data)
{
struct i40e_pf *pf = (struct i40e_pf *)data;
struct i40e_hw *hw = &pf->hw;
+ irqreturn_t ret = IRQ_NONE;
u32 icr0, icr0_remaining;
u32 val, ena_mask;
icr0 = rd32(hw, I40E_PFINT_ICR0);
-
- val = rd32(hw, I40E_PFINT_DYN_CTL0);
- val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
- wr32(hw, I40E_PFINT_DYN_CTL0, val);
+ ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
/* if sharing a legacy IRQ, we might get called w/o an intr pending */
if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
- return IRQ_NONE;
+ goto enable_intr;
- ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
+ /* if interrupt but no bits showing, must be SWINT */
+ if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
+ (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
+ pf->sw_int_count++;
/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
@@ -2790,14 +2818,19 @@ static irqreturn_t i40e_intr(int irq, void *data)
val = rd32(hw, I40E_GLGEN_RSTAT);
val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
>> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
- if (val & I40E_RESET_CORER)
+ if (val == I40E_RESET_CORER)
pf->corer_count++;
- else if (val & I40E_RESET_GLOBR)
+ else if (val == I40E_RESET_GLOBR)
pf->globr_count++;
- else if (val & I40E_RESET_EMPR)
+ else if (val == I40E_RESET_EMPR)
pf->empr_count++;
}
+ if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
+ icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
+ dev_info(&pf->pdev->dev, "HMC error interrupt\n");
+ }
+
/* If a critical error is pending we have no choice but to reset the
* device.
* Report and mask out any remaining unexpected interrupts.
@@ -2806,22 +2839,19 @@ static irqreturn_t i40e_intr(int irq, void *data)
if (icr0_remaining) {
dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
icr0_remaining);
- if ((icr0_remaining & I40E_PFINT_ICR0_HMC_ERR_MASK) ||
- (icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
+ if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
- if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
- dev_info(&pf->pdev->dev, "HMC error interrupt\n");
- } else {
- dev_info(&pf->pdev->dev, "device will be reset\n");
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
- i40e_service_event_schedule(pf);
- }
+ dev_info(&pf->pdev->dev, "device will be reset\n");
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ i40e_service_event_schedule(pf);
}
ena_mask &= ~icr0_remaining;
}
+ ret = IRQ_HANDLED;
+enable_intr:
/* re-enable interrupt causes */
wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
if (!test_bit(__I40E_DOWN, &pf->state)) {
@@ -2829,7 +2859,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
i40e_irq_dynamic_enable_icr0(pf);
}
- return IRQ_HANDLED;
+ return ret;
}
/**
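
The i40e_intr() rework above follows the standard shared-IRQ contract: read
the cause once, bail out through a common re-enable path with IRQ_NONE when
nothing is pending, and report IRQ_HANDLED otherwise. A skeleton of that
pattern (read_cause/handle_cause/reenable_interrupts are hypothetical
stand-ins):

    static irqreturn_t shared_isr(int irq, void *data)
    {
            irqreturn_t ret = IRQ_NONE;
            u32 cause = read_cause(data);           /* hypothetical helper */

            if (!cause)
                    goto enable;    /* another device's interrupt */

            handle_cause(data, cause);              /* hypothetical helper */
            ret = IRQ_HANDLED;
    enable:
            reenable_interrupts(data);              /* hypothetical helper */
            return ret;
    }
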
@@ -2971,21 +3001,11 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
} while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);
- if (enable) {
- /* is STAT set ? */
- if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
- dev_info(&pf->pdev->dev,
- "Tx %d already enabled\n", i);
- continue;
- }
- } else {
- /* is !STAT set ? */
- if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
- dev_info(&pf->pdev->dev,
- "Tx %d already disabled\n", i);
- continue;
- }
- }
+ /* Skip if the queue is already in the requested state */
+ if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ continue;
+ if (!enable && !(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ continue;
/* turn on/off the queue */
if (enable)
@@ -3016,6 +3036,9 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
}
}
+ if (hw->revision_id == 0)
+ mdelay(50);
+
return 0;
}
@@ -3088,7 +3111,7 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
* @vsi: the VSI being configured
* @enable: start or stop the rings
**/
-static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
+int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
{
int ret;
@@ -3128,7 +3151,8 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
u16 vector = i + base;
/* free only the irqs that were actually requested */
- if (vsi->q_vectors[i]->num_ringpairs == 0)
+ if (!vsi->q_vectors[i] ||
+ !vsi->q_vectors[i]->num_ringpairs)
continue;
/* clear the affinity_mask in the IRQ descriptor */
@@ -3954,22 +3978,28 @@ static int i40e_open(struct net_device *netdev)
if (err)
goto err_setup_rx;
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(netdev, vsi->num_queue_pairs);
+ if (err)
+ goto err_set_queues;
+
+ err = netif_set_real_num_rx_queues(netdev, vsi->num_queue_pairs);
+ if (err)
+ goto err_set_queues;
+
err = i40e_up_complete(vsi);
if (err)
goto err_up_complete;
- if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
- err = i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, true, NULL);
- if (err)
- netdev_info(netdev,
- "couldn't set broadcast err %d aq_err %d\n",
- err, pf->hw.aq.asq_last_status);
- }
+#ifdef CONFIG_I40E_VXLAN
+ vxlan_get_rx_port(netdev);
+#endif
return 0;
err_up_complete:
i40e_down(vsi);
+err_set_queues:
i40e_vsi_free_irq(vsi);
err_setup_rx:
i40e_vsi_free_rx_resources(vsi);
@@ -4051,6 +4081,24 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
i40e_flush(&pf->hw);
+ } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) {
+
+ /* Request a Firmware Reset
+ *
+ * Same as Global reset, plus restarting the
+ * embedded firmware engine.
+ */
+ /* enable EMP Reset */
+ val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP);
+ val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK;
+ wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val);
+
+ /* force the reset */
+ val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+ val |= I40E_GLGEN_RTRIG_EMPFWR_MASK;
+ wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
+ i40e_flush(&pf->hw);
+
} else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
/* Request a PF Reset
@@ -4089,6 +4137,19 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
}
/**
+ * i40e_do_reset_safe - Protected reset path for userland calls.
+ * @pf: board private structure
+ * @reset_flags: which reset is requested
+ *
+ **/
+void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
+{
+ rtnl_lock();
+ i40e_do_reset(pf, reset_flags);
+ rtnl_unlock();
+}
+
+/**
* i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
* @pf: board private structure
* @e: event info posted on ARQ
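
Usage sketch for the new safe wrapper, as an ethtool- or sysfs-triggered path
might call it (the flag value is taken from the diff):

    /* request a PF reset from process context, under RTNL */
    i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
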
@@ -4333,6 +4394,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
{
u32 reset_flags = 0;
+ rtnl_lock();
if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
reset_flags |= (1 << __I40E_REINIT_REQUESTED);
clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
@@ -4355,7 +4417,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
*/
if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
i40e_handle_reset_warning(pf);
- return;
+ goto unlock;
}
/* If we're already down or resetting, just bail */
@@ -4363,6 +4425,9 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
!test_bit(__I40E_DOWN, &pf->state) &&
!test_bit(__I40E_CONFIG_BUSY, &pf->state))
i40e_do_reset(pf, reset_flags);
+
+unlock:
+ rtnl_unlock();
}
/**
@@ -4426,6 +4491,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
return;
do {
+ event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */
ret = i40e_clean_arq_element(hw, &event, &pending);
if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
dev_info(&pf->pdev->dev, "No ARQ event found\n");
@@ -4456,10 +4522,13 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
i40e_handle_lan_overflow_event(pf, &event);
break;
+ case i40e_aqc_opc_send_msg_to_peer:
+ dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
+ break;
default:
dev_info(&pf->pdev->dev,
- "ARQ Error: Unknown event %d received\n",
- event.desc.opcode);
+ "ARQ Error: Unknown event 0x%04x received\n",
+ opcode);
break;
}
} while (pending && (i++ < pf->adminq_work_limit));
@@ -4589,6 +4658,13 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
}
} while (err);
+ if (pf->hw.revision_id == 0 && (pf->flags & I40E_FLAG_MFP_ENABLED)) {
+ pf->hw.func_caps.num_msix_vectors += 1;
+ pf->hw.func_caps.num_tx_qp =
+ min_t(int, pf->hw.func_caps.num_tx_qp,
+ I40E_MAX_NPAR_QPS);
+ }
+
if (pf->hw.debug_mask & I40E_DEBUG_USER)
dev_info(&pf->pdev->dev,
"pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
@@ -4600,6 +4676,15 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
pf->hw.func_caps.num_tx_qp,
pf->hw.func_caps.num_vsis);
+#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
+ + pf->hw.func_caps.num_vfs)
+ if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
+ dev_info(&pf->pdev->dev,
+ "got num_vsis %d, setting num_vsis to %d\n",
+ pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
+ pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
+ }
+
return 0;
}
@@ -4670,26 +4755,25 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
}
/**
- * i40e_handle_reset_warning - prep for the core to reset
+ * i40e_prep_for_reset - prep for the core to reset
* @pf: board private structure
*
- * Close up the VFs and other things in prep for a Core Reset,
- * then get ready to rebuild the world.
- **/
-static void i40e_handle_reset_warning(struct i40e_pf *pf)
+ * Close up the VFs and other things in prep for a PF reset.
+ **/
+static int i40e_prep_for_reset(struct i40e_pf *pf)
{
- struct i40e_driver_version dv;
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
u32 v;
clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
- return;
+ return 0;
dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");
- i40e_vc_notify_reset(pf);
+ if (i40e_check_asq_alive(hw))
+ i40e_vc_notify_reset(pf);
/* quiesce the VSIs and their queues that are not already DOWN */
i40e_pf_quiesce_all_vsi(pf);
@@ -4701,6 +4785,27 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
i40e_shutdown_adminq(&pf->hw);
+ /* call shutdown HMC */
+ ret = i40e_shutdown_lan_hmc(hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
+ clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+ }
+ return ret;
+}
+
+/**
+ * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: true if the Main VSI needs to be re-initialized.
+ **/
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
+{
+ struct i40e_driver_version dv;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret;
+ u32 v;
+
/* Now we wait for GRST to settle out.
* We don't have to delete the VEBs or VSIs from the hw switch
* because the reset will make them disappear.
@@ -4728,13 +4833,6 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
goto end_core_reset;
}
- /* call shutdown HMC */
- ret = i40e_shutdown_lan_hmc(hw);
- if (ret) {
- dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
- goto end_core_reset;
- }
-
ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp,
pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
@@ -4749,7 +4847,7 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
}
/* do basic switch setup */
- ret = i40e_setup_pf_switch(pf);
+ ret = i40e_setup_pf_switch(pf, reinit);
if (ret)
goto end_core_reset;
@@ -4828,6 +4926,22 @@ end_core_reset:
}
/**
+ * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild
+ * @pf: board private structure
+ *
+ * Close up the VFs and other things in prep for a Core Reset,
+ * then get ready to rebuild the world.
+ **/
+static void i40e_handle_reset_warning(struct i40e_pf *pf)
+{
+ i40e_status ret;
+
+ ret = i40e_prep_for_reset(pf);
+ if (!ret)
+ i40e_reset_and_rebuild(pf, false);
+}
+
+/**
* i40e_handle_mdd_event
* @pf: pointer to the pf structure
*
@@ -4908,6 +5022,52 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
i40e_flush(hw);
}
+#ifdef CONFIG_I40E_VXLAN
+/**
+ * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
+ * @pf: board private structure
+ **/
+static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
+{
+ const int vxlan_hdr_qwords = 4;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret;
+ u8 filter_index;
+ __be16 port;
+ int i;
+
+ if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
+ return;
+
+ pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
+
+ for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+ if (pf->pending_vxlan_bitmap & (1 << i)) {
+ pf->pending_vxlan_bitmap &= ~(1 << i);
+ port = pf->vxlan_ports[i];
+ ret = port ?
+ i40e_aq_add_udp_tunnel(hw, ntohs(port),
+ vxlan_hdr_qwords,
+ I40E_AQC_TUNNEL_TYPE_VXLAN,
+ &filter_index, NULL)
+ : i40e_aq_del_udp_tunnel(hw, i, NULL);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
+ port ? "adding" : "deleting",
+ ntohs(port), i);
+
+ pf->vxlan_ports[i] = 0;
+ } else {
+ dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n",
+ port ? "Added" : "Deleted",
+ ntohs(port), port ? i : filter_index);
+ }
+ }
+ }
+}
+
+#endif
/**
* i40e_service_task - Run the driver's async subtasks
* @work: pointer to work_struct containing our data
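
The VXLAN subtask above is an instance of a deferred-sync pattern: the add/del
notifiers only mark work, and the service task drains it. A minimal sketch
(slot count and field names taken from the diff; the helper name is
hypothetical):

    /* Drain pattern: clear each pending bit, then add (nonzero port)
     * or delete (zeroed port) the corresponding hardware filter.
     */
    static void drain_pending_ports(struct i40e_pf *pf)
    {
            int i;

            for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
                    if (!(pf->pending_vxlan_bitmap & (1 << i)))
                            continue;
                    pf->pending_vxlan_bitmap &= ~(1 << i);
                    /* pf->vxlan_ports[i] != 0 -> add; == 0 -> delete */
            }
    }
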
@@ -4926,6 +5086,9 @@ static void i40e_service_task(struct work_struct *work)
i40e_fdir_reinit_subtask(pf);
i40e_check_hang_subtask(pf);
i40e_sync_filters_subtask(pf);
+#ifdef CONFIG_I40E_VXLAN
+ i40e_sync_vxlan_filters_subtask(pf);
+#endif
i40e_clean_adminq_subtask(pf);
i40e_service_event_complete(pf);
@@ -5003,6 +5166,42 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
}
/**
+ * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
+ * @vsi: pointer to the VSI
+ * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
+ *
+ * On error: returns error code (negative)
+ * On success: returns 0
+ **/
+static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
+{
+ int size;
+ int ret = 0;
+
+ /* allocate memory for both Tx and Rx ring pointers */
+ size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
+ vsi->tx_rings = kzalloc(size, GFP_KERNEL);
+ if (!vsi->tx_rings)
+ return -ENOMEM;
+ vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
+
+ if (alloc_qvectors) {
+ /* allocate memory for q_vector pointers */
+ size = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
+ vsi->q_vectors = kzalloc(size, GFP_KERNEL);
+ if (!vsi->q_vectors) {
+ ret = -ENOMEM;
+ goto err_vectors;
+ }
+ }
+ return ret;
+
+err_vectors:
+ kfree(vsi->tx_rings);
+ return ret;
+}
+
+/**
* i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
* @pf: board private structure
* @type: type of VSI
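
The helper above relies on a single allocation backing both pointer arrays;
the essential lines, restated:

    /* one kzalloc holds 2 * alloc_queue_pairs ring pointers;
     * rx_rings aliases the second half, so kfree(vsi->tx_rings)
     * in i40e_vsi_free_arrays() releases both at once
     */
    size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
    vsi->tx_rings = kzalloc(size, GFP_KERNEL);
    vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
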
@@ -5014,8 +5213,6 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
int ret = -ENODEV;
struct i40e_vsi *vsi;
- int sz_vectors;
- int sz_rings;
int vsi_idx;
int i;
@@ -5065,22 +5262,9 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
if (ret)
goto err_rings;
- /* allocate memory for ring pointers */
- sz_rings = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
- vsi->tx_rings = kzalloc(sz_rings, GFP_KERNEL);
- if (!vsi->tx_rings) {
- ret = -ENOMEM;
+ ret = i40e_vsi_alloc_arrays(vsi, true);
+ if (ret)
goto err_rings;
- }
- vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
-
- /* allocate memory for q_vector pointers */
- sz_vectors = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
- vsi->q_vectors = kzalloc(sz_vectors, GFP_KERNEL);
- if (!vsi->q_vectors) {
- ret = -ENOMEM;
- goto err_vectors;
- }
/* Setup default MSIX irq handler for VSI */
i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
@@ -5089,8 +5273,6 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
ret = vsi_idx;
goto unlock_pf;
-err_vectors:
- kfree(vsi->tx_rings);
err_rings:
pf->next_vsi = i - 1;
kfree(vsi);
@@ -5100,6 +5282,26 @@ unlock_pf:
}
/**
+ * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
+ * @vsi: pointer to the VSI
+ * @free_qvectors: a bool to specify if q_vectors need to be freed.
+ **/
+static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
+{
+ /* free the ring and vector containers */
+ if (free_qvectors) {
+ kfree(vsi->q_vectors);
+ vsi->q_vectors = NULL;
+ }
+ kfree(vsi->tx_rings);
+ vsi->tx_rings = NULL;
+ vsi->rx_rings = NULL;
+}
+
+/**
* i40e_vsi_clear - Deallocate the VSI provided
* @vsi: the VSI being un-configured
**/
@@ -5135,9 +5337,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
- /* free the ring and vector containers */
- kfree(vsi->q_vectors);
- kfree(vsi->tx_rings);
+ i40e_vsi_free_arrays(vsi, true);
pf->vsi[vsi->idx] = NULL;
if (vsi->idx < pf->next_vsi)
@@ -5155,18 +5355,17 @@ free_vsi:
* i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
* @vsi: the VSI being cleaned
**/
-static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi)
+static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
{
int i;
- if (vsi->tx_rings[0])
+ if (vsi->tx_rings && vsi->tx_rings[0]) {
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
kfree_rcu(vsi->tx_rings[i], rcu);
vsi->tx_rings[i] = NULL;
vsi->rx_rings[i] = NULL;
}
-
- return 0;
+ }
}
/**
@@ -5183,6 +5382,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
struct i40e_ring *tx_ring;
struct i40e_ring *rx_ring;
+ /* allocate space for both Tx and Rx in one shot */
tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
if (!tx_ring)
goto err_out;
@@ -5286,15 +5486,18 @@ static int i40e_init_msix(struct i40e_pf *pf)
/* The number of vectors we'll request will be comprised of:
* - Add 1 for "other" cause for Admin Queue events, etc.
* - The number of LAN queue pairs
- * already adjusted for the NUMA node
- * assumes symmetric Tx/Rx pairing
+ * - Queues being used for RSS.
+ * We don't need as many as max_rss_size vectors.
+ * use rss_size instead in the calculation since that
+ * is governed by number of cpus in the system.
+ * - assumes symmetric Tx/Rx pairing
* - The number of VMDq pairs
* Once we count this up, try the request.
*
* If we can't get what we want, we'll simplify to nearly nothing
* and try again. If that still fails, we punt.
*/
- pf->num_lan_msix = pf->num_lan_qps;
+ pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
pf->num_vmdq_msix = pf->num_vmdq_qps;
v_budget = 1 + pf->num_lan_msix;
v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
@@ -5436,7 +5639,6 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (err) {
pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_MQ_ENABLED |
I40E_FLAG_DCB_ENABLED |
I40E_FLAG_SRIOV_ENABLED |
I40E_FLAG_FDIR_ENABLED |
@@ -5510,15 +5712,15 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
**/
static int i40e_config_rss(struct i40e_pf *pf)
{
- struct i40e_hw *hw = &pf->hw;
- u32 lut = 0;
- int i, j;
- u64 hena;
/* Set of random keys generated using kernel random number generator */
static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
+ struct i40e_hw *hw = &pf->hw;
+ u32 lut = 0;
+ int i, j;
+ u64 hena;
/* Fill out hash function seed */
for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
@@ -5527,16 +5729,7 @@ static int i40e_config_rss(struct i40e_pf *pf)
/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)|
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ hena |= I40E_DEFAULT_RSS_HENA;
wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
@@ -5565,6 +5758,34 @@ static int i40e_config_rss(struct i40e_pf *pf)
}
/**
+ * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
+ * @pf: board private structure
+ * @queue_count: the requested queue count for rss.
+ *
+ * Returns 0 if RSS is not enabled; otherwise returns the final RSS queue
+ * count, which may differ from the requested queue count.
+ **/
+int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
+{
+ if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
+ return 0;
+
+ queue_count = min_t(int, queue_count, pf->rss_size_max);
+ queue_count = rounddown_pow_of_two(queue_count);
+
+ if (queue_count != pf->rss_size) {
+ i40e_prep_for_reset(pf);
+
+ pf->rss_size = queue_count;
+
+ i40e_reset_and_rebuild(pf, true);
+ i40e_config_rss(pf);
+ }
+ dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size);
+ return pf->rss_size;
+}
+
+/**
* i40e_sw_init - Initialize general software structures (struct i40e_pf)
* @pf: board private structure to initialize
*
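
Worked example for the clamp in i40e_reconfig_rss_queues(): with
rss_size_max == 16, a request for 12 queues lands on 8.

    /* min_t() then rounddown_pow_of_two(): 12 -> 12 -> 8 */
    queue_count = min_t(int, 12, 16);                 /* 12 */
    queue_count = rounddown_pow_of_two(queue_count);  /* 8 */
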
@@ -5579,6 +5800,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
+ pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
if (I40E_DEBUG_USER & debug)
pf->hw.debug_mask = debug;
@@ -5590,19 +5812,27 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
I40E_FLAG_MSI_ENABLED |
I40E_FLAG_MSIX_ENABLED |
- I40E_FLAG_RX_PS_ENABLED |
- I40E_FLAG_MQ_ENABLED |
I40E_FLAG_RX_1BUF_ENABLED;
+ /* Depending on PF configurations, it is possible that the RSS
+ * maximum might end up larger than the available queues
+ */
pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
+ pf->rss_size_max = min_t(int, pf->rss_size_max,
+ pf->hw.func_caps.num_tx_qp);
if (pf->hw.func_caps.rss) {
pf->flags |= I40E_FLAG_RSS_ENABLED;
- pf->rss_size = min_t(int, pf->rss_size_max,
- nr_cpus_node(numa_node_id()));
+ pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
} else {
pf->rss_size = 1;
}
+ /* MFP mode enabled */
+ if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
+ pf->flags |= I40E_FLAG_MFP_ENABLED;
+ dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
+ }
+
if (pf->hw.func_caps.dcb)
pf->num_tc_qps = I40E_DEFAULT_QUEUES_PER_TC;
else
@@ -5631,12 +5861,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
}
- /* MFP mode enabled */
- if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
- pf->flags |= I40E_FLAG_MFP_ENABLED;
- dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
- }
-
#ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.num_vfs) {
pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
@@ -5644,6 +5868,9 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->num_req_vfs = min_t(int,
pf->hw.func_caps.num_vfs,
I40E_MAX_VF_COUNT);
+ dev_info(&pf->pdev->dev,
+ "Number of VFs being requested for PF[%d] = %d\n",
+ pf->hw.pf_id, pf->num_req_vfs);
}
#endif /* CONFIG_PCI_IOV */
pf->eeprom_version = 0xDEAD;
@@ -5698,6 +5925,104 @@ static int i40e_set_features(struct net_device *netdev,
return 0;
}
+#ifdef CONFIG_I40E_VXLAN
+/**
+ * i40e_get_vxlan_port_idx - Look up the index of a possibly offloaded Rx UDP port
+ * @pf: board private structure
+ * @port: The UDP port to look up
+ *
+ * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
+ **/
+static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
+{
+ u8 i;
+
+ for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+ if (pf->vxlan_ports[i] == port)
+ return i;
+ }
+
+ return i;
+}
+
+/**
+ * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that VXLAN is notifying us about
+ * @port: New UDP port number that VXLAN started listening to
+ **/
+static void i40e_add_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u8 next_idx;
+ u8 idx;
+
+ if (sa_family == AF_INET6)
+ return;
+
+ idx = i40e_get_vxlan_port_idx(pf, port);
+
+ /* Check if port already exists */
+ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+ netdev_info(netdev, "Port %d already offloaded\n", ntohs(port));
+ return;
+ }
+
+ /* Now check if there is space to add the new port */
+ next_idx = i40e_get_vxlan_port_idx(pf, 0);
+
+ if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+ netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n",
+ ntohs(port));
+ return;
+ }
+
+ /* New port: add it and mark its index in the bitmap */
+ pf->vxlan_ports[next_idx] = port;
+ pf->pending_vxlan_bitmap |= (1 << next_idx);
+
+ pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+}
+
+/**
+ * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that VXLAN is notifying us about
+ * @port: UDP port number that VXLAN stopped listening to
+ **/
+static void i40e_del_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u8 idx;
+
+ if (sa_family == AF_INET6)
+ return;
+
+ idx = i40e_get_vxlan_port_idx(pf, port);
+
+ /* Check if port already exists */
+ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+ /* if port exists, set it to 0 (mark for deletion)
+ * and make it pending
+ */
+ pf->vxlan_ports[idx] = 0;
+
+ pf->pending_vxlan_bitmap |= (1 << idx);
+
+ pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+ } else {
+ netdev_warn(netdev, "Port %d was not found, not deleting\n",
+ ntohs(port));
+ }
+}
+
+#endif
static const struct net_device_ops i40e_netdev_ops = {
.ndo_open = i40e_open,
.ndo_stop = i40e_close,
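
Usage sketch for the index helper above: probing with the port finds an
existing slot, probing with 0 finds the first free one (the local variable
names are hypothetical):

    u8 idx = i40e_get_vxlan_port_idx(pf, port);     /* existing entry? */
    u8 free_idx = i40e_get_vxlan_port_idx(pf, 0);   /* first empty slot */

    if (idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS &&
        free_idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
            /* not offloaded yet and there is room: stage the add */
            pf->vxlan_ports[free_idx] = port;
            pf->pending_vxlan_bitmap |= (1 << free_idx);
    }
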
@@ -5719,6 +6044,10 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
.ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
.ndo_get_vf_config = i40e_ndo_get_vf_config,
+#ifdef CONFIG_I40E_VXLAN
+ .ndo_add_vxlan_port = i40e_add_vxlan_port,
+ .ndo_del_vxlan_port = i40e_del_vxlan_port,
+#endif
};
/**
@@ -5729,6 +6058,7 @@ static const struct net_device_ops i40e_netdev_ops = {
**/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
+ u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_netdev_priv *np;
@@ -5778,6 +6108,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
random_ether_addr(mac_addr);
i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
}
+ i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
@@ -6130,6 +6461,69 @@ vector_setup_out:
}
/**
+ * i40e_vsi_reinit_setup - return queue resources and reallocate them for a VSI
+ * @vsi: pointer to the vsi.
+ *
+ * This re-allocates a vsi's queue resources.
+ *
+ * Returns a pointer to the reconfigured VSI sw struct on success,
+ * or NULL on failure.
+ **/
+static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ u8 enabled_tc;
+ int ret;
+
+ i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
+ i40e_vsi_clear_rings(vsi);
+
+ i40e_vsi_free_arrays(vsi, false);
+ i40e_set_num_rings_in_vsi(vsi);
+ ret = i40e_vsi_alloc_arrays(vsi, false);
+ if (ret)
+ goto err_vsi;
+
+ ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
+ if (ret < 0) {
+ dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
+ vsi->seid, ret);
+ goto err_vsi;
+ }
+ vsi->base_queue = ret;
+
+ /* Update the FW view of the VSI. Force a reset of TC and queue
+ * layout configurations.
+ */
+ enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
+ pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
+ pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
+ i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+
+ /* assign it some queues */
+ ret = i40e_alloc_rings(vsi);
+ if (ret)
+ goto err_rings;
+
+ /* map all of the rings to the q_vectors */
+ i40e_vsi_map_rings_to_vectors(vsi);
+ return vsi;
+
+err_rings:
+ i40e_vsi_free_q_vectors(vsi);
+ if (vsi->netdev_registered) {
+ vsi->netdev_registered = false;
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+ i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
+err_vsi:
+ i40e_vsi_clear(vsi);
+ return NULL;
+}
+
+/**
* i40e_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
* @type: VSI type
@@ -6500,12 +6894,14 @@ void i40e_veb_release(struct i40e_veb *veb)
**/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
- bool is_default = (vsi->idx == vsi->back->lan_vsi);
+ bool is_default = false;
+ bool is_cloud = false;
int ret;
/* get a VEB from the hardware */
ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
- veb->enabled_tc, is_default, &veb->seid, NULL);
+ veb->enabled_tc, is_default,
+ is_cloud, &veb->seid, NULL);
if (ret) {
dev_info(&veb->pf->pdev->dev,
"couldn't add VEB, err %d, aq_err %d\n",
@@ -6770,11 +7166,13 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
/**
* i40e_setup_pf_switch - Setup the HW switch on startup or after reset
* @pf: board private structure
+ * @reinit: true if the Main VSI needs to be re-initialized.
*
* Returns 0 on success, negative value on failure
**/
-static int i40e_setup_pf_switch(struct i40e_pf *pf)
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
+ u32 rxfc = 0, txfc = 0, rxfc_reg;
int ret;
/* find out what's out there already */
@@ -6794,7 +7192,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
i40e_fdir_setup(pf);
/* first time setup */
- if (pf->lan_vsi == I40E_NO_VSI) {
+ if (pf->lan_vsi == I40E_NO_VSI || reinit) {
struct i40e_vsi *vsi = NULL;
u16 uplink_seid;
@@ -6805,19 +7203,15 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
uplink_seid = pf->veb[pf->lan_veb]->seid;
else
uplink_seid = pf->mac_seid;
-
- vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+ if (pf->lan_vsi == I40E_NO_VSI)
+ vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+ else if (reinit)
+ vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
if (!vsi) {
dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
i40e_fdir_teardown(pf);
return -EAGAIN;
}
- /* accommodate kcompat by copying the main VSI queue count
- * into the pf, since this newer code pushes the pf queue
- * info down a level into a VSI
- */
- pf->num_rx_queues = vsi->alloc_queue_pairs;
- pf->num_tx_queues = vsi->alloc_queue_pairs;
} else {
/* force a reset of TC and queue layout configurations */
u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
@@ -6845,20 +7239,65 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
i40e_link_event(pf);
- /* Initialize user-specifics link properties */
+ /* Initialize user-specific link properties */
pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
I40E_AQ_AN_COMPLETED) ? true : false);
- pf->hw.fc.requested_mode = I40E_FC_DEFAULT;
- if (pf->hw.phy.link_info.an_info &
- (I40E_AQ_LINK_PAUSE_TX | I40E_AQ_LINK_PAUSE_RX))
+ /* requested_mode is set in probe or by ethtool */
+ if (!pf->fc_autoneg_status)
+ goto no_autoneg;
+
+ if ((pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) &&
+ (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX))
pf->hw.fc.current_mode = I40E_FC_FULL;
else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
else
- pf->hw.fc.current_mode = I40E_FC_DEFAULT;
+ pf->hw.fc.current_mode = I40E_FC_NONE;
+ /* sync the flow control settings with the auto-neg values */
+ switch (pf->hw.fc.current_mode) {
+ case I40E_FC_FULL:
+ txfc = 1;
+ rxfc = 1;
+ break;
+ case I40E_FC_TX_PAUSE:
+ txfc = 1;
+ rxfc = 0;
+ break;
+ case I40E_FC_RX_PAUSE:
+ txfc = 0;
+ rxfc = 1;
+ break;
+ case I40E_FC_NONE:
+ case I40E_FC_DEFAULT:
+ txfc = 0;
+ rxfc = 0;
+ break;
+ case I40E_FC_PFC:
+ /* TBD */
+ break;
+ /* no default case, we have to handle all possibilities here */
+ }
+
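+ /* Tx pause is controlled through PRTDCB_FCCFG, Rx pause through the
+ * RFCE bit of PRTDCB_MFLCN, hence one plain write and one
+ * read-modify-write below
+ */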
+ wr32(&pf->hw, I40E_PRTDCB_FCCFG, txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
+
+ rxfc_reg = rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
+ ~I40E_PRTDCB_MFLCN_RFCE_MASK;
+ rxfc_reg |= (rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT);
+
+ wr32(&pf->hw, I40E_PRTDCB_MFLCN, rxfc_reg);
+
+ goto fc_complete;
+
+no_autoneg:
+ /* disable L2 flow control; the user can turn it on if they wish */
+ wr32(&pf->hw, I40E_PRTDCB_FCCFG, 0);
+ wr32(&pf->hw, I40E_PRTDCB_MFLCN, rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
+ ~I40E_PRTDCB_MFLCN_RFCE_MASK);
+
+fc_complete:
return ret;
}
@@ -6872,7 +7311,7 @@ static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
int num_tc0;
num_tc0 = min_t(int, queues_left, pf->rss_size_max);
- num_tc0 = min_t(int, num_tc0, nr_cpus_node(numa_node_id()));
+ num_tc0 = min_t(int, num_tc0, num_online_cpus());
num_tc0 = rounddown_pow_of_two(num_tc0);
return num_tc0;
@@ -6897,8 +7336,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
*/
queues_left = pf->hw.func_caps.num_tx_qp;
- if (!((pf->flags & I40E_FLAG_MSIX_ENABLED) &&
- (pf->flags & I40E_FLAG_MQ_ENABLED)) ||
+ if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) ||
!(pf->flags & (I40E_FLAG_RSS_ENABLED |
I40E_FLAG_FDIR_ENABLED | I40E_FLAG_DCB_ENABLED)) ||
(queues_left == 1)) {
@@ -6909,7 +7347,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
/* make sure all the fancies are disabled */
pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_MQ_ENABLED |
I40E_FLAG_FDIR_ENABLED |
I40E_FLAG_FDIR_ATR_ENABLED |
I40E_FLAG_DCB_ENABLED |
@@ -6923,7 +7360,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
pf->rss_size = i40e_set_rss_size(pf, queues_left);
queues_left -= pf->rss_size;
- pf->num_lan_qps = pf->rss_size;
+ pf->num_lan_qps = pf->rss_size_max;
} else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
!(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
@@ -6942,7 +7379,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
return;
}
- pf->num_lan_qps = pf->rss_size + accum_tc_size;
+ pf->num_lan_qps = pf->rss_size_max + accum_tc_size;
} else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
@@ -6958,7 +7395,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
return;
}
- pf->num_lan_qps = pf->rss_size;
+ pf->num_lan_qps = pf->rss_size_max;
} else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
@@ -6978,7 +7415,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
return;
}
- pf->num_lan_qps = pf->rss_size + accum_tc_size;
+ pf->num_lan_qps = pf->rss_size_max + accum_tc_size;
} else {
dev_info(&pf->pdev->dev,
@@ -7000,6 +7437,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
}
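+ /* record the queue pairs left unassigned after this carve-up */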
+ pf->queues_left = queues_left;
return;
}
@@ -7050,6 +7488,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct i40e_driver_version dv;
struct i40e_pf *pf;
struct i40e_hw *hw;
+ static u16 pfs_found;
+ u16 link_status;
int err = 0;
u32 len;
@@ -7115,6 +7555,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->subsystem_device_id = pdev->subsystem_device;
hw->bus.device = PCI_SLOT(pdev->devfn);
hw->bus.func = PCI_FUNC(pdev->devfn);
+ pf->instance = pfs_found;
+
+ /* do a special CORER for clearing PXE mode once at init */
+ if (hw->revision_id == 0 &&
+ (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
+ wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
+ i40e_flush(hw);
+ msleep(200);
+ pf->corer_count++;
+
+ i40e_clear_pxe_mode(hw);
+ }
/* Reset here to make sure all is clean and to define PF 'n' */
err = i40e_pf_reset(hw);
@@ -7139,8 +7591,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pf_reset;
}
+ /* set up a default setting for link flow control */
+ pf->hw.fc.requested_mode = I40E_FC_NONE;
+
err = i40e_init_adminq(hw);
dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
+ if (((hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
+ >> I40E_NVM_VERSION_HI_SHIFT) != I40E_CURRENT_NVM_VERSION_HI) {
+ dev_info(&pdev->dev,
+ "warning: NVM version not supported, supported version: %02x.%02x\n",
+ I40E_CURRENT_NVM_VERSION_HI,
+ I40E_CURRENT_NVM_VERSION_LO);
+ }
if (err) {
dev_info(&pdev->dev,
"init_adminq failed: %d expecting API %02x.%02x\n",
@@ -7175,7 +7637,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
i40e_get_mac_addr(hw, hw->mac.addr);
- if (i40e_validate_mac_addr(hw->mac.addr)) {
+ if (!is_valid_ether_addr(hw->mac.addr)) {
dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
err = -EIO;
goto err_mac_addr;
@@ -7195,6 +7657,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
pf->link_check_timeout = jiffies;
+ /* WoL defaults to disabled */
+ pf->wol_en = false;
+ device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
+
/* set up the main switch operations */
i40e_determine_queue_usage(pf);
i40e_init_interrupt_scheme(pf);
@@ -7209,7 +7675,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_switch_setup;
}
- err = i40e_setup_pf_switch(pf);
+ err = i40e_setup_pf_switch(pf, false);
if (err) {
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
goto err_vsis;
@@ -7247,6 +7713,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_flush(hw);
}
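+ /* this PF is done probing; count it so the next probe gets a new
+ * instance number
+ */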
+ pfs_found++;
+
i40e_dbg_pf_init(pf);
/* tell the firmware that we're starting */
@@ -7260,14 +7728,37 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mod_timer(&pf->service_timer,
round_jiffies(jiffies + pf->service_timer_period));
+ /* Get the negotiated link width and speed from PCI config space */
+ pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
+
+ i40e_set_pci_config_data(hw, link_status);
+
+ dev_info(&pdev->dev, "PCI Express: %s %s\n",
+ (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
+ hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
+ hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
+ "Unknown"),
+ (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
+ hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
+ hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
+ hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
+ "Unknown"));
+
+ if (hw->bus.width < i40e_bus_width_pcie_x8 ||
+ hw->bus.speed < i40e_bus_speed_8000) {
+ dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
+ dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
+ }
+
return 0;
/* Unwind what we've done if something failed in the setup */
err_vsis:
set_bit(__I40E_DOWN, &pf->state);
-err_switch_setup:
i40e_clear_interrupt_scheme(pf);
kfree(pf->vsi);
+err_switch_setup:
+ i40e_reset_interrupt_capability(pf);
del_timer_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
@@ -7353,7 +7844,6 @@ static void i40e_remove(struct pci_dev *pdev)
"Failed to destroy the HMC resources: %d\n", ret_code);
/* shutdown the adminq */
- i40e_aq_queue_shutdown(&pf->hw, true);
ret_code = i40e_shutdown_adminq(&pf->hw);
if (ret_code)
dev_warn(&pdev->dev,
@@ -7410,7 +7900,11 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
/* shutdown all operations */
- i40e_pf_quiesce_all_vsi(pf);
+ if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+ }
/* Request a slot reset */
return PCI_ERS_RESULT_NEED_RESET;
@@ -7473,9 +7967,103 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
struct i40e_pf *pf = pci_get_drvdata(pdev);
dev_info(&pdev->dev, "%s\n", __func__);
+ if (test_bit(__I40E_SUSPENDED, &pf->state))
+ return;
+
+ rtnl_lock();
i40e_handle_reset_warning(pf);
+ rtnl_unlock();
}
+/**
+ * i40e_shutdown - PCI callback for shutting down
+ * @pdev: PCI device information struct
+ **/
+static void i40e_shutdown(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_hw *hw = &pf->hw;
+
+ set_bit(__I40E_SUSPENDED, &pf->state);
+ set_bit(__I40E_DOWN, &pf->state);
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+
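+ /* enable APM wake and the magic-packet wake-up filter only if WoL
+ * is enabled
+ */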
+ wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+ wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, pf->wol_en);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+#ifdef CONFIG_PM
+/**
+ * i40e_suspend - PCI callback for moving to D3
+ * @pdev: PCI device information struct
+ **/
+static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_hw *hw = &pf->hw;
+
+ set_bit(__I40E_SUSPENDED, &pf->state);
+ set_bit(__I40E_DOWN, &pf->state);
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+
+ wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+ wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+ pci_wake_from_d3(pdev, pf->wol_en);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+/**
+ * i40e_resume - PCI callback for waking up from D3
+ * @pdev: PCI device information struct
+ **/
+static int i40e_resume(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ int err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ /* pci_restore_state() clears dev->state_saved, so call
+ * pci_save_state() again to restore it.
+ */
+ pci_save_state(pdev);
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "%s: Cannot enable PCI device from suspend\n",
+ __func__);
+ return err;
+ }
+ pci_set_master(pdev);
+
+ /* no wakeup events while running */
+ pci_wake_from_d3(pdev, false);
+
+ /* handling the reset will rebuild the device state */
+ if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
+ clear_bit(__I40E_DOWN, &pf->state);
+ rtnl_lock();
+ i40e_reset_and_rebuild(pf, false);
+ rtnl_unlock();
+ }
+
+ return 0;
+}
+
+#endif
static const struct pci_error_handlers i40e_err_handler = {
.error_detected = i40e_pci_error_detected,
.slot_reset = i40e_pci_error_slot_reset,
@@ -7487,6 +8075,11 @@ static struct pci_driver i40e_driver = {
.id_table = i40e_pci_tbl,
.probe = i40e_probe,
.remove = i40e_remove,
+#ifdef CONFIG_PM
+ .suspend = i40e_suspend,
+ .resume = i40e_resume,
+#endif
+ .shutdown = i40e_shutdown,
.err_handler = &i40e_err_handler,
.sriov_configure = i40e_pci_sriov_configure,
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 97e1bb30ef8a..37d66c87abc9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -166,15 +165,15 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
}
/**
- * i40e_read_nvm_srctl - Reads Shadow RAM.
+ * i40e_read_nvm_word - Reads Shadow RAM
* @hw: pointer to the HW structure.
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
* @data: word read from the Shadow RAM.
*
* Reads 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
**/
-static i40e_status i40e_read_nvm_srctl(struct i40e_hw *hw, u16 offset,
- u16 *data)
+i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
i40e_status ret_code = I40E_ERR_TIMEOUT;
u32 sr_reg;
@@ -211,29 +210,6 @@ read_nvm_exit:
}
/**
- * i40e_read_nvm_word - Reads Shadow RAM word.
- * @hw: pointer to the HW structure.
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
- * @data: word read from the Shadow RAM.
- *
- * Reads 16 bit word from the Shadow RAM. Each read is preceded
- * with the NVM ownership taking and followed by the release.
- **/
-i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
- u16 *data)
-{
- i40e_status ret_code = 0;
-
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
- ret_code = i40e_read_nvm_srctl(hw, offset, data);
- i40e_release_nvm(hw);
- }
-
- return ret_code;
-}
-
-/**
* i40e_read_nvm_buffer - Reads Shadow RAM buffer.
* @hw: pointer to the HW structure.
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
@@ -250,30 +226,18 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
{
i40e_status ret_code = 0;
u16 index, word;
- u32 time;
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
- /* Loop thru the selected region. */
- for (word = 0; word < *words; word++) {
- index = offset + word;
- ret_code = i40e_read_nvm_srctl(hw, index, &data[word]);
- if (ret_code)
- break;
- /* Check if we didn't exceeded the semaphore timeout. */
- time = rd32(hw, I40E_GLVFGEN_TIMER);
- if (time >= hw->nvm.hw_semaphore_timeout) {
- ret_code = I40E_ERR_TIMEOUT;
- hw_dbg(hw, "NVM read error: timeout.\n");
- break;
- }
- }
- /* Update the number of words read from the Shadow RAM. */
- *words = word;
- /* Release the NVM ownership. */
- i40e_release_nvm(hw);
+ /* Loop through the selected region. */
+ for (word = 0; word < *words; word++) {
+ index = offset + word;
+ ret_code = i40e_read_nvm_word(hw, index, &data[word]);
+ if (ret_code)
+ break;
}
+ /* Update the number of words read from the Shadow RAM. */
+ *words = word;
+
return ret_code;
}
@@ -297,14 +261,14 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
u32 i = 0;
/* read pointer to VPD area */
- ret_code = i40e_read_nvm_srctl(hw, I40E_SR_VPD_PTR, &vpd_module);
+ ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
if (ret_code) {
ret_code = I40E_ERR_NVM_CHECKSUM;
goto i40e_calc_nvm_checksum_exit;
}
/* read pointer to PCIe Alt Auto-load module */
- ret_code = i40e_read_nvm_srctl(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
+ ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
&pcie_alt_module);
if (ret_code) {
ret_code = I40E_ERR_NVM_CHECKSUM;
@@ -331,7 +295,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
break;
}
- ret_code = i40e_read_nvm_srctl(hw, (u16)i, &word);
+ ret_code = i40e_read_nvm_word(hw, (u16)i, &word);
if (ret_code) {
ret_code = I40E_ERR_NVM_CHECKSUM;
goto i40e_calc_nvm_checksum_exit;
@@ -371,7 +335,7 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
/* Do not use i40e_read_nvm_word() because we do not want to take
* the synchronization semaphores twice here.
*/
- i40e_read_nvm_srctl(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
+ i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
/* Verify read checksum from EEPROM is the same as
* calculated checksum
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index 702c81ba86e3..ecd0f0b663c9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index f75bb9ccc900..c7c3d8231b36 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -51,7 +50,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
-bool i40e_asq_done(struct i40e_hw *hw);
/* debug function for adminq */
void i40e_debug_aq(struct i40e_hw *hw,
@@ -60,10 +58,12 @@ void i40e_debug_aq(struct i40e_hw *hw,
void *buffer);
void i40e_idle_aq(struct i40e_hw *hw);
-void i40e_resume_aq(struct i40e_hw *hw);
+bool i40e_check_asq_alive(struct i40e_hw *hw);
+i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading);
u32 i40e_led_get(struct i40e_hw *hw);
-void i40e_led_set(struct i40e_hw *hw, u32 mode);
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
/* admin send queue commands */
@@ -71,8 +71,6 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *fw_major_version, u16 *fw_minor_version,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
- bool unloading);
i40e_status i40e_aq_set_phy_reset(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
@@ -106,7 +104,8 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc,
- bool default_port, u16 *pveb_seid,
+ bool default_port, bool enable_l2_filtering,
+ u16 *pveb_seid,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
u16 veb_seid, u16 *switch_id, bool *floating,
@@ -119,12 +118,6 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_aqc_add_remove_vlan_element_data *v_list,
- u8 count, struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_aqc_add_remove_vlan_element_data *v_list,
- u8 count, struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
@@ -164,6 +157,12 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 header_len,
+ u8 protocol_index, u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
@@ -206,7 +205,6 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw);
bool i40e_get_link_status(struct i40e_hw *hw);
i40e_status i40e_get_mac_addr(struct i40e_hw *hw,
u8 *mac_addr);
-i40e_status i40e_validate_mac_addr(u8 *mac_addr);
i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
struct i40e_lldp_variables *lldp_cfg);
/* prototype for functions used for NVM access */
@@ -222,6 +220,7 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data);
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
u16 *checksum);
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
/* prototype for functions used for SW locks */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 6bd333cde28b..1d40f425acf1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -28,6 +27,10 @@
#ifndef _I40E_REGISTER_H_
#define _I40E_REGISTER_H_
+#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */
+#define I40E_GL_GP_FUSE_MAX_INDEX 28
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK (0xFFFFFFFF << I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4
#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
@@ -38,6 +41,11 @@
#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8
#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
@@ -50,9 +58,14 @@
#define I40E_PFPCI_VF_FLUSH_DONE 0x0009C600
#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880
#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+
#define I40E_PF_ARQBAH 0x00080180
#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
#define I40E_PF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_PF_ARQBAH_ARQBAH_SHIFT)
@@ -837,7 +850,7 @@
#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
#define I40E_GLHMC_PEQ1FLMAX 0x000C2058
#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
-#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
#define I40E_GLHMC_PEQ1MAX 0x000C2054
#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
@@ -903,7 +916,7 @@
#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT)
#define I40E_GLHMC_PEXFFLMAX 0x000C204c
#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
-#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x1FFFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
#define I40E_GLHMC_PEXFMAX 0x000C2048
#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
@@ -1636,7 +1649,7 @@
#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK (0x1 << I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4))
-#define I40E_VSILAN_QTABLE_MAX_INDEX 15
+#define I40E_VSILAN_QTABLE_MAX_INDEX 7
#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
#define I40E_VSILAN_QTABLE_QINDEX_0_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
@@ -1773,16 +1786,20 @@
#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
#define I40E_GL_MNG_FWSM 0x000B6134
-#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
-#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x3FF << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 1
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x7 << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 6
#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK (0x1 << I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK (0xF << I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK (0x1 << I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
+#define I40E_GL_MNG_FWSM_RESET_CNT_MASK (0x7 << I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK (0x3F << I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_RSVD_SHIFT 25
+#define I40E_GL_MNG_FWSM_RSVD_MASK (0x1 << I40E_GL_MNG_FWSM_RSVD_SHIFT)
#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
@@ -2035,6 +2052,28 @@
#define I40E_GLNVM_SRDATA_WRDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_WRDATA_SHIFT)
#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
#define I40E_GLNVM_SRDATA_RDDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_RDDATA_SHIFT)
+#define I40E_GLNVM_ULD 0x000B6008
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
+
#define I40E_GLPCI_BYTCTH 0x0009C484
#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
@@ -2170,6 +2209,12 @@
#define I40E_GLPCI_PCIERR 0x000BE4FC
#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK (0xFFFFFFFF << I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PCITEST2 0x000BE4BC
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT 0
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_MASK (0x1 << I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT)
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT 1
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_MASK (0x1 << I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT)
+
#define I40E_GLPCI_PKTCT 0x0009C4BC
#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK (0xFFFFFFFF << I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
@@ -2380,8 +2425,7 @@
#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17
-#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT)
+
#define I40E_PFPE_MRTEIDXMASK 0x00008600
#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
@@ -2460,8 +2504,6 @@
#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17
-#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT)
#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
@@ -3141,30 +3183,6 @@
#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
-#define I40E_GLPM_DMACR 0x000881F4
-#define I40E_GLPM_DMACR_DMACWT_SHIFT 0
-#define I40E_GLPM_DMACR_DMACWT_MASK (0xFFFF << I40E_GLPM_DMACR_DMACWT_SHIFT)
-#define I40E_GLPM_DMACR_EXIT_DC_SHIFT 29
-#define I40E_GLPM_DMACR_EXIT_DC_MASK (0x1 << I40E_GLPM_DMACR_EXIT_DC_SHIFT)
-#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT 30
-#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_MASK (0x1 << I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT)
-#define I40E_GLPM_DMACR_DMAC_EN_SHIFT 31
-#define I40E_GLPM_DMACR_DMAC_EN_MASK (0x1 << I40E_GLPM_DMACR_DMAC_EN_SHIFT)
-#define I40E_GLPM_LTRC 0x000BE500
-#define I40E_GLPM_LTRC_SLTRV_SHIFT 0
-#define I40E_GLPM_LTRC_SLTRV_MASK (0x3FF << I40E_GLPM_LTRC_SLTRV_SHIFT)
-#define I40E_GLPM_LTRC_SSCALE_SHIFT 10
-#define I40E_GLPM_LTRC_SSCALE_MASK (0x7 << I40E_GLPM_LTRC_SSCALE_SHIFT)
-#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT 15
-#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT)
-#define I40E_GLPM_LTRC_NSLTRV_SHIFT 16
-#define I40E_GLPM_LTRC_NSLTRV_MASK (0x3FF << I40E_GLPM_LTRC_NSLTRV_SHIFT)
-#define I40E_GLPM_LTRC_NSSCALE_SHIFT 26
-#define I40E_GLPM_LTRC_NSSCALE_MASK (0x7 << I40E_GLPM_LTRC_NSSCALE_SHIFT)
-#define I40E_GLPM_LTRC_LTR_SEND_SHIFT 30
-#define I40E_GLPM_LTRC_LTR_SEND_MASK (0x1 << I40E_GLPM_LTRC_LTR_SEND_SHIFT)
-#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT 31
-#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT)
#define I40E_PRTPM_EEE_STAT 0x001E4320
#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK (0x1 << I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
@@ -3201,9 +3219,6 @@
#define I40E_PRTPM_GC_LCDMP_MASK (0x1 << I40E_PRTPM_GC_LCDMP_SHIFT)
#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK (0x1 << I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
-#define I40E_PRTPM_HPTC 0x000AC800
-#define I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT 0
-#define I40E_PRTPM_HPTC_HIGH_PRI_TC_MASK (0xFF << I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT)
#define I40E_PRTPM_RLPIC 0x001E43A0
#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
#define I40E_PRTPM_RLPIC_ERLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
@@ -3265,8 +3280,8 @@
#define I40E_GLQF_CTL_HTOEP_FCOE_MASK (0x1 << I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
#define I40E_GLQF_CTL_PCNT_ALLOC_MASK (0x7 << I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
-#define I40E_GLQF_CTL_DDPLPEN_SHIFT 7
-#define I40E_GLQF_CTL_DDPLPEN_MASK (0x1 << I40E_GLQF_CTL_DDPLPEN_SHIFT)
+#define I40E_GLQF_CTL_RSVD_SHIFT 7
+#define I40E_GLQF_CTL_RSVD_MASK (0x1 << I40E_GLQF_CTL_RSVD_SHIFT)
#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
#define I40E_GLQF_CTL_MAXPEBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
@@ -3416,9 +3431,9 @@
#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */
#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
-#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
-#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 6
-#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0xF << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x1F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0x1F << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4))
@@ -3504,7 +3519,7 @@
#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4))
-#define I40E_VSIQF_TCREGION_MAX_INDEX 7
+#define I40E_VSIQF_TCREGION_MAX_INDEX 3
#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
@@ -3521,10 +3536,7 @@
#define I40E_GL_FCOEDDPC_MAX_INDEX 143
#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
-#define I40E_GL_FCOEDDPEC(_i) (0x00314900 + ((_i) * 8)) /* _i=0...143 */
-#define I40E_GL_FCOEDDPEC_MAX_INDEX 143
-#define I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT 0
-#define I40E_GL_FCOEDDPEC_CFOEDDPEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT)
+/* _i=0...143 */
#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */
#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
@@ -4276,46 +4288,10 @@
#define I40E_PFPM_APM 0x000B8080
#define I40E_PFPM_APM_APME_SHIFT 0
#define I40E_PFPM_APM_APME_MASK (0x1 << I40E_PFPM_APM_APME_SHIFT)
-#define I40E_PFPM_FHFT_DATA(_i, _j) (0x00060000 + ((_i) * 4096 + (_j) * 128))
-#define I40E_PFPM_FHFT_DATA_MAX_INDEX 7
-#define I40E_PFPM_FHFT_DATA_DWORD_SHIFT 0
-#define I40E_PFPM_FHFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PFPM_FHFT_DATA_DWORD_SHIFT)
#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */
#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK (0xFF << I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PFPM_FHFT_MASK(_i, _j) (0x00068000 + ((_i) * 1024 + (_j) * 128))
-#define I40E_PFPM_FHFT_MASK_MAX_INDEX 7
-#define I40E_PFPM_FHFT_MASK_MASK_SHIFT 0
-#define I40E_PFPM_FHFT_MASK_MASK_MASK (0xFFFF << I40E_PFPM_FHFT_MASK_MASK_SHIFT)
-#define I40E_PFPM_PROXYFC 0x00245A80
-#define I40E_PFPM_PROXYFC_PPROXYE_SHIFT 0
-#define I40E_PFPM_PROXYFC_PPROXYE_MASK (0x1 << I40E_PFPM_PROXYFC_PPROXYE_SHIFT)
-#define I40E_PFPM_PROXYFC_EX_SHIFT 1
-#define I40E_PFPM_PROXYFC_EX_MASK (0x1 << I40E_PFPM_PROXYFC_EX_SHIFT)
-#define I40E_PFPM_PROXYFC_ARP_SHIFT 4
-#define I40E_PFPM_PROXYFC_ARP_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_SHIFT)
-#define I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT 5
-#define I40E_PFPM_PROXYFC_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT)
-#define I40E_PFPM_PROXYFC_NS_SHIFT 9
-#define I40E_PFPM_PROXYFC_NS_MASK (0x1 << I40E_PFPM_PROXYFC_NS_SHIFT)
-#define I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT 10
-#define I40E_PFPM_PROXYFC_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT)
-#define I40E_PFPM_PROXYFC_MLD_SHIFT 12
-#define I40E_PFPM_PROXYFC_MLD_MASK (0x1 << I40E_PFPM_PROXYFC_MLD_SHIFT)
-#define I40E_PFPM_PROXYS 0x00245B80
-#define I40E_PFPM_PROXYS_EX_SHIFT 1
-#define I40E_PFPM_PROXYS_EX_MASK (0x1 << I40E_PFPM_PROXYS_EX_SHIFT)
-#define I40E_PFPM_PROXYS_ARP_SHIFT 4
-#define I40E_PFPM_PROXYS_ARP_MASK (0x1 << I40E_PFPM_PROXYS_ARP_SHIFT)
-#define I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT 5
-#define I40E_PFPM_PROXYS_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT)
-#define I40E_PFPM_PROXYS_NS_SHIFT 9
-#define I40E_PFPM_PROXYS_NS_MASK (0x1 << I40E_PFPM_PROXYS_NS_SHIFT)
-#define I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT 10
-#define I40E_PFPM_PROXYS_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT)
-#define I40E_PFPM_PROXYS_MLD_SHIFT 12
-#define I40E_PFPM_PROXYS_MLD_MASK (0x1 << I40E_PFPM_PROXYS_MLD_SHIFT)
#define I40E_PFPM_WUC 0x0006B200
#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
#define I40E_PFPM_WUC_EN_APM_D0_MASK (0x1 << I40E_PFPM_WUC_EN_APM_D0_SHIFT)
@@ -4536,21 +4512,21 @@
#define I40E_VFMSIX_PBA 0x00002000
#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
#define I40E_VFMSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
#define I40E_VFMSIX_TADD_MAX_INDEX 16
#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
#define I40E_VFMSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
#define I40E_VFMSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
#define I40E_VFMSIX_TMSG_MAX_INDEX 16
#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
#define I40E_VFMSIX_TUADD_MAX_INDEX 16
#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
#define I40E_VFMSIX_TVCTRL_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL_MASK_SHIFT)
@@ -4610,8 +4586,6 @@
#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT 17
-#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT)
#define I40E_VFPE_MRTEIDXMASK1 0x00009000
#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
@@ -4684,5 +4658,13 @@
#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
#define I40E_VFQF_HREGION_REGION_7_MASK (0x7 << I40E_VFQF_HREGION_REGION_7_SHIFT)
-
+#define I40E_RCU_PST_FOC_ACCESS_STATUS 0x00270110
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT 0
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT 8
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT 16
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT 24
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_MASK (0x7 << I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT)
#endif
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
index 5e5bcddac573..5f9cac55aa55 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index f1f03bc5c729..43d88dd66ed4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -77,7 +76,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
/* grab the next descriptor */
i = tx_ring->next_to_use;
fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
- tx_buf = &tx_ring->tx_bi[i];
tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
@@ -129,15 +127,23 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
/* Now program a dummy descriptor */
i = tx_ring->next_to_use;
tx_desc = I40E_TX_DESC(tx_ring, i);
+ tx_buf = &tx_ring->tx_bi[i];
tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
+ /* record length and DMA address */
+ dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP);
+ dma_unmap_addr_set(tx_buf, dma, dma);
+
tx_desc->buffer_addr = cpu_to_le64(dma);
td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
+ /* set the timestamp */
+ tx_buf->time_stamp = jiffies;
+
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -768,7 +774,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
rx_ring->rx_buf_len);
if (!skb) {
- rx_ring->rx_stats.alloc_rx_buff_failed++;
+ rx_ring->rx_stats.alloc_buff_failed++;
goto no_buffers;
}
/* initialize queue mapping */
@@ -782,7 +788,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(rx_ring->dev, bi->dma)) {
- rx_ring->rx_stats.alloc_rx_buff_failed++;
+ rx_ring->rx_stats.alloc_buff_failed++;
bi->dma = 0;
goto no_buffers;
}
@@ -792,7 +798,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
if (!bi->page) {
bi->page = alloc_page(GFP_ATOMIC);
if (!bi->page) {
- rx_ring->rx_stats.alloc_rx_page_failed++;
+ rx_ring->rx_stats.alloc_page_failed++;
goto no_buffers;
}
}
@@ -807,7 +813,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
DMA_FROM_DEVICE);
if (dma_mapping_error(rx_ring->dev,
bi->page_dma)) {
- rx_ring->rx_stats.alloc_rx_page_failed++;
+ rx_ring->rx_stats.alloc_page_failed++;
bi->page_dma = 0;
goto no_buffers;
}
@@ -860,12 +866,25 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring,
* @skb: skb currently being received and modified
* @rx_status: status value of last descriptor in packet
* @rx_error: error value of last descriptor in packet
+ * @rx_ptype: ptype value of last descriptor in packet
**/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb,
u32 rx_status,
- u32 rx_error)
+ u32 rx_error,
+ u16 rx_ptype)
{
+ bool ipv4_tunnel, ipv6_tunnel;
+ __wsum rx_udp_csum;
+ __sum16 csum;
+ struct iphdr *iph;
+
+ ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
+ (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
+ ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
+ (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+
+ skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
skb->ip_summed = CHECKSUM_NONE;
/* Rx csum enabled and ip headers found? */
@@ -873,13 +892,43 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
- /* IP or L4 checksum error */
+ /* IP or L4 or outermost IP checksum error */
if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) {
+ (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) {
vsi->back->hw_csum_rx_error++;
return;
}
+ if (ipv4_tunnel &&
+ !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
+ /* If VXLAN traffic has an outer UDPv4 checksum, we need to check
+ * it in the driver; the hardware does not do it for us.
+ * Since the L3L4P bit was set, we assume a valid IHL value (>= 5),
+ * so the total IPv4 header length is IHL * 4 bytes.
+ */
+ skb->transport_header = skb->mac_header +
+ sizeof(struct ethhdr) +
+ (ip_hdr(skb)->ihl * 4);
+
+ /* Add 4 bytes for VLAN tagged packets */
+ skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
+ skb->protocol == htons(ETH_P_8021AD))
+ ? VLAN_HLEN : 0;
+
+ rx_udp_csum = udp_csum(skb);
+ iph = ip_hdr(skb);
+ csum = csum_tcpudp_magic(
+ iph->saddr, iph->daddr,
+ (skb->len - skb_transport_offset(skb)),
+ IPPROTO_UDP, rx_udp_csum);
+
+ if (udp_hdr(skb)->check != csum) {
+ vsi->back->hw_csum_rx_error++;
+ return;
+ }
+ }
+
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
@@ -891,13 +940,15 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
static inline u32 i40e_rx_hash(struct i40e_ring *ring,
union i40e_rx_desc *rx_desc)
{
- if (ring->netdev->features & NETIF_F_RXHASH) {
- if ((le64_to_cpu(rx_desc->wb.qword1.status_error_len) >>
- I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
- I40E_RX_DESC_FLTSTAT_RSS_HASH)
- return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
- }
- return 0;
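+ /* build the little-endian FLTSTAT == RSS-hash pattern once so the
+ * descriptor word can be tested without per-packet byte swapping
+ */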
+ const __le64 rss_mask =
+ cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+ if ((ring->netdev->features & NETIF_F_RXHASH) &&
+ (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
+ return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+ else
+ return 0;
}
/**
@@ -918,6 +969,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
union i40e_rx_desc *rx_desc;
u32 rx_error, rx_status;
u64 qword;
+ u16 rx_ptype;
rx_desc = I40E_RX_DESC(rx_ring, i);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
@@ -938,18 +990,20 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
skb = rx_bi->skb;
prefetch(skb->data);
- rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
- >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
- rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK)
- >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
- rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK)
- >> I40E_RXD_QW1_LENGTH_SPH_SHIFT;
+ rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
+ rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
+ I40E_RXD_QW1_LENGTH_SPH_SHIFT;
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK)
- >> I40E_RXD_QW1_ERROR_SHIFT;
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+ I40E_RXD_QW1_ERROR_SHIFT;
rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
rx_bi->skb = NULL;
/* This memory barrier is needed to keep us from reading
@@ -1030,13 +1084,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
}
skb->rxhash = i40e_rx_hash(rx_ring, rx_desc);
- i40e_rx_checksum(vsi, skb, rx_status, rx_error);
-
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
total_rx_packets++;
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+ i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+
vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
: 0;
@@ -1059,8 +1114,8 @@ next_desc:
/* use prefetched values */
rx_desc = next_rxd;
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
- >> I40E_RXD_QW1_STATUS_SHIFT;
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
}
rx_ring->next_to_clean = i;
@@ -1268,7 +1323,7 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
tx_flags |= I40E_TX_FLAGS_HW_VLAN;
/* else if it is a SW VLAN, check the next protocol and store the tag */
- } else if (protocol == __constant_htons(ETH_P_8021Q)) {
+ } else if (protocol == htons(ETH_P_8021Q)) {
struct vlan_hdr *vhdr, _vhdr;
vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
if (!vhdr)
@@ -1333,7 +1388,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
return err;
}
- if (protocol == __constant_htons(ETH_P_IP)) {
+ if (protocol == htons(ETH_P_IP)) {
iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
iph->tot_len = 0;
@@ -1359,10 +1414,10 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
cd_cmd = I40E_TX_CTX_DESC_TSO;
cd_tso_len = skb->len - *hdr_len;
cd_mss = skb_shinfo(skb)->gso_size;
- *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT)
- | ((u64)cd_tso_len
- << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
- | ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+ ((u64)cd_tso_len <<
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
return 1;
}
@@ -1660,6 +1715,7 @@ dma_error:
static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ /* Memory barrier before checking head and tail */
smp_mb();
/* Check again in case another CPU has just made room available. */
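The comment added here documents why smp_mb() sits between stopping the subqueue and re-checking for free descriptors: without the barrier the re-check could be reordered before the stop becomes visible, and a completion running on another CPU could then miss the wakeup. The surrounding logic, condensed into a sketch (the recheck and restart details follow the usual netdev pattern and are assumed, not shown in this hunk):

	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Memory barrier before checking head and tail */
	smp_mb();
	/* Check again in case another CPU has just made room available. */
	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;
	/* Room became available after we stopped; restart the queue. */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	return 0;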
@@ -1756,9 +1812,9 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
first = &tx_ring->tx_bi[tx_ring->next_to_use];
/* setup IPv4/IPv6 offloads */
- if (protocol == __constant_htons(ETH_P_IP))
+ if (protocol == htons(ETH_P_IP))
tx_flags |= I40E_TX_FLAGS_IPV4;
- else if (protocol == __constant_htons(ETH_P_IPV6))
+ else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index db55d9947f15..6f8506c181d9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -25,6 +24,9 @@
*
******************************************************************************/
+#ifndef _I40E_TXRX_H_
+#define _I40E_TXRX_H_
+
/* Interrupt Throttling and Rate Limiting (storm control) Goodies */
#define I40E_MAX_ITR 0x07FF
@@ -49,10 +51,43 @@
#define I40E_QUEUE_END_OF_LIST 0x7FF
-#define I40E_ITR_NONE 3
-#define I40E_RX_ITR 0
-#define I40E_TX_ITR 1
-#define I40E_PE_ITR 2
+/* This enum matches hardware bits and is meant to be used by the DYN_CTLN
+ * and QINT registers, or more generally anywhere the manual mentions
+ * ITR_INDX. ITR_NONE cannot be used as an index 'n' into any register;
+ * instead it is a special value meaning "don't update" ITR0/1/2.
+ */
+enum i40e_dyn_idx_t {
+ I40E_IDX_ITR0 = 0,
+ I40E_IDX_ITR1 = 1,
+ I40E_IDX_ITR2 = 2,
+ I40E_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
+};
+
+/* these are indexes into ITRN registers */
+#define I40E_RX_ITR I40E_IDX_ITR0
+#define I40E_TX_ITR I40E_IDX_ITR1
+#define I40E_PE_ITR I40E_IDX_ITR2
+
+/* Supported RSS offloads */
+#define I40E_DEFAULT_RSS_HENA ( \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
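I40E_DEFAULT_RSS_HENA gathers one bit per supported packet-classifier type into a 64-bit hash-enable mask. The hardware consumes such a mask as two 32-bit register writes; a hedged sketch, with the register accessor and register name assumed from the rest of the driver rather than from this hunk:

	/* Assumed usage: program the default hash-enable mask into the
	 * two 32-bit halves of the PF hash-enable registers.
	 */
	u64 hena = I40E_DEFAULT_RSS_HENA;

	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));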
/* Supported Rx Buffer Sizes */
#define I40E_RXBUFFER_512 512 /* Used for packet split */
#define I40E_RXBUFFER_2048 2048
@@ -139,8 +174,8 @@ struct i40e_tx_queue_stats {
struct i40e_rx_queue_stats {
u64 non_eop_descs;
- u64 alloc_rx_page_failed;
- u64 alloc_rx_buff_failed;
+ u64 alloc_page_failed;
+ u64 alloc_buff_failed;
};
enum i40e_ring_state_t {
@@ -262,3 +297,4 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
+#endif /* _I40E_TXRX_H_ */
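The #endif above closes the _I40E_TXRX_H_ guard introduced at the top of this file's diff, giving the header the standard include-once shape:

	#ifndef _I40E_TXRX_H_
	#define _I40E_TXRX_H_
	/* ... declarations ... */
	#endif /* _I40E_TXRX_H_ */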
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index f3f22b20f02f..12473ad5c8e6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -49,12 +48,14 @@
#define I40E_VF_DEVICE_ID 0x154C
#define I40E_VF_HV_DEVICE_ID 0x1571
-#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0000
+#define i40e_is_40G_device(d) ((d) == I40E_QSFP_A_DEVICE_ID || \
+ (d) == I40E_QSFP_B_DEVICE_ID || \
+ (d) == I40E_QSFP_C_DEVICE_ID)
#define I40E_MAX_VSI_QP 16
#define I40E_MAX_VF_VSI 3
#define I40E_MAX_CHAINED_RX_BUFFERS 5
+#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
/* Max default timeout in ms, */
#define I40E_MAX_NVM_TIMEOUT 18000
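i40e_is_40G_device() replaces the removed firmware API version defines with a device-ID predicate; the macro parameter is parenthesized on every use so arbitrary expressions can be passed safely. A usage sketch (the device_id field and the helper it gates are assumptions for illustration):

	/* Assumed usage: branch on whether the function is a 40G part. */
	if (i40e_is_40G_device(hw->device_id))
		set_40g_defaults(hw);	/* hypothetical helper */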
@@ -75,8 +76,6 @@
struct i40e_hw;
typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
-#define I40E_ETH_LENGTH_OF_ADDRESS 6
-
/* Data type manipulation macros. */
#define I40E_DESC_UNUSED(R) \
@@ -87,7 +86,7 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
#define I40E_QTX_CTL_VF_QUEUE 0x0
#define I40E_QTX_CTL_PF_QUEUE 0x2
-/* debug masks */
+/* debug masks - set these bits in hw->debug_mask to control output */
enum i40e_debug_mask {
I40E_DEBUG_INIT = 0x00000001,
I40E_DEBUG_RELEASE = 0x00000002,
@@ -101,10 +100,10 @@ enum i40e_debug_mask {
I40E_DEBUG_DCB = 0x00000400,
I40E_DEBUG_DIAG = 0x00000800,
- I40E_DEBUG_AQ_MESSAGE = 0x01000000, /* for i40e_debug() */
+ I40E_DEBUG_AQ_MESSAGE = 0x01000000,
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
- I40E_DEBUG_AQ_COMMAND = 0x06000000, /* for i40e_debug_aq() */
+ I40E_DEBUG_AQ_COMMAND = 0x06000000,
I40E_DEBUG_AQ = 0x0F000000,
I40E_DEBUG_USER = 0xF0000000,
@@ -134,6 +133,7 @@ enum i40e_media_type {
I40E_MEDIA_TYPE_BASET,
I40E_MEDIA_TYPE_BACKPLANE,
I40E_MEDIA_TYPE_CX4,
+ I40E_MEDIA_TYPE_DA,
I40E_MEDIA_TYPE_VIRTUAL
};
@@ -171,6 +171,7 @@ struct i40e_link_status {
u8 link_info;
u8 an_info;
u8 ext_info;
+ u8 loopback;
/* is Link Status Event notification to SW enabled */
bool lse_enable;
};
@@ -236,9 +237,9 @@ struct i40e_hw_capabilities {
struct i40e_mac_info {
enum i40e_mac_type type;
- u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
u16 max_fcoeq;
};
@@ -500,18 +501,24 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3,
I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
- I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 3 BITS */
+ I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
+ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
- I40E_RX_DESC_STATUS_LPBK_SHIFT = 14
+ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
+ I40E_RX_DESC_STATUS_UDP_0_SHIFT = 16
};
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x7UL << \
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \
+ I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
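TSYNINDX shrinks from three bits to two, and the freed bit 7 becomes a separate TSYNVALID flag, so consumers should check validity before trusting the index. A sketch of that check, using only the defines above:

	/* Sketch: read the Rx timestamp index only when the descriptor
	 * marks it valid.
	 */
	if (qword & I40E_RXD_QW1_STATUS_TSYNVALID_MASK) {
		u8 tsyn_idx = (qword & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
			      I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
		/* ... fetch the timestamp latched at tsyn_idx ... */
	}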
enum i40e_rx_desc_fltstat_values {
I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
@@ -547,28 +554,32 @@ enum i40e_rx_desc_error_l3l4e_fcoe_masks {
/* Packet type non-ip values */
enum i40e_rx_l2_ptype {
- I40E_RX_PTYPE_L2_RESERVED = 0,
- I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
- I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
- I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
- I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
- I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
- I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
- I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
- I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
- I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
- I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
- I40E_RX_PTYPE_L2_ARP = 11,
- I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
- I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
- I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
- I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
- I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
- I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21
+ I40E_RX_PTYPE_L2_RESERVED = 0,
+ I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
+ I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
+ I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
+ I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
+ I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
+ I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
+ I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ I40E_RX_PTYPE_L2_ARP = 11,
+ I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
+ I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
+ I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
+ I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
+ I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
};
struct i40e_rx_ptype_decoded {
@@ -852,10 +863,7 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
- /* Note: Value 0-25 are reserved for future use */
- I40E_FILTER_PCTYPE_IPV4_TEREDO_UDP = 26,
- I40E_FILTER_PCTYPE_IPV6_TEREDO_UDP = 27,
- I40E_FILTER_PCTYPE_NONF_IPV4_1588_UDP = 28,
+ /* Note: Values 0-28 are reserved for future use */
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
@@ -864,8 +872,7 @@ enum i40e_filter_pctype {
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Value 37 is reserved for future use */
- I40E_FILTER_PCTYPE_NONF_IPV6_1588_UDP = 38,
+ /* Note: Values 37-38 are reserved for future use */
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
@@ -877,7 +884,8 @@ enum i40e_filter_pctype {
/* Note: Value 47 is reserved for future use */
I40E_FILTER_PCTYPE_FCOE_OX = 48,
I40E_FILTER_PCTYPE_FCOE_RX = 49,
- /* Note: Value 50-62 are reserved for future use */
+ I40E_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
};
@@ -1014,6 +1022,7 @@ struct i40e_hw_port_stats {
#define I40E_SR_NVM_CONTROL_WORD 0x00
#define I40E_SR_EMP_MODULE_PTR 0x0F
#define I40E_SR_NVM_IMAGE_VERSION 0x18
+#define I40E_SR_NVM_WAKE_ON_LAN 0x19
#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
#define I40E_SR_NVM_EETRACK_LO 0x2D
#define I40E_SR_NVM_EETRACK_HI 0x2E
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index cc6654f1dac7..22a1b69cd646 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -142,7 +141,7 @@ struct i40e_virtchnl_vsi_resource {
u16 num_queue_pairs;
enum i40e_vsi_type vsi_type;
u16 qset_handle;
- u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 default_mac_addr[ETH_ALEN];
};
/* VF offload flags */
#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
@@ -265,7 +264,7 @@ struct i40e_virtchnl_queue_select {
*/
struct i40e_virtchnl_ether_addr {
- u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 addr[ETH_ALEN];
u8 pad[2];
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 07596982a477..51a4f6125437 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -70,7 +69,7 @@ static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
struct i40e_pf *pf = vf->pf;
- return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
+ return vector_id <= pf->hw.func_caps.num_msix_vectors_vf;
}
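The bound loosens from < to <=, presumably because vector 0 is the VF's mailbox/miscellaneous vector and traffic vectors are numbered 1 through num_msix_vectors_vf; the hunk carries no comment, so that reading is an assumption. Spelled out as a sketch:

	/* Assumed rationale: IDs 1..num_msix_vectors_vf address traffic
	 * vectors (0 is the mailbox vector), hence the inclusive bound.
	 */
	return vector_id <= pf->hw.func_caps.num_msix_vectors_vf;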
/***********************vf resource mgmt routines*****************/
@@ -102,130 +101,6 @@ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
}
/**
- * i40e_ctrl_vsi_tx_queue
- * @vf: pointer to the vf info
- * @vsi_idx: index of VSI in PF struct
- * @vsi_queue_id: vsi relative queue index
- * @ctrl: control flags
- *
- * enable/disable/enable check/disable check
- **/
-static int i40e_ctrl_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
- u16 vsi_queue_id,
- enum i40e_queue_ctrl ctrl)
-{
- struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
- bool writeback = false;
- u16 pf_queue_id;
- int ret = 0;
- u32 reg;
-
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
- reg = rd32(hw, I40E_QTX_ENA(pf_queue_id));
-
- switch (ctrl) {
- case I40E_QUEUE_CTRL_ENABLE:
- reg |= I40E_QTX_ENA_QENA_REQ_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_ENABLECHECK:
- ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
- break;
- case I40E_QUEUE_CTRL_DISABLE:
- reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_DISABLECHECK:
- ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
- break;
- case I40E_QUEUE_CTRL_FASTDISABLE:
- reg |= I40E_QTX_ENA_FAST_QDIS_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_FASTDISABLECHECK:
- ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
- if (!ret) {
- reg &= ~I40E_QTX_ENA_FAST_QDIS_MASK;
- writeback = true;
- }
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- if (writeback) {
- wr32(hw, I40E_QTX_ENA(pf_queue_id), reg);
- i40e_flush(hw);
- }
-
- return ret;
-}
-
-/**
- * i40e_ctrl_vsi_rx_queue
- * @vf: pointer to the vf info
- * @vsi_idx: index of VSI in PF struct
- * @vsi_queue_id: vsi relative queue index
- * @ctrl: control flags
- *
- * enable/disable/enable check/disable check
- **/
-static int i40e_ctrl_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
- u16 vsi_queue_id,
- enum i40e_queue_ctrl ctrl)
-{
- struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
- bool writeback = false;
- u16 pf_queue_id;
- int ret = 0;
- u32 reg;
-
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
- reg = rd32(hw, I40E_QRX_ENA(pf_queue_id));
-
- switch (ctrl) {
- case I40E_QUEUE_CTRL_ENABLE:
- reg |= I40E_QRX_ENA_QENA_REQ_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_ENABLECHECK:
- ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
- break;
- case I40E_QUEUE_CTRL_DISABLE:
- reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_DISABLECHECK:
- ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
- break;
- case I40E_QUEUE_CTRL_FASTDISABLE:
- reg |= I40E_QRX_ENA_FAST_QDIS_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_FASTDISABLECHECK:
- ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
- if (!ret) {
- reg &= ~I40E_QRX_ENA_FAST_QDIS_MASK;
- writeback = true;
- }
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- if (writeback) {
- wr32(hw, I40E_QRX_ENA(pf_queue_id), reg);
- i40e_flush(hw);
- }
-
- return ret;
-}
-
-/**
* i40e_config_irq_link_list
* @vf: pointer to the vf info
* @vsi_idx: index of VSI in PF struct
@@ -260,23 +135,17 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
goto irq_list_done;
}
tempmap = vecmap->rxq_map;
- vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
linklistmap |= (1 <<
(I40E_VIRTCHNL_SUPPORTED_QTYPES *
vsi_queue_id));
- vsi_queue_id =
- find_next_bit(&tempmap, I40E_MAX_VSI_QP, vsi_queue_id + 1);
}
tempmap = vecmap->txq_map;
- vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
linklistmap |= (1 <<
(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
+ 1));
- vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- vsi_queue_id + 1);
}
next_q = find_first_bit(&linklistmap,
@@ -307,7 +176,8 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
(I40E_MAX_VSI_QP *
I40E_VIRTCHNL_SUPPORTED_QTYPES),
next_q + 1);
- if (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+ if (next_q <
+ (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
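for_each_set_bit() replaces the open-coded find_first_bit()/find_next_bit() loops here and in the later virtchnl hunks; the two forms walk the same bits, the macro just hides the cursor bookkeeping. Side by side, with a hypothetical per-bit action:

	/* Old, open-coded traversal: */
	bit = find_first_bit(&map, NBITS);
	while (bit < NBITS) {
		use_bit(bit);		/* hypothetical action */
		bit = find_next_bit(&map, NBITS, bit + 1);
	}

	/* Equivalent replacement: */
	for_each_set_bit(bit, &map, NBITS)
		use_bit(bit);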
@@ -499,7 +369,6 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
struct i40e_mac_filter *f = NULL;
struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
struct i40e_vsi *vsi;
int ret = 0;
@@ -513,14 +382,32 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
goto error_alloc_vsi_res;
}
if (type == I40E_VSI_SRIOV) {
+ u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
vf->lan_vsi_index = vsi->idx;
vf->lan_vsi_id = vsi->id;
dev_info(&pf->pdev->dev,
"LAN VSI index %d, VSI id %d\n",
vsi->idx, vsi->id);
+		/* If a port VLAN was configured and the VF driver
+		 * was then removed, the VSI's port VLAN
+		 * configuration was destroyed along with it. Check
+		 * whether a port VLAN is set and restore the VSI
+		 * configuration if needed.
+		 */
+ if (vf->port_vlan_id)
+ i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
- 0, true, false);
+ vf->port_vlan_id, true, false);
+ if (!f)
+ dev_info(&pf->pdev->dev,
+ "Could not allocate VF MAC addr\n");
+ f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id,
+ true, false);
+ if (!f)
+ dev_info(&pf->pdev->dev,
+ "Could not allocate VF broadcast filter\n");
}
+
if (!f) {
dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
ret = -ENOMEM;
@@ -534,150 +421,11 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
goto error_alloc_vsi_res;
}
- /* accept bcast pkts. by default */
- ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
- if (ret) {
- dev_err(&pf->pdev->dev,
- "set vsi bcast failed for vf %d, vsi %d, aq_err %d\n",
- vf->vf_id, vsi->idx, pf->hw.aq.asq_last_status);
- ret = -EINVAL;
- }
-
error_alloc_vsi_res:
return ret;
}
/**
- * i40e_reset_vf
- * @vf: pointer to the vf structure
- * @flr: VFLR was issued or not
- *
- * reset the vf
- **/
-int i40e_reset_vf(struct i40e_vf *vf, bool flr)
-{
- int ret = -ENOENT;
- struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
- u32 reg, reg_idx, msix_vf;
- bool rsd = false;
- u16 pf_queue_id;
- int i, j;
-
- /* warn the VF */
- wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_INPROGRESS);
-
- clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
-
- /* PF triggers VFR only when VF requests, in case of
- * VFLR, HW triggers VFR
- */
- if (!flr) {
- /* reset vf using VPGEN_VFRTRIG reg */
- reg = I40E_VPGEN_VFRTRIG_VFSWR_MASK;
- wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
- i40e_flush(hw);
- }
-
- /* poll VPGEN_VFRSTAT reg to make sure
- * that reset is complete
- */
- for (i = 0; i < 4; i++) {
- /* vf reset requires driver to first reset the
- * vf & than poll the status register to make sure
- * that the requested op was completed
- * successfully
- */
- udelay(10);
- reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
- if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
- rsd = true;
- break;
- }
- }
-
- if (!rsd)
- dev_err(&pf->pdev->dev, "VF reset check timeout %d\n",
- vf->vf_id);
-
- /* fast disable qps */
- for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
- ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
- I40E_QUEUE_CTRL_FASTDISABLE);
- ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
- I40E_QUEUE_CTRL_FASTDISABLE);
- }
-
- /* Queue enable/disable requires driver to
- * first reset the vf & than poll the status register
- * to make sure that the requested op was completed
- * successfully
- */
- udelay(10);
- for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
- ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
- I40E_QUEUE_CTRL_FASTDISABLECHECK);
- if (ret)
- dev_info(&pf->pdev->dev,
- "Queue control check failed on Tx queue %d of VSI %d VF %d\n",
- vf->lan_vsi_index, j, vf->vf_id);
- ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
- I40E_QUEUE_CTRL_FASTDISABLECHECK);
- if (ret)
- dev_info(&pf->pdev->dev,
- "Queue control check failed on Rx queue %d of VSI %d VF %d\n",
- vf->lan_vsi_index, j, vf->vf_id);
- }
-
- /* clear the irq settings */
- msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
- for (i = 0; i < msix_vf; i++) {
- /* format is same for both registers */
- if (0 == i)
- reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
- else
- reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
- (vf->vf_id))
- + (i - 1));
- reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
- I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
- wr32(hw, reg_idx, reg);
- i40e_flush(hw);
- }
- /* disable interrupts so the VF starts in a known state */
- for (i = 0; i < msix_vf; i++) {
- /* format is same for both registers */
- if (0 == i)
- reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
- else
- reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
- (vf->vf_id))
- + (i - 1));
- wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
- i40e_flush(hw);
- }
-
- /* set the defaults for the rqctl & tqctl registers */
- reg = (I40E_QINT_RQCTL_NEXTQ_INDX_MASK | I40E_QINT_RQCTL_ITR_INDX_MASK |
- I40E_QINT_RQCTL_NEXTQ_TYPE_MASK);
- for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
- wr32(hw, I40E_QINT_RQCTL(pf_queue_id), reg);
- wr32(hw, I40E_QINT_TQCTL(pf_queue_id), reg);
- }
-
- /* clear the reset bit in the VPGEN_VFRTRIG reg */
- reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
- reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
- wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
- /* tell the VF the reset is done */
- wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
- i40e_flush(hw);
-
- return ret;
-}
-
-/**
* i40e_enable_vf_mappings
* @vf: pointer to the vf info
*
@@ -756,6 +504,9 @@ static void i40e_disable_vf_mappings(struct i40e_vf *vf)
static void i40e_free_vf_res(struct i40e_vf *vf)
{
struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg_idx, reg;
+ int i, msix_vf;
/* free vsi & disconnect it from the parent uplink */
if (vf->lan_vsi_index) {
@@ -763,6 +514,34 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
vf->lan_vsi_index = 0;
vf->lan_vsi_id = 0;
}
+ msix_vf = pf->hw.func_caps.num_msix_vectors_vf + 1;
+ /* disable interrupts so the VF starts in a known state */
+ for (i = 0; i < msix_vf; i++) {
+ /* format is same for both registers */
+ if (0 == i)
+ reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
+ else
+ reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
+ (vf->vf_id))
+ + (i - 1));
+ wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+ i40e_flush(hw);
+ }
+
+ /* clear the irq settings */
+ for (i = 0; i < msix_vf; i++) {
+ /* format is same for both registers */
+ if (0 == i)
+ reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
+ else
+ reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
+ (vf->vf_id))
+ + (i - 1));
+ reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
+ wr32(hw, reg_idx, reg);
+ i40e_flush(hw);
+ }
/* reset some of the state variables keeping
* track of the resources
*/
@@ -804,6 +583,111 @@ error_alloc:
return ret;
}
+#define VF_DEVICE_STATUS 0xAA
+#define VF_TRANS_PENDING_MASK 0x20
+/**
+ * i40e_quiesce_vf_pci
+ * @vf: pointer to the vf structure
+ *
+ * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
+ * if the transactions never clear.
+ **/
+static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ int vf_abs_id, i;
+ u32 reg;
+
+ vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ wr32(hw, I40E_PF_PCI_CIAA,
+ VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
+ for (i = 0; i < 100; i++) {
+ reg = rd32(hw, I40E_PF_PCI_CIAD);
+ if ((reg & VF_TRANS_PENDING_MASK) == 0)
+ return 0;
+ udelay(1);
+ }
+ return -EIO;
+}
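i40e_quiesce_vf_pci() is a bounded poll through the PF's indirect config-space window: the CIAA write selects the target VF's device-status register (VF_DEVICE_STATUS, with the absolute VF number shifted into the VF_NUM field), CIAD reads it back, and the loop waits up to roughly 100us for the Transactions Pending bit to clear. The idiom, condensed:

	/* Condensed from the function above: select once, then poll
	 * with a fixed budget and fail with -EIO on timeout.
	 */
	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		if (!(rd32(hw, I40E_PF_PCI_CIAD) & VF_TRANS_PENDING_MASK))
			return 0;
		udelay(1);
	}
	return -EIO;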
+
+/**
+ * i40e_reset_vf
+ * @vf: pointer to the vf structure
+ * @flr: VFLR was issued or not
+ *
+ * reset the vf
+ **/
+void i40e_reset_vf(struct i40e_vf *vf, bool flr)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ bool rsd = false;
+ int i;
+ u32 reg;
+
+ /* warn the VF */
+ clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+
+ /* In the case of a VFLR, the HW has already reset the VF and we
+ * just need to clean up, so don't hit the VFRTRIG register.
+ */
+ if (!flr) {
+ /* reset vf using VPGEN_VFRTRIG reg */
+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+ reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+ i40e_flush(hw);
+ }
+
+ if (i40e_quiesce_vf_pci(vf))
+ dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
+ vf->vf_id);
+
+ /* poll VPGEN_VFRSTAT reg to make sure
+ * that reset is complete
+ */
+ for (i = 0; i < 100; i++) {
+		/* VF reset requires the driver to first reset the
+		 * VF and then poll the status register to make
+		 * sure that the requested op was completed
+		 * successfully
+ */
+ udelay(10);
+ reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+ if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
+ rsd = true;
+ break;
+ }
+ }
+
+ if (!rsd)
+ dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
+ vf->vf_id);
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
+ /* clear the reset bit in the VPGEN_VFRTRIG reg */
+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+ reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+
+ /* On initial reset, we won't have any queues */
+ if (vf->lan_vsi_index == 0)
+ goto complete_reset;
+
+ i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false);
+complete_reset:
+ /* reallocate vf resources to reset the VSI state */
+ i40e_free_vf_res(vf);
+ mdelay(10);
+ i40e_alloc_vf_res(vf);
+ i40e_enable_vf_mappings(vf);
+
+ /* tell the VF the reset is done */
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
+ i40e_flush(hw);
+}
+
/**
* i40e_vfs_are_assigned
* @pf: pointer to the pf structure
@@ -832,6 +716,76 @@ static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
return false;
}
+#ifdef CONFIG_PCI_IOV
+
+/**
+ * i40e_enable_pf_switch_lb
+ * @pf: pointer to the pf structure
+ *
+ * enable switch loop back or die - no point in a return value
+ **/
+static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi_context ctxt;
+ int aq_ret;
+
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s couldn't get pf vsi config, err %d, aq_err %d\n",
+ __func__, aq_ret, pf->hw.aq.asq_last_status);
+ return;
+ }
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: update vsi switch failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ }
+}
+#endif
+
+/**
+ * i40e_disable_pf_switch_lb
+ * @pf: pointer to the pf structure
+ *
+ * disable switch loop back or die - no point in a return value
+ **/
+static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi_context ctxt;
+ int aq_ret;
+
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s couldn't get pf vsi config, err %d, aq_err %d\n",
+ __func__, aq_ret, pf->hw.aq.asq_last_status);
+ return;
+ }
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: update vsi switch failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ }
+}
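Both loopback helpers populate a stack-allocated i40e_vsi_context, fetch the current VSI parameters into it, flip the ALLOW_LB switch flag, and write the parameters back; only the direction of the flag differs between the enable and disable paths. Note that fields not set explicitly keep whatever i40e_aq_get_vsi_params() returned. A defensive variant (an assumption, not what this patch does) would zero the context first:

	/* Assumed hardening, not in this patch: start from a zeroed
	 * context so unset fields cannot carry stack garbage.
	 */
	struct i40e_vsi_context ctxt;

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.vf_num = 0;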
/**
* i40e_free_vfs
@@ -842,17 +796,20 @@ static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
void i40e_free_vfs(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- int i;
+ u32 reg_idx, bit_idx;
+ int i, tmp, vf_id;
if (!pf->vf)
return;
/* Disable interrupt 0 so we don't try to handle the VFLR. */
- wr32(hw, I40E_PFINT_DYN_CTL0, 0);
- i40e_flush(hw);
+ i40e_irq_dynamic_disable_icr0(pf);
+ mdelay(10); /* let any messages in transit get finished up */
/* free up vf resources */
- for (i = 0; i < pf->num_alloc_vfs; i++) {
+ tmp = pf->num_alloc_vfs;
+ pf->num_alloc_vfs = 0;
+ for (i = 0; i < tmp; i++) {
if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
i40e_free_vf_res(&pf->vf[i]);
/* disable qp mappings */
@@ -861,20 +818,25 @@ void i40e_free_vfs(struct i40e_pf *pf)
kfree(pf->vf);
pf->vf = NULL;
- pf->num_alloc_vfs = 0;
- if (!i40e_vfs_are_assigned(pf))
+ if (!i40e_vfs_are_assigned(pf)) {
pci_disable_sriov(pf->pdev);
- else
+ /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
+ * work correctly when SR-IOV gets re-enabled.
+ */
+ for (vf_id = 0; vf_id < tmp; vf_id++) {
+ reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+ wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+ }
+ i40e_disable_pf_switch_lb(pf);
+ } else {
dev_warn(&pf->pdev->dev,
"unable to disable SR-IOV because VFs are assigned.\n");
+ }
/* Re-enable interrupt 0. */
- wr32(hw, I40E_PFINT_DYN_CTL0,
- I40E_PFINT_DYN_CTL0_INTENA_MASK |
- I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
- (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
- i40e_flush(hw);
+ i40e_irq_dynamic_enable_icr0(pf);
}
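The VFLR acknowledgment treats GLGEN_VFLRSTAT as an array of 32-bit words with one bit per absolute VF: the absolute VF number (vf_base_id + vf_id) splits into a word index and a bit index, and writing a 1 clears the latched event. As a worked sketch:

	/* Sketch: acknowledge VFLR for absolute VF number n; each
	 * VFLRSTAT word covers 32 VFs and is write-1-to-clear.
	 */
	u32 n = hw->func_caps.vf_base_id + vf_id;

	wr32(hw, I40E_GLGEN_VFLRSTAT(n / 32), 1 << (n % 32));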
#ifdef CONFIG_PCI_IOV
@@ -890,6 +852,9 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
struct i40e_vf *vfs;
int i, ret = 0;
+ /* Disable interrupt 0 so we don't try to handle the VFLR. */
+ i40e_irq_dynamic_disable_icr0(pf);
+
ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
if (ret) {
dev_err(&pf->pdev->dev,
@@ -913,11 +878,8 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
/* assign default capabilities */
set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
-
- ret = i40e_alloc_vf_res(&vfs[i]);
- i40e_reset_vf(&vfs[i], true);
- if (ret)
- break;
+ /* vf resources get allocated during reset */
+ i40e_reset_vf(&vfs[i], false);
/* enable vf vplan_qtable mappings */
i40e_enable_vf_mappings(&vfs[i]);
@@ -925,10 +887,13 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
pf->vf = vfs;
pf->num_alloc_vfs = num_alloc_vfs;
+ i40e_enable_pf_switch_lb(pf);
err_alloc:
if (ret)
i40e_free_vfs(pf);
err_iov:
+ /* Re-enable interrupt 0. */
+ i40e_irq_dynamic_enable_icr0(pf);
return ret;
}
@@ -1009,6 +974,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
+ int true_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
i40e_status aq_ret;
/* single place to detect unsuccessful return values */
@@ -1028,8 +994,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
vf->num_valid_msgs++;
}
- aq_ret = i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
- msg, msglen, NULL);
+ aq_ret = i40e_aq_send_msg_to_vf(hw, true_vf_id, v_opcode, v_retval,
+ msg, msglen, NULL);
if (aq_ret) {
dev_err(&pf->pdev->dev,
"Unable to send the message to VF %d aq_err %d\n",
@@ -1144,12 +1110,10 @@ err:
* unlike other virtchnl messages, pf driver
* doesn't send the response back to the vf
**/
-static int i40e_vc_reset_vf_msg(struct i40e_vf *vf)
+static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
- return -ENOENT;
-
- return i40e_reset_vf(vf, false);
+ if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+ i40e_reset_vf(vf, false);
}
/**
@@ -1291,27 +1255,21 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
/* lookout for the invalid queue index */
tempmap = map->rxq_map;
- vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
vsi_queue_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
- vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- vsi_queue_id + 1);
}
tempmap = map->txq_map;
- vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
vsi_queue_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
- vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- vsi_queue_id + 1);
}
i40e_config_irq_link_list(vf, vsi_id, map);
@@ -1337,8 +1295,6 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
u16 vsi_id = vqs->vsi_id;
i40e_status aq_ret = 0;
- unsigned long tempmap;
- u16 queue_id;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
@@ -1354,66 +1310,8 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
-
- tempmap = vqs->rx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_ENABLE);
-
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- tempmap = vqs->tx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_ENABLE);
-
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- /* Poll the status register to make sure that the
- * requested op was completed successfully
- */
- udelay(10);
-
- tempmap = vqs->rx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_ENABLECHECK)) {
- dev_err(&pf->pdev->dev,
- "Queue control check failed on RX queue %d of VSI %d VF %d\n",
- queue_id, vsi_id, vf->vf_id);
- }
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- tempmap = vqs->tx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_ENABLECHECK)) {
- dev_err(&pf->pdev->dev,
- "Queue control check failed on TX queue %d of VSI %d VF %d\n",
- queue_id, vsi_id, vf->vf_id);
- }
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
+ if (i40e_vsi_control_rings(pf->vsi[vsi_id], true))
+ aq_ret = I40E_ERR_TIMEOUT;
error_param:
/* send the response to the vf */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
@@ -1436,8 +1334,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
u16 vsi_id = vqs->vsi_id;
i40e_status aq_ret = 0;
- unsigned long tempmap;
- u16 queue_id;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
@@ -1453,65 +1349,8 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
-
- tempmap = vqs->rx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_DISABLE);
-
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- tempmap = vqs->tx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_DISABLE);
-
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- /* Poll the status register to make sure that the
- * requested op was completed successfully
- */
- udelay(10);
-
- tempmap = vqs->rx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_DISABLECHECK)) {
- dev_err(&pf->pdev->dev,
- "Queue control check failed on RX queue %d of VSI %d VF %d\n",
- queue_id, vsi_id, vf->vf_id);
- }
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- tempmap = vqs->tx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_DISABLECHECK)) {
- dev_err(&pf->pdev->dev,
- "Queue control check failed on TX queue %d of VSI %d VF %d\n",
- queue_id, vsi_id, vf->vf_id);
- }
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
+ if (i40e_vsi_control_rings(pf->vsi[vsi_id], false))
+ aq_ret = I40E_ERR_TIMEOUT;
error_param:
/* send the response to the vf */
@@ -1554,7 +1393,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
i40e_update_eth_stats(vsi);
- memcpy(&stats, &vsi->eth_stats, sizeof(struct i40e_eth_stats));
+ stats = vsi->eth_stats;
error_param:
/* send the response back to the vf */
@@ -1563,6 +1402,40 @@ error_param:
}
/**
+ * i40e_check_vf_permission
+ * @vf: pointer to the vf info
+ * @macaddr: pointer to the MAC Address being checked
+ *
+ * Check if the VF has permission to add or delete unicast MAC address
+ * filters and return error code -EPERM if not. Then check if the
+ * address filter requested is broadcast or zero and if so return
+ * an invalid MAC address error code.
+ **/
+static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
+{
+ struct i40e_pf *pf = vf->pf;
+ int ret = 0;
+
+ if (is_broadcast_ether_addr(macaddr) ||
+ is_zero_ether_addr(macaddr)) {
+ dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
+ ret = I40E_ERR_INVALID_MAC_ADDR;
+ } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
+ !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
+ /* If the host VMM administrator has set the VF MAC address
+ * administratively via the ndo_set_vf_mac command then deny
+ * permission to the VF to add or delete unicast MAC addresses.
+ * The VF may request to set the MAC address filter already
+ * assigned to it so do not return an error in that case.
+ */
+ dev_err(&pf->pdev->dev,
+ "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
+ ret = -EPERM;
+ }
+ return ret;
+}
+
+/**
* i40e_vc_add_mac_addr_msg
* @vf: pointer to the vf info
* @msg: pointer to the msg buffer
@@ -1577,24 +1450,20 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
u16 vsi_id = al->vsi_id;
- i40e_status aq_ret = 0;
+ i40e_status ret = 0;
int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
- aq_ret = I40E_ERR_PARAM;
+ ret = I40E_ERR_PARAM;
goto error_param;
}
for (i = 0; i < al->num_elements; i++) {
- if (is_broadcast_ether_addr(al->list[i].addr) ||
- is_zero_ether_addr(al->list[i].addr)) {
- dev_err(&pf->pdev->dev, "invalid VF MAC addr %pMAC\n",
- al->list[i].addr);
- aq_ret = I40E_ERR_PARAM;
+ ret = i40e_check_vf_permission(vf, al->list[i].addr);
+ if (ret)
goto error_param;
- }
}
vsi = pf->vsi[vsi_id];
@@ -1603,7 +1472,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_mac_filter *f;
f = i40e_find_mac(vsi, al->list[i].addr, true, false);
- if (f) {
+ if (!f) {
if (i40e_is_vsi_in_vlan(vsi))
f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
true, false);
@@ -1615,7 +1484,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
if (!f) {
dev_err(&pf->pdev->dev,
"Unable to add VF MAC filter\n");
- aq_ret = I40E_ERR_PARAM;
+ ret = I40E_ERR_PARAM;
goto error_param;
}
}
@@ -1627,7 +1496,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
error_param:
/* send the response to the vf */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
- aq_ret);
+ ret);
}
/**
@@ -1645,15 +1514,21 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
u16 vsi_id = al->vsi_id;
- i40e_status aq_ret = 0;
+ i40e_status ret = 0;
int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
- aq_ret = I40E_ERR_PARAM;
+ ret = I40E_ERR_PARAM;
goto error_param;
}
+
+ for (i = 0; i < al->num_elements; i++) {
+ ret = i40e_check_vf_permission(vf, al->list[i].addr);
+ if (ret)
+ goto error_param;
+ }
vsi = pf->vsi[vsi_id];
/* delete addresses from the list */
@@ -1668,7 +1543,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
error_param:
/* send the response to the vf */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
- aq_ret);
+ ret);
}
/**
@@ -1777,30 +1652,6 @@ error_param:
}
/**
- * i40e_vc_fcoe_msg
- * @vf: pointer to the vf info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- *
- * called from the vf for the fcoe msgs
- **/
-static int i40e_vc_fcoe_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
-{
- i40e_status aq_ret = 0;
-
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VF_STAT_FCOEENA, &vf->vf_states)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- aq_ret = I40E_ERR_NOT_IMPLEMENTED;
-
-error_param:
- /* send the response to the vf */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_FCOE, aq_ret);
-}
-
-/**
* i40e_vc_validate_vf_msg
* @vf: pointer to the vf info
* @msg: pointer to the msg buffer
@@ -1920,19 +1771,24 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen)
{
- struct i40e_vf *vf = &(pf->vf[vf_id]);
struct i40e_hw *hw = &pf->hw;
+ int local_vf_id = vf_id - hw->func_caps.vf_base_id;
+ struct i40e_vf *vf;
int ret;
pf->vf_aq_requests++;
+ if (local_vf_id >= pf->num_alloc_vfs)
+ return -EINVAL;
+ vf = &(pf->vf[local_vf_id]);
/* perform basic checks on the msg */
ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
if (ret) {
- dev_err(&pf->pdev->dev, "invalid message from vf %d\n", vf_id);
+ dev_err(&pf->pdev->dev, "Invalid message from vf %d, opcode %d, len %d\n",
+ local_vf_id, v_opcode, msglen);
return ret;
}
- wr32(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
+ wr32(hw, I40E_VFGEN_RSTAT1(local_vf_id), I40E_VFR_VFACTIVE);
switch (v_opcode) {
case I40E_VIRTCHNL_OP_VERSION:
ret = i40e_vc_get_version_msg(vf);
@@ -1941,7 +1797,8 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
ret = i40e_vc_get_vf_resources_msg(vf);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
- ret = i40e_vc_reset_vf_msg(vf);
+ i40e_vc_reset_vf_msg(vf);
+ ret = 0;
break;
case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
@@ -1973,13 +1830,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
case I40E_VIRTCHNL_OP_GET_STATS:
ret = i40e_vc_get_stats_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_FCOE:
- ret = i40e_vc_fcoe_msg(vf, msg, msglen);
- break;
case I40E_VIRTCHNL_OP_UNKNOWN:
default:
- dev_err(&pf->pdev->dev,
- "Unsupported opcode %d from vf %d\n", v_opcode, vf_id);
+ dev_err(&pf->pdev->dev, "Unsupported opcode %d from vf %d\n",
+ v_opcode, local_vf_id);
ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
I40E_ERR_NOT_IMPLEMENTED);
break;
@@ -2015,19 +1869,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
/* clear the bit in GLGEN_VFLRSTAT */
wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
- if (i40e_reset_vf(vf, true))
- dev_err(&pf->pdev->dev,
- "Unable to reset the VF %d\n", vf_id);
- /* free up vf resources to destroy vsi state */
- i40e_free_vf_res(vf);
-
- /* allocate new vf resources with the default state */
- if (i40e_alloc_vf_res(vf))
- dev_err(&pf->pdev->dev,
- "Unable to allocate VF resources %d\n",
- vf_id);
-
- i40e_enable_vf_mappings(vf);
+ i40e_reset_vf(vf, true);
}
}
@@ -2183,6 +2025,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto error_param;
}
memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
+ vf->pf_set_mac = true;
dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
ret = 0;
@@ -2243,7 +2086,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
ret = i40e_vsi_add_pvid(vsi,
vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
else
- i40e_vlan_stripping_disable(vsi);
+ i40e_vsi_remove_pvid(vsi);
if (vlan_id) {
dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
@@ -2263,6 +2106,10 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
goto error_pvid;
}
+ /* The Port VLAN needs to be saved across resets the same as the
+ * default LAN MAC address.
+ */
+ vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
ret = 0;
error_pvid:
@@ -2294,7 +2141,6 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
int vf_id, struct ifla_vf_info *ivi)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_mac_filter *f, *ftmp;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_vf *vf;
@@ -2318,11 +2164,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
ivi->vf = vf_id;
- /* first entry of the list is the default ethernet address */
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- memcpy(&ivi->mac, f->macaddr, I40E_ETH_LENGTH_OF_ADDRESS);
- break;
- }
+ memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
ivi->tx_rate = 0;
ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 360382cf3040..cc1feee36e12 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -82,6 +81,8 @@ struct i40e_vf {
struct i40e_virtchnl_ether_addr default_lan_addr;
struct i40e_virtchnl_ether_addr default_fcoe_addr;
+ u16 port_vlan_id;
+ bool pf_set_mac; /* The VMM admin set the VF MAC address */
/* VSI indices - actual VSI pointers are maintained in the PF structure
* When assigned, these will be non-zero, because VSI 0 is always
@@ -104,7 +105,7 @@ int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen);
int i40e_vc_process_vflr_event(struct i40e_pf *pf);
-int i40e_reset_vf(struct i40e_vf *vf, bool flr);
+void i40e_reset_vf(struct i40e_vf *vf, bool flr);
void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
/* vf configuration related iplink handlers */
diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile
new file mode 100644
index 000000000000..e09be37a07a8
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/Makefile
@@ -0,0 +1,33 @@
+################################################################################
+#
+# Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+# Copyright(c) 2013 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+## Makefile for the Intel(R) 40GbE VF driver
+#
+#
+
+obj-$(CONFIG_I40EVF) += i40evf.o
+
+i40evf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \
+ i40e_txrx.o i40e_common.o i40e_adminq.o
+
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
new file mode 100644
index 000000000000..5470ce95936e
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -0,0 +1,927 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_status.h"
+#include "i40e_type.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+
+/**
+ * i40e_adminq_init_regs - Initialize AdminQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_asq and alloc_arq functions have already been called
+ **/
+static void i40e_adminq_init_regs(struct i40e_hw *hw)
+{
+ /* set head and tail registers in our local struct */
+ if (hw->mac.type == I40E_MAC_VF) {
+ hw->aq.asq.tail = I40E_VF_ATQT1;
+ hw->aq.asq.head = I40E_VF_ATQH1;
+ hw->aq.asq.len = I40E_VF_ATQLEN1;
+ hw->aq.arq.tail = I40E_VF_ARQT1;
+ hw->aq.arq.head = I40E_VF_ARQH1;
+ hw->aq.arq.len = I40E_VF_ARQLEN1;
+ } else {
+ hw->aq.asq.tail = I40E_PF_ATQT;
+ hw->aq.asq.head = I40E_PF_ATQH;
+ hw->aq.asq.len = I40E_PF_ATQLEN;
+ hw->aq.arq.tail = I40E_PF_ARQT;
+ hw->aq.arq.head = I40E_PF_ARQH;
+ hw->aq.arq.len = I40E_PF_ARQLEN;
+ }
+}
+
+/**
+ * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+ i40e_mem_atq_ring,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_asq_cmd_details)));
+ if (ret_code) {
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+ return ret_code;
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+ i40e_mem_arq_ring,
+ (hw->aq.num_arq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_adminq_asq - Free Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ **/
+static void i40e_free_adminq_asq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+}
+
+/**
+ * i40e_free_adminq_arq - Free Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted receive buffers have already been cleaned
+ * and de-allocated
+ **/
+static void i40e_free_adminq_arq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+}
+
+/**
+ * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+
+ /* buffer_info structures do not need alignment */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+ (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_arq_bufs;
+ hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_arq_entries; i++) {
+ bi = &hw->aq.arq.r.arq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_arq_buf,
+ hw->aq.arq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_arq_bufs;
+
+ /* now configure the descriptors for use */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
+
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->opcode = 0;
+ /* This is in accordance with the Admin queue design; there is no
+ * register for buffer size configuration
+ */
+ desc->datalen = cpu_to_le16((u16)bi->size);
+ desc->retval = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.external.addr_high =
+ cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.external.addr_low =
+ cpu_to_le32(lower_32_bits(bi->pa));
+ desc->params.external.param0 = 0;
+ desc->params.external.param1 = 0;
+ }
+
+alloc_arq_bufs:
+ return ret_code;
+
+unwind_alloc_arq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* No mapped memory needed yet, just the buffer info structures */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+ (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_asq_bufs;
+ hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_asq_entries; i++) {
+ bi = &hw->aq.asq.r.asq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_asq_buf,
+ hw->aq.asq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_asq_bufs;
+ }
+alloc_asq_bufs:
+ return ret_code;
+
+unwind_alloc_asq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_arq_bufs - Free receive queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_free_arq_bufs(struct i40e_hw *hw)
+{
+ int i;
+
+ /* free descriptors */
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+}
+
+/**
+ * i40e_free_asq_bufs - Free send queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_free_asq_bufs(struct i40e_hw *hw)
+{
+ int i;
+
+ /* only unmap if the address is non-NULL */
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ if (hw->aq.asq.r.asq_bi[i].pa)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+ /* free the buffer info list */
+ i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+}
+
+/**
+ * i40e_config_asq_regs - configure ASQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the transmit queue
+ **/
+static void i40e_config_asq_regs(struct i40e_hw *hw)
+{
+ if (hw->mac.type == I40E_MAC_VF) {
+ /* configure the transmit queue */
+ wr32(hw, I40E_VF_ATQBAH1,
+ upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_VF_ATQBAL1,
+ lower_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
+ I40E_VF_ATQLEN1_ATQENABLE_MASK));
+ } else {
+ /* configure the transmit queue */
+ wr32(hw, I40E_PF_ATQBAH,
+ upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_PF_ATQBAL,
+ lower_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
+ I40E_PF_ATQLEN_ATQENABLE_MASK));
+ }
+}
+
+/**
+ * i40e_config_arq_regs - ARQ register configuration
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the receive (event queue)
+ **/
+static void i40e_config_arq_regs(struct i40e_hw *hw)
+{
+ if (hw->mac.type == I40E_MAC_VF) {
+ /* configure the receive queue */
+ wr32(hw, I40E_VF_ARQBAH1,
+ upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_VF_ARQBAL1,
+ lower_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
+ I40E_VF_ARQLEN1_ARQENABLE_MASK));
+ } else {
+ /* configure the receive queue */
+ wr32(hw, I40E_PF_ARQBAH,
+ upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_PF_ARQBAL,
+ lower_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
+ I40E_PF_ARQLEN_ARQENABLE_MASK));
+ }
+
+ /* Update tail in the HW to post pre-allocated buffers */
+ wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+}
+
+/**
+ * i40e_init_asq - main initialization routine for ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * This is the main initialization routine for the Admin Send Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.asq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this function; the memory
+ * allocation routines called here are not safe in atomic context
+ **/
+static i40e_status i40e_init_asq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.asq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_asq_entries == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+ hw->aq.asq.count = hw->aq.num_asq_entries;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_asq_ring(hw);
+ if (ret_code)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_asq_bufs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ i40e_config_asq_regs(hw);
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_asq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_init_arq - initialize ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main initialization routine for the Admin Receive (Event) Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this function; the memory
+ * allocation routines called here are not safe in atomic context
+ **/
+static i40e_status i40e_init_arq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.arq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+ hw->aq.arq.count = hw->aq.num_arq_entries;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_arq_ring(hw);
+ if (ret_code)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_arq_bufs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ i40e_config_arq_regs(hw);
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_arq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_asq - shutdown the ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Send Queue
+ **/
+static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.asq.count == 0)
+ return I40E_ERR_NOT_READY;
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, hw->aq.asq.len, 0);
+
+ /* make sure lock is available */
+ mutex_lock(&hw->aq.asq_mutex);
+
+ hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_asq_bufs(hw);
+
+ mutex_unlock(&hw->aq.asq_mutex);
+
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_arq - shutdown ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Receive Queue
+ **/
+static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.arq.count == 0)
+ return I40E_ERR_NOT_READY;
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, hw->aq.arq.len, 0);
+
+ /* make sure lock is available */
+ mutex_lock(&hw->aq.arq_mutex);
+
+ hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_arq_bufs(hw);
+
+ mutex_unlock(&hw->aq.arq_mutex);
+
+ return ret_code;
+}
+
+/**
+ * i40evf_init_adminq - main initialization routine for Admin Queue
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ * - hw->aq.asq_buf_size
+ **/
+i40e_status i40evf_init_adminq(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.num_asq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ /* initialize locks */
+ mutex_init(&hw->aq.asq_mutex);
+ mutex_init(&hw->aq.arq_mutex);
+
+ /* Set up register offsets */
+ i40e_adminq_init_regs(hw);
+
+ /* allocate the ASQ */
+ ret_code = i40e_init_asq(hw);
+ if (ret_code)
+ goto init_adminq_destroy_locks;
+
+ /* allocate the ARQ */
+ ret_code = i40e_init_arq(hw);
+ if (ret_code)
+ goto init_adminq_free_asq;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_asq:
+ i40e_shutdown_asq(hw);
+init_adminq_destroy_locks:
+ mutex_destroy(&hw->aq.asq_mutex);
+ mutex_destroy(&hw->aq.arq_mutex);
+
+init_adminq_exit:
+ return ret_code;
+}
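+
+/* Usage sketch (illustrative only, not part of this patch; hw is an
+ * assumed pointer to the device's i40e_hw): the caller fills in the
+ * queue depths and buffer sizes before initializing the Admin Queue.
+ * The values shown are assumed examples, not requirements.
+ *
+ *   hw->aq.num_arq_entries = 32;
+ *   hw->aq.num_asq_entries = 32;
+ *   hw->aq.arq_buf_size = 4096;
+ *   hw->aq.asq_buf_size = 4096;
+ *
+ *   status = i40evf_init_adminq(hw);
+ *   if (status)
+ *           return status;
+ */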
+
+/**
+ * i40evf_shutdown_adminq - shutdown routine for the Admin Queue
+ * @hw: pointer to the hardware structure
+ **/
+i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (i40evf_check_asq_alive(hw))
+ i40evf_aq_queue_shutdown(hw, true);
+
+ i40e_shutdown_asq(hw);
+ i40e_shutdown_arq(hw);
+
+ /* destroy the locks */
+ mutex_destroy(&hw->aq.asq_mutex);
+ mutex_destroy(&hw->aq.arq_mutex);
+
+ return ret_code;
+}
+
+/**
+ * i40e_clean_asq - cleans Admin send queue
+ * @hw: pointer to the hardware structure
+ *
+ * returns the number of free desc
+ **/
+static u16 i40e_clean_asq(struct i40e_hw *hw)
+{
+ struct i40e_adminq_ring *asq = &(hw->aq.asq);
+ struct i40e_asq_cmd_details *details;
+ u16 ntc = asq->next_to_clean;
+ struct i40e_aq_desc desc_cb;
+ struct i40e_aq_desc *desc;
+
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ while (rd32(hw, hw->aq.asq.head) != ntc) {
+ if (details->callback) {
+ I40E_ADMINQ_CALLBACK cb_func =
+ (I40E_ADMINQ_CALLBACK)details->callback;
+ desc_cb = *desc;
+ cb_func(hw, &desc_cb);
+ }
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ memset((void *)details, 0,
+ sizeof(struct i40e_asq_cmd_details));
+ ntc++;
+ if (ntc == asq->count)
+ ntc = 0;
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ }
+
+ asq->next_to_clean = ntc;
+
+ return I40E_DESC_UNUSED(asq);
+}
+
+/**
+ * i40evf_asq_done - check if FW has processed the Admin Send Queue
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the firmware has processed all descriptors on the
+ * admin send queue. Returns false if there are still requests pending.
+ **/
+bool i40evf_asq_done(struct i40e_hw *hw)
+{
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+}
+
+/**
+ * i40evf_asq_send_command - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ *
+ * This is the main send command routine for the Admin Queue send queue.
+ * It reclaims completed descriptors, posts the new descriptor, bumps the
+ * queue tail, and waits for firmware writeback unless the command is
+ * asynchronous or postponed
+ **/
+i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ i40e_status status = 0;
+ struct i40e_dma_mem *dma_buff = NULL;
+ struct i40e_asq_cmd_details *details;
+ struct i40e_aq_desc *desc_on_ring;
+ bool cmd_completed = false;
+ u16 retval = 0;
+
+ if (hw->aq.asq.count == 0) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Admin queue not initialized.\n");
+ status = I40E_ERR_QUEUE_EMPTY;
+ goto asq_send_command_exit;
+ }
+
+ details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+ if (cmd_details) {
+ *details = *cmd_details;
+
+ /* If the cmd_details are defined copy the cookie. The
+ * cpu_to_le32 is not needed here because the data is ignored
+ * by the FW, only used by the driver
+ */
+ if (details->cookie) {
+ desc->cookie_high =
+ cpu_to_le32(upper_32_bits(details->cookie));
+ desc->cookie_low =
+ cpu_to_le32(lower_32_bits(details->cookie));
+ }
+ } else {
+ memset(details, 0, sizeof(struct i40e_asq_cmd_details));
+ }
+
+ /* clear requested flags and then set additional flags if defined */
+ desc->flags &= ~cpu_to_le16(details->flags_dis);
+ desc->flags |= cpu_to_le16(details->flags_ena);
+
+ mutex_lock(&hw->aq.asq_mutex);
+
+ if (buff_size > hw->aq.asq_buf_size) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Invalid buffer size: %d.\n",
+ buff_size);
+ status = I40E_ERR_INVALID_SIZE;
+ goto asq_send_command_error;
+ }
+
+ if (details->postpone && !details->async) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Async flag not set along with postpone flag");
+ status = I40E_ERR_PARAM;
+ goto asq_send_command_error;
+ }
+
+ /* call the clean function to reclaim the descriptors that were
+ * processed by FW; it returns the number of descriptors available
+ */
+ /* the clean function called here could be called in a separate thread
+ * in case of asynchronous completions
+ */
+ if (i40e_clean_asq(hw) == 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Error queue is full.\n");
+ status = I40E_ERR_ADMIN_QUEUE_FULL;
+ goto asq_send_command_error;
+ }
+
+ /* initialize the temp desc pointer with the right desc */
+ desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+ /* if the desc is available copy the temp desc to the right place */
+ *desc_on_ring = *desc;
+
+ /* if buff is not NULL assume indirect command */
+ if (buff != NULL) {
+ dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+ /* copy the user buff into the respective DMA buff */
+ memcpy(dma_buff->va, buff, buff_size);
+ desc_on_ring->datalen = cpu_to_le16(buff_size);
+
+ /* Update the address values in the desc with the pa value
+ * for respective buffer
+ */
+ desc_on_ring->params.external.addr_high =
+ cpu_to_le32(upper_32_bits(dma_buff->pa));
+ desc_on_ring->params.external.addr_low =
+ cpu_to_le32(lower_32_bits(dma_buff->pa));
+ }
+
+ /* bump the tail */
+ i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
+ (hw->aq.asq.next_to_use)++;
+ if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+ hw->aq.asq.next_to_use = 0;
+ if (!details->postpone)
+ wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+ /* if cmd_details are not defined or async flag is not set,
+ * we need to wait for desc write back
+ */
+ if (!details->async && !details->postpone) {
+ u32 total_delay = 0;
+ u32 delay_len = 10;
+
+ do {
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ if (i40evf_asq_done(hw))
+ break;
+ /* ugh! delay while spin_lock */
+ udelay(delay_len);
+ total_delay += delay_len;
+ } while (total_delay < I40E_ASQ_CMD_TIMEOUT);
+ }
+
+ /* if ready, copy the desc back to temp */
+ if (i40evf_asq_done(hw)) {
+ *desc = *desc_on_ring;
+ if (buff != NULL)
+ memcpy(buff, dma_buff->va, buff_size);
+ retval = le16_to_cpu(desc->retval);
+ if (retval != 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Command completed with error 0x%X.\n",
+ retval);
+ /* strip off FW internal code */
+ retval &= 0xff;
+ }
+ cmd_completed = true;
+ if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+ status = 0;
+ else
+ status = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+ }
+
+ /* update the error if a timeout occurred */
+ if ((!cmd_completed) &&
+ (!details->async && !details->postpone)) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
+
+asq_send_command_error:
+ mutex_unlock(&hw->aq.asq_mutex);
+asq_send_command_exit:
+ return status;
+}
+
+/**
+ * i40evf_fill_default_direct_cmd_desc - AQ descriptor helper function
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Fill the desc with default values
+ **/
+void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode)
+{
+ /* zero out the desc */
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
+}
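+
+/* Usage sketch (illustrative; hw and desc are assumed locals): issuing
+ * a direct command with the two routines above. i40e_aqc_opc_get_version
+ * is one of the opcodes defined in i40e_adminq_cmd.h; a direct command
+ * carries no buffer, so buff is NULL and buff_size is 0.
+ *
+ *   struct i40e_aq_desc desc;
+ *   i40e_status status;
+ *
+ *   i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
+ *   status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);
+ *   if (status)
+ *           return status;
+ *
+ * On failure, the firmware return code is saved in
+ * hw->aq.asq_last_status.
+ */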
+
+/**
+ * i40evf_clean_arq_element
+ * @hw: pointer to the hw struct
+ * @e: event info from the receive descriptor, includes any buffers
+ * @pending: number of events that could be left to process
+ *
+ * This function cleans one Admin Receive Queue element and returns
+ * the contents through e. It can also return how many events are
+ * left to process through 'pending'
+ **/
+i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *pending)
+{
+ i40e_status ret_code = 0;
+ u16 ntc = hw->aq.arq.next_to_clean;
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ u16 desc_idx;
+ u16 datalen;
+ u16 flags;
+ u16 ntu;
+
+ /* take the lock before we start messing with the ring */
+ mutex_lock(&hw->aq.arq_mutex);
+
+ /* set next_to_use to head */
+ ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+ if (ntu == ntc) {
+ /* nothing to do - shouldn't need to update ring's values */
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Queue is empty.\n");
+ ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+ goto clean_arq_element_out;
+ }
+
+ /* now clean the next descriptor */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
+ desc_idx = ntc;
+ i40evf_debug_aq(hw,
+ I40E_DEBUG_AQ_COMMAND,
+ (void *)desc,
+ hw->aq.arq.r.arq_bi[desc_idx].va);
+
+ flags = le16_to_cpu(desc->flags);
+ if (flags & I40E_AQ_FLAG_ERR) {
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Event received with error 0x%X.\n",
+ hw->aq.arq_last_status);
+ } else {
+ e->desc = *desc;
+ datalen = le16_to_cpu(desc->datalen);
+ e->msg_size = min(datalen, e->msg_size);
+ if (e->msg_buf != NULL && (e->msg_size != 0))
+ memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
+ e->msg_size);
+ }
+
+ /* Restore the original datalen and buffer address in the desc;
+ * FW updates datalen to indicate the size of the event message
+ */
+ bi = &hw->aq.arq.r.arq_bi[ntc];
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->datalen = cpu_to_le16((u16)bi->size);
+ desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
+
+ /* set tail = the last cleaned desc index. */
+ wr32(hw, hw->aq.arq.tail, ntc);
+ /* ntc is updated to tail + 1 */
+ ntc++;
+ if (ntc == hw->aq.num_arq_entries)
+ ntc = 0;
+ hw->aq.arq.next_to_clean = ntc;
+ hw->aq.arq.next_to_use = ntu;
+
+clean_arq_element_out:
+ /* Set pending if needed, unlock and return */
+ if (pending != NULL)
+ *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+ mutex_unlock(&hw->aq.arq_mutex);
+
+ return ret_code;
+}
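+
+/* Usage sketch (illustrative; buf is an assumed caller-owned buffer of
+ * hw->aq.arq_buf_size bytes): draining the receive queue after an AQ
+ * interrupt. msg_size is both input (buffer capacity) and output (bytes
+ * copied), so it is reset on every iteration.
+ *
+ *   struct i40e_arq_event_info event;
+ *   u16 pending = 0;
+ *
+ *   event.msg_buf = buf;
+ *   do {
+ *           event.msg_size = hw->aq.arq_buf_size;
+ *           if (i40evf_clean_arq_element(hw, &event, &pending))
+ *                   break;
+ *           ... handle event.desc and event.msg_buf here ...
+ *   } while (pending);
+ */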
+
+void i40evf_resume_aq(struct i40e_hw *hw)
+{
+ /* Registers are reset after PF reset */
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+ i40e_config_asq_regs(hw);
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ i40e_config_arq_regs(hw);
+}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
new file mode 100644
index 000000000000..8f72c31d95cc
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -0,0 +1,106 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ADMINQ_H_
+#define _I40E_ADMINQ_H_
+
+#include "i40e_osdep.h"
+#include "i40e_adminq_cmd.h"
+
+#define I40E_ADMINQ_DESC(R, i) \
+ (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
+
+#define I40E_ADMINQ_DESC_ALIGNMENT 4096
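+
+/* For illustration: I40E_ADMINQ_DESC(hw->aq.asq, 5) evaluates to
+ * &((struct i40e_aq_desc *)hw->aq.asq.desc_buf.va)[5], i.e. a pointer
+ * to the sixth descriptor in the send ring.
+ */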
+
+struct i40e_adminq_ring {
+ struct i40e_virt_mem dma_head; /* space for dma structures */
+ struct i40e_dma_mem desc_buf; /* descriptor ring memory */
+ struct i40e_virt_mem cmd_buf; /* command buffer memory */
+
+ union {
+ struct i40e_dma_mem *asq_bi;
+ struct i40e_dma_mem *arq_bi;
+ } r;
+
+ u16 count; /* Number of descriptors */
+ u16 rx_buf_len; /* Admin Receive Queue buffer length */
+
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+ u32 len;
+};
+
+/* ASQ transaction details */
+struct i40e_asq_cmd_details {
+ void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
+ u64 cookie;
+ u16 flags_ena;
+ u16 flags_dis;
+ bool async;
+ bool postpone;
+};
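+
+/* Illustrative example (my_aq_callback, hw, and desc are assumed names):
+ * details for an asynchronous command whose completion is handled by a
+ * callback invoked from the ASQ clean routine.
+ *
+ *   struct i40e_asq_cmd_details details = {
+ *           .callback = my_aq_callback,
+ *           .async = true,
+ *   };
+ *
+ *   i40evf_asq_send_command(hw, &desc, NULL, 0, &details);
+ */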
+
+#define I40E_ADMINQ_DETAILS(R, i) \
+ (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
+
+/* ARQ event information */
+struct i40e_arq_event_info {
+ struct i40e_aq_desc desc;
+ u16 msg_size;
+ u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct i40e_adminq_info {
+ struct i40e_adminq_ring arq; /* receive queue */
+ struct i40e_adminq_ring asq; /* send queue */
+ u16 num_arq_entries; /* receive queue depth */
+ u16 num_asq_entries; /* send queue depth */
+ u16 arq_buf_size; /* receive queue buffer size */
+ u16 asq_buf_size; /* send queue buffer size */
+ u16 fw_maj_ver; /* firmware major version */
+ u16 fw_min_ver; /* firmware minor version */
+ u16 api_maj_ver; /* api major version */
+ u16 api_min_ver; /* api minor version */
+
+ struct mutex asq_mutex; /* Send queue lock */
+ struct mutex arq_mutex; /* Receive queue lock */
+
+ /* last status values on send and receive queues */
+ enum i40e_admin_queue_err asq_last_status;
+ enum i40e_admin_queue_err arq_last_status;
+};
+
+/* general information */
+#define I40E_AQ_LARGE_BUF 512
+#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */
+
+void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode);
+
+#endif /* _I40E_ADMINQ_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
new file mode 100644
index 000000000000..f7cea1bca38d
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -0,0 +1,2153 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ADMINQ_CMD_H_
+#define _I40E_ADMINQ_CMD_H_
+
+/* This header file defines the i40e Admin Queue commands and is shared between
+ * i40e Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define I40E_FW_API_VERSION_MAJOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0001
+#define I40E_FW_API_VERSION_A0_MINOR 0x0000
+
+struct i40e_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } internal;
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+ } external;
+ u8 raw[16];
+ } params;
+};
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets */
+#define I40E_AQ_FLAG_DD_SHIFT 0
+#define I40E_AQ_FLAG_CMP_SHIFT 1
+#define I40E_AQ_FLAG_ERR_SHIFT 2
+#define I40E_AQ_FLAG_VFE_SHIFT 3
+#define I40E_AQ_FLAG_LB_SHIFT 9
+#define I40E_AQ_FLAG_RD_SHIFT 10
+#define I40E_AQ_FLAG_VFC_SHIFT 11
+#define I40E_AQ_FLAG_BUF_SHIFT 12
+#define I40E_AQ_FLAG_SI_SHIFT 13
+#define I40E_AQ_FLAG_EI_SHIFT 14
+#define I40E_AQ_FLAG_FE_SHIFT 15
+
+#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
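+
+/* Worked example from the defines above: an indirect command whose
+ * buffer exceeds I40E_AQ_LARGE_BUF sets both BUF and LB, giving
+ * flags = 0x1000 | 0x200 = 0x1200; if firmware must also read data
+ * from the buffer, RD is added as well, for 0x1600.
+ */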
+
+/* error codes */
+enum i40e_admin_queue_err {
+ I40E_AQ_RC_OK = 0, /* success */
+ I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
+ I40E_AQ_RC_ENOENT = 2, /* No such element */
+ I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
+ I40E_AQ_RC_EINTR = 4, /* operation interrupted */
+ I40E_AQ_RC_EIO = 5, /* I/O error */
+ I40E_AQ_RC_ENXIO = 6, /* No such resource */
+ I40E_AQ_RC_E2BIG = 7, /* Arg too long */
+ I40E_AQ_RC_EAGAIN = 8, /* Try again */
+ I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
+ I40E_AQ_RC_EACCES = 10, /* Permission denied */
+ I40E_AQ_RC_EFAULT = 11, /* Bad address */
+ I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ I40E_AQ_RC_EEXIST = 13, /* object already exists */
+ I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
+ I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed because of prev cmd error */
+ I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ I40E_AQ_RC_EFBIG = 22, /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum i40e_admin_queue_opc {
+ /* aq commands */
+ i40e_aqc_opc_get_version = 0x0001,
+ i40e_aqc_opc_driver_version = 0x0002,
+ i40e_aqc_opc_queue_shutdown = 0x0003,
+
+ /* resource ownership */
+ i40e_aqc_opc_request_resource = 0x0008,
+ i40e_aqc_opc_release_resource = 0x0009,
+
+ i40e_aqc_opc_list_func_capabilities = 0x000A,
+ i40e_aqc_opc_list_dev_capabilities = 0x000B,
+
+ i40e_aqc_opc_set_cppm_configuration = 0x0103,
+ i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
+
+ /* LAA */
+ i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
+ i40e_aqc_opc_mac_address_read = 0x0107,
+ i40e_aqc_opc_mac_address_write = 0x0108,
+
+ /* PXE */
+ i40e_aqc_opc_clear_pxe_mode = 0x0110,
+
+ /* internal switch commands */
+ i40e_aqc_opc_get_switch_config = 0x0200,
+ i40e_aqc_opc_add_statistics = 0x0201,
+ i40e_aqc_opc_remove_statistics = 0x0202,
+ i40e_aqc_opc_set_port_parameters = 0x0203,
+ i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+
+ i40e_aqc_opc_add_vsi = 0x0210,
+ i40e_aqc_opc_update_vsi_parameters = 0x0211,
+ i40e_aqc_opc_get_vsi_parameters = 0x0212,
+
+ i40e_aqc_opc_add_pv = 0x0220,
+ i40e_aqc_opc_update_pv_parameters = 0x0221,
+ i40e_aqc_opc_get_pv_parameters = 0x0222,
+
+ i40e_aqc_opc_add_veb = 0x0230,
+ i40e_aqc_opc_update_veb_parameters = 0x0231,
+ i40e_aqc_opc_get_veb_parameters = 0x0232,
+
+ i40e_aqc_opc_delete_element = 0x0243,
+
+ i40e_aqc_opc_add_macvlan = 0x0250,
+ i40e_aqc_opc_remove_macvlan = 0x0251,
+ i40e_aqc_opc_add_vlan = 0x0252,
+ i40e_aqc_opc_remove_vlan = 0x0253,
+ i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ i40e_aqc_opc_add_tag = 0x0255,
+ i40e_aqc_opc_remove_tag = 0x0256,
+ i40e_aqc_opc_add_multicast_etag = 0x0257,
+ i40e_aqc_opc_remove_multicast_etag = 0x0258,
+ i40e_aqc_opc_update_tag = 0x0259,
+ i40e_aqc_opc_add_control_packet_filter = 0x025A,
+ i40e_aqc_opc_remove_control_packet_filter = 0x025B,
+ i40e_aqc_opc_add_cloud_filters = 0x025C,
+ i40e_aqc_opc_remove_cloud_filters = 0x025D,
+
+ i40e_aqc_opc_add_mirror_rule = 0x0260,
+ i40e_aqc_opc_delete_mirror_rule = 0x0261,
+
+ i40e_aqc_opc_set_storm_control_config = 0x0280,
+ i40e_aqc_opc_get_storm_control_config = 0x0281,
+
+ /* DCB commands */
+ i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
+ i40e_aqc_opc_dcb_updated = 0x0302,
+
+ /* TX scheduler */
+ i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ i40e_aqc_opc_query_vsi_bw_config = 0x0408,
+ i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
+ i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
+ i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ i40e_aqc_opc_query_port_ets_config = 0x0419,
+ i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ i40e_aqc_opc_suspend_port_tx = 0x041B,
+ i40e_aqc_opc_resume_port_tx = 0x041C,
+
+ /* hmc */
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
+
+ /* phy commands*/
+ i40e_aqc_opc_get_phy_abilities = 0x0600,
+ i40e_aqc_opc_set_phy_config = 0x0601,
+ i40e_aqc_opc_set_mac_config = 0x0603,
+ i40e_aqc_opc_set_link_restart_an = 0x0605,
+ i40e_aqc_opc_get_link_status = 0x0607,
+ i40e_aqc_opc_set_phy_int_mask = 0x0613,
+ i40e_aqc_opc_get_local_advt_reg = 0x0614,
+ i40e_aqc_opc_set_local_advt_reg = 0x0615,
+ i40e_aqc_opc_get_partner_advt = 0x0616,
+ i40e_aqc_opc_set_lb_modes = 0x0618,
+ i40e_aqc_opc_get_phy_wol_caps = 0x0621,
+ i40e_aqc_opc_set_phy_reset = 0x0622,
+ i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
+
+ /* NVM commands */
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+
+ /* virtualization commands */
+ i40e_aqc_opc_send_msg_to_pf = 0x0801,
+ i40e_aqc_opc_send_msg_to_vf = 0x0802,
+ i40e_aqc_opc_send_msg_to_peer = 0x0803,
+
+ /* alternate structure */
+ i40e_aqc_opc_alternate_write = 0x0900,
+ i40e_aqc_opc_alternate_write_indirect = 0x0901,
+ i40e_aqc_opc_alternate_read = 0x0902,
+ i40e_aqc_opc_alternate_read_indirect = 0x0903,
+ i40e_aqc_opc_alternate_write_done = 0x0904,
+ i40e_aqc_opc_alternate_set_mode = 0x0905,
+ i40e_aqc_opc_alternate_clear_port = 0x0906,
+
+ /* LLDP commands */
+ i40e_aqc_opc_lldp_get_mib = 0x0A00,
+ i40e_aqc_opc_lldp_update_mib = 0x0A01,
+ i40e_aqc_opc_lldp_add_tlv = 0x0A02,
+ i40e_aqc_opc_lldp_update_tlv = 0x0A03,
+ i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
+ i40e_aqc_opc_lldp_stop = 0x0A05,
+ i40e_aqc_opc_lldp_start = 0x0A06,
+
+ /* Tunnel commands */
+ i40e_aqc_opc_add_udp_tunnel = 0x0B00,
+ i40e_aqc_opc_del_udp_tunnel = 0x0B01,
+ i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+
+ /* Async Events */
+ i40e_aqc_opc_event_lan_overflow = 0x1001,
+
+ /* OEM commands */
+ i40e_aqc_opc_oem_parameter_change = 0xFE00,
+ i40e_aqc_opc_oem_device_status_change = 0xFE01,
+
+ /* debug commands */
+ i40e_aqc_opc_debug_get_deviceid = 0xFF00,
+ i40e_aqc_opc_debug_set_mode = 0xFF01,
+ i40e_aqc_opc_debug_read_reg = 0xFF03,
+ i40e_aqc_opc_debug_write_reg = 0xFF04,
+ i40e_aqc_opc_debug_read_reg_sg = 0xFF05,
+ i40e_aqc_opc_debug_write_reg_sg = 0xFF06,
+ i40e_aqc_opc_debug_modify_reg = 0xFF07,
+ i40e_aqc_opc_debug_dump_internals = 0xFF08,
+ i40e_aqc_opc_debug_modify_internals = 0xFF09,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both sent and returned uses _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
+ { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
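+
+/* For illustration: I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version) expands
+ * to an enum whose initializer is 16/1 when the struct is exactly 16
+ * bytes, and 16/0 (a compile-time divide-by-zero) otherwise.
+ */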
+
+/* internal (0x00XX) commands */
+
+/* Get version (direct 0x0001) */
+struct i40e_aqc_get_version {
+ __le32 rom_ver;
+ __le32 fw_build;
+ __le16 fw_major;
+ __le16 fw_minor;
+ __le16 api_major;
+ __le16 api_minor;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
+
+/* Send driver version (indirect 0x0002) */
+struct i40e_aqc_driver_version {
+ u8 driver_major_ver;
+ u8 driver_minor_ver;
+ u8 driver_build_ver;
+ u8 driver_subbuild_ver;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
+
+/* Queue Shutdown (direct 0x0003) */
+struct i40e_aqc_queue_shutdown {
+ __le32 driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
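+
+/* Illustrative use (desc is an assumed local i40e_aq_desc): a shutdown
+ * issued while the driver is unloading, as on the path through
+ * i40evf_shutdown_adminq(), overlays this struct on the params area:
+ *
+ *   struct i40e_aqc_queue_shutdown *cmd =
+ *           (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ *
+ *   i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
+ *   cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+ */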
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+#define I40E_AQ_RESOURCE_NVM 1
+#define I40E_AQ_RESOURCE_SDP 2
+#define I40E_AQ_RESOURCE_ACCESS_READ 1
+#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
+#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
+#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
+
+struct i40e_aqc_request_resource {
+ __le16 resource_id;
+ __le16 access_type;
+ __le32 timeout;
+ __le32 resource_number;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct i40e_aqc_list_capabilites {
+ u8 command_flags;
+#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
+
+struct i40e_aqc_list_capabilities_element_resp {
+ __le16 id;
+ u8 major_rev;
+ u8 minor_rev;
+ __le32 number;
+ __le32 logical_id;
+ __le32 phys_id;
+ u8 reserved[16];
+};
+
+/* list of caps */
+
+#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
+#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
+#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
+#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
+#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
+#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
+#define I40E_AQ_CAP_ID_SRIOV 0x0012
+#define I40E_AQ_CAP_ID_VF 0x0013
+#define I40E_AQ_CAP_ID_VMDQ 0x0014
+#define I40E_AQ_CAP_ID_8021QBG 0x0015
+#define I40E_AQ_CAP_ID_8021QBR 0x0016
+#define I40E_AQ_CAP_ID_VSI 0x0017
+#define I40E_AQ_CAP_ID_DCB 0x0018
+#define I40E_AQ_CAP_ID_FCOE 0x0021
+#define I40E_AQ_CAP_ID_RSS 0x0040
+#define I40E_AQ_CAP_ID_RXQ 0x0041
+#define I40E_AQ_CAP_ID_TXQ 0x0042
+#define I40E_AQ_CAP_ID_MSIX 0x0043
+#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
+#define I40E_AQ_CAP_ID_1588 0x0046
+#define I40E_AQ_CAP_ID_IWARP 0x0051
+#define I40E_AQ_CAP_ID_LED 0x0061
+#define I40E_AQ_CAP_ID_SDP 0x0062
+#define I40E_AQ_CAP_ID_MDIO 0x0063
+#define I40E_AQ_CAP_ID_FLEX10 0x00F1
+#define I40E_AQ_CAP_ID_CEM 0x00F2
+
+/* Set CPPM Configuration (direct 0x0103) */
+struct i40e_aqc_cppm_configuration {
+ __le16 command_flags;
+#define I40E_AQ_CPPM_EN_LTRC 0x0800
+#define I40E_AQ_CPPM_EN_DMCTH 0x1000
+#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
+#define I40E_AQ_CPPM_EN_HPTC 0x4000
+#define I40E_AQ_CPPM_EN_DMARC 0x8000
+ __le16 ttlx;
+ __le32 dmacr;
+ __le16 dmcth;
+ u8 hptc;
+ u8 reserved;
+ __le32 pfltrc;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
+
+/* Set ARP Proxy command / response (indirect 0x0104) */
+struct i40e_aqc_arp_proxy_data {
+ __le16 command_flags;
+#define I40E_AQ_ARP_INIT_IPV4 0x0008
+#define I40E_AQ_ARP_UNSUP_CTL 0x0010
+#define I40E_AQ_ARP_ENA 0x0020
+#define I40E_AQ_ARP_ADD_IPV4 0x0040
+#define I40E_AQ_ARP_DEL_IPV4 0x0080
+ __le16 table_id;
+ __le32 pfpm_proxyfc;
+ __le32 ip_addr;
+ u8 mac_addr[6];
+};
+
+/* Set NS Proxy Table Entry Command (indirect 0x0105) */
+struct i40e_aqc_ns_proxy_data {
+ __le16 table_idx_mac_addr_0;
+ __le16 table_idx_mac_addr_1;
+ __le16 table_idx_ipv6_0;
+ __le16 table_idx_ipv6_1;
+ __le16 control;
+#define I40E_AQ_NS_PROXY_ADD_0 0x0100
+#define I40E_AQ_NS_PROXY_DEL_0 0x0200
+#define I40E_AQ_NS_PROXY_ADD_1 0x0400
+#define I40E_AQ_NS_PROXY_DEL_1 0x0800
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+ u8 mac_addr_0[6];
+ u8 mac_addr_1[6];
+ u8 local_mac_addr[6];
+ u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+ u8 ipv6_addr_1[16];
+};
+
+/* Manage LAA Command (0x0106) - obsolete */
+struct i40e_aqc_mng_laa {
+ __le16 command_flags;
+#define I40E_AQ_LAA_FLAG_WR 0x8000
+ u8 reserved[2];
+ __le32 sal;
+ __le16 sah;
+ u8 reserved2[6];
+};
+
+/* Manage MAC Address Read Command (indirect 0x0107) */
+struct i40e_aqc_mac_address_read {
+ __le16 command_flags;
+#define I40E_AQC_LAN_ADDR_VALID 0x10
+#define I40E_AQC_SAN_ADDR_VALID 0x20
+#define I40E_AQC_PORT_ADDR_VALID 0x40
+#define I40E_AQC_WOL_ADDR_VALID 0x80
+#define I40E_AQC_ADDR_VALID_MASK 0xf0
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
+
+struct i40e_aqc_mac_address_read_data {
+ u8 pf_lan_mac[6];
+ u8 pf_san_mac[6];
+ u8 port_mac[6];
+ u8 pf_wol_mac[6];
+};
+
+I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
+
+/* Manage MAC Address Write Command (0x0108) */
+struct i40e_aqc_mac_address_write {
+ __le16 command_flags;
+#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
+#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
+#define I40E_AQC_WRITE_TYPE_PORT 0x8000
+#define I40E_AQC_WRITE_TYPE_MASK 0xc000
+ __le16 mac_sah;
+ __le32 mac_sal;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
+
+/* PXE commands (0x011x) */
+
+/* Clear PXE Command and response (direct 0x0110) */
+struct i40e_aqc_clear_pxe {
+ u8 rx_cnt;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+
+/* Switch configuration commands (0x02xx) */
+
+/* Used by many indirect commands that only pass an SEID and a buffer in the
+ * command
+ */
+struct i40e_aqc_switch_seid {
+ __le16 seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
+
+/* Get Switch Configuration command (indirect 0x0200)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_switch_config_header_resp {
+ __le16 num_reported;
+ __le16 num_total;
+ u8 reserved[12];
+};
+
+struct i40e_aqc_switch_config_element_resp {
+ u8 element_type;
+#define I40E_AQ_SW_ELEM_TYPE_MAC 1
+#define I40E_AQ_SW_ELEM_TYPE_PF 2
+#define I40E_AQ_SW_ELEM_TYPE_VF 3
+#define I40E_AQ_SW_ELEM_TYPE_EMP 4
+#define I40E_AQ_SW_ELEM_TYPE_BMC 5
+#define I40E_AQ_SW_ELEM_TYPE_PV 16
+#define I40E_AQ_SW_ELEM_TYPE_VEB 17
+#define I40E_AQ_SW_ELEM_TYPE_PA 18
+#define I40E_AQ_SW_ELEM_TYPE_VSI 19
+ u8 revision;
+#define I40E_AQ_SW_ELEM_REV_1 1
+ __le16 seid;
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ u8 reserved[3];
+ u8 connection_type;
+#define I40E_AQ_CONN_TYPE_REGULAR 0x1
+#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_CONN_TYPE_CASCADED 0x3
+ __le16 scheduler_id;
+ __le16 element_info;
+};
+
+/* Get Switch Configuration (indirect 0x0200)
+ * an array of elements is returned in the response buffer;
+ * the first entry in the array is the header, the remainder are elements
+ */
+struct i40e_aqc_get_switch_config_resp {
+ struct i40e_aqc_get_switch_config_header_resp header;
+ struct i40e_aqc_switch_config_element_resp element[1];
+};
+
+/* Add Statistics (direct 0x0201)
+ * Remove Statistics (direct 0x0202)
+ */
+struct i40e_aqc_add_remove_statistics {
+ __le16 seid;
+ __le16 vlan;
+ __le16 stat_index;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
+
+/* Set Port Parameters command (direct 0x0203) */
+struct i40e_aqc_set_port_parameters {
+ __le16 command_flags;
+#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
+#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
+#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
+ __le16 bad_frame_vsi;
+ __le16 default_seid; /* reserved for command */
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
+
+/* Get Switch Resource Allocation (indirect 0x0204) */
+struct i40e_aqc_get_switch_resource_alloc {
+ u8 num_entries; /* reserved for command */
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
+
+/* expect an array of these structs in the response buffer */
+struct i40e_aqc_switch_resource_alloc_element_resp {
+ u8 resource_type;
+#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
+#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
+#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
+#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
+#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
+#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
+#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
+#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
+#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
+#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
+#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
+#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
+#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
+#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
+#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
+#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
+#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
+#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
+#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
+ u8 reserved1;
+ __le16 guaranteed;
+ __le16 total;
+ __le16 used;
+ __le16 total_unalloced;
+ u8 reserved2[6];
+};
+
+/* Add VSI (indirect 0x0210)
+ * this indirect command uses struct i40e_aqc_vsi_properties_data
+ * as the indirect buffer (128 bytes)
+ *
+ * Update VSI (indirect 0x211)
+ * uses the same data structure as Add VSI
+ *
+ * Get VSI (indirect 0x0212)
+ * uses the same completion and data structure as Add VSI
+ */
+struct i40e_aqc_add_get_update_vsi {
+ __le16 uplink_seid;
+ u8 connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
+#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
+ u8 reserved1;
+ u8 vf_id;
+ u8 reserved2;
+ __le16 vsi_flags;
+#define I40E_AQ_VSI_TYPE_SHIFT 0x0
+#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
+#define I40E_AQ_VSI_TYPE_VF 0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
+#define I40E_AQ_VSI_TYPE_PF 0x2
+#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
+#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
+#define I40E_AQ_VSI_FLAG_CLOUD_VSI 0x8
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
+
+struct i40e_aqc_add_get_update_vsi_completion {
+ __le16 seid;
+ __le16 vsi_number;
+ __le16 vsi_used;
+ __le16 vsi_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
+
+struct i40e_aqc_vsi_properties_data {
+ /* first 96 bytes are written by SW */
+ __le16 valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
+ /* switch section */
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
+#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
+ /* security section */
+ u8 sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANs include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
+ /* cascaded PV section */
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
+#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ /* queueing option section */
+ u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+ u8 queueing_opt_reserved[3];
+ /* scheduler section */
+ u8 up_enable_bits;
+ u8 sched_reserved;
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress table */
+ u8 cmd_reserved[8];
+ /* last 32 bytes are written by FW */
+ __le16 qs_handle[8];
+#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
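+
+/* Illustrative example (info is an assumed pointer to this struct and
+ * vid an assumed VLAN id): configuring a port VLAN marks the VLAN
+ * section valid and requests PVID insertion on transmit:
+ *
+ *   info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ *   info->pvid = cpu_to_le16(vid);
+ *   info->port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
+ *                           I40E_AQ_VSI_PVLAN_INSERT_PVID;
+ */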
+
+/* Add Port Virtualizer (direct 0x0220)
+ * also used for update PV (direct 0x0221) but only flags are used
+ * (IS_CTRL_PORT only works on add PV)
+ */
+struct i40e_aqc_add_update_pv {
+ __le16 command_flags;
+#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
+#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
+ __le16 uplink_seid;
+ __le16 connected_seid;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
+
+struct i40e_aqc_add_update_pv_completion {
+ /* reserved for update; for add also encodes error if rc == ENOSPC */
+ __le16 pv_seid;
+#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
+#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
+
+/* Get PV Params (direct 0x0222)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+
+struct i40e_aqc_get_pv_params_completion {
+ __le16 seid;
+ __le16 default_stag;
+ __le16 pv_flags; /* same flags as add_pv */
+#define I40E_AQC_GET_PV_PV_TYPE 0x1
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
+ u8 reserved[8];
+ __le16 default_port_seid;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
+
+/* Add VEB (direct 0x0230) */
+struct i40e_aqc_add_veb {
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ __le16 veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING 0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
+ I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8
+ u8 enable_tcs;
+ u8 reserved[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
+
+struct i40e_aqc_add_veb_completion {
+ u8 reserved[6];
+ __le16 switch_seid;
+ /* also encodes error if rc == ENOSPC; codes are the same as add_pv */
+ __le16 veb_seid;
+#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_veb_parameters_completion {
+ __le16 seid;
+ __le16 switch_id;
+ __le16 veb_flags; /* only the first/last flags from 0x0230 are valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
+
+/* Delete Element (direct 0x0243)
+ * uses the generic i40e_aqc_switch_seid
+ */
+
+/* Add MAC-VLAN (indirect 0x0250) */
+
+/* used for the command for most vlan commands */
+struct i40e_aqc_macvlan {
+ __le16 num_addresses;
+ __le16 seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
+
+/* indirect data for command and response */
+struct i40e_aqc_add_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ __le16 flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
+#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
+#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
+ __le16 queue_number;
+#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
+ I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT)
+ /* response section */
+ u8 match_method;
+#define I40E_AQC_MM_PERFECT_MATCH 0x01
+#define I40E_AQC_MM_HASH_MATCH 0x02
+#define I40E_AQC_MM_ERR_NO_RES 0xFF
+ u8 reserved1[3];
+};
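+
+/* Usage sketch (illustrative): fill one element for a perfect-match MAC
+ * filter that ignores the VLAN tag. The element array itself lives in
+ * the DMA buffer that the descriptor's addr_high/addr_low point at.
+ */
+static inline void
+i40e_example_fill_macvlan(struct i40e_aqc_add_macvlan_element_data *e,
+ const u8 *mac)
+{
+ int i;
+
+ for (i = 0; i < 6; i++)
+ e->mac_addr[i] = mac[i];
+ e->vlan_tag = 0;
+ e->flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
+ I40E_AQC_MACVLAN_ADD_IGNORE_VLAN);
+ e->queue_number = 0;
+}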
+
+struct i40e_aqc_add_remove_macvlan_completion {
+ __le16 perfect_mac_used;
+ __le16 perfect_mac_free;
+ __le16 unicast_hash_free;
+ __le16 multicast_hash_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
+
+/* Remove MAC-VLAN (indirect 0x0251)
+ * uses i40e_aqc_macvlan for the descriptor
+ * data points to an array of num_addresses of elements
+ */
+
+struct i40e_aqc_remove_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ u8 flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
+#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
+#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
+ u8 reserved[3];
+ /* reply section */
+ u8 error_code;
+#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
+ u8 reply_reserved[3];
+};
+
+/* Add VLAN (indirect 0x0252)
+ * Remove VLAN (indirect 0x0253)
+ * use the generic i40e_aqc_macvlan for the command
+ */
+struct i40e_aqc_add_remove_vlan_element_data {
+ __le16 vlan_tag;
+ u8 vlan_flags;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_LOCAL 0x1
+#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
+#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << \
+ I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
+#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
+#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
+#define I40E_AQC_VLAN_PTYPE_SHIFT 3
+#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
+#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
+#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
+#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
+#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_ALL 0x1
+ u8 reserved;
+ u8 result;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
+#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
+#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
+ u8 reserved1[3];
+};
+
+struct i40e_aqc_add_remove_vlan_completion {
+ u8 reserved[4];
+ __le16 vlans_used;
+ __le16 vlans_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set VSI Promiscuous Modes (direct 0x0254) */
+struct i40e_aqc_set_vsi_promiscuous_modes {
+ __le16 promiscuous_flags;
+ __le16 valid_flags;
+/* flags used for both fields above */
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
+#define I40E_AQC_SET_VSI_DEFAULT 0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
+ __le16 seid;
+#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
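+
+/* Usage sketch (illustrative): enable unicast promiscuous mode on a VSI
+ * while leaving the other modes untouched; only the bits set in
+ * valid_flags are acted on, so the two fields work as a value/mask pair.
+ */
+static inline void
+i40e_example_set_uc_promisc(struct i40e_aqc_set_vsi_promiscuous_modes *cmd,
+ u16 vsi_seid)
+{
+ cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ cmd->seid = cpu_to_le16(vsi_seid & I40E_AQC_VSI_PROM_CMD_SEID_MASK);
+}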
+
+/* Add S/E-tag command (direct 0x0255)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_add_tag {
+ __le16 flags;
+#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
+ __le16 seid;
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ __le16 queue_number;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
+
+struct i40e_aqc_add_remove_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
+
+/* Remove S/E-tag command (direct 0x0256)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_remove_tag {
+ __le16 seid;
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ u8 reserved[12];
+};
+
+/* Add multicast E-Tag (direct 0x0257)
+ * Del multicast E-Tag (direct 0x0258) uses only the pv_seid and etag
+ * fields and no external data
+ */
+struct i40e_aqc_add_remove_mcast_etag {
+ __le16 pv_seid;
+ __le16 etag;
+ u8 num_unicast_etags;
+ u8 reserved[3];
+ __le32 addr_high; /* address of array of 2-byte s-tags */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
+
+struct i40e_aqc_add_remove_mcast_etag_completion {
+ u8 reserved[4];
+ __le16 mcast_etags_used;
+ __le16 mcast_etags_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
+
+/* Update S/E-Tag (direct 0x0259) */
+struct i40e_aqc_update_tag {
+ __le16 seid;
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 old_tag;
+ __le16 new_tag;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
+
+struct i40e_aqc_update_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
+
+/* Add Control Packet filter (direct 0x025A)
+ * Remove Control Packet filter (direct 0x025B)
+ * uses the i40e_aqc_add_oveb_cloud,
+ * and the generic direct completion structure
+ */
+struct i40e_aqc_add_remove_control_packet_filter {
+ u8 mac[6];
+ __le16 etype;
+ __le16 flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
+ __le16 seid;
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
+ __le16 queue;
+ u8 reserved[2];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
+
+struct i40e_aqc_add_remove_control_packet_filter_completion {
+ __le16 mac_etype_used;
+ __le16 etype_used;
+ __le16 mac_etype_free;
+ __le16 etype_free;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
+
+/* Add Cloud filters (indirect 0x025C)
+ * Remove Cloud filters (indirect 0x025D)
+ * uses the i40e_aqc_add_remove_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_aqc_add_remove_cloud_filters {
+ u8 num_filters;
+ u8 reserved;
+ __le16 seid;
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
+
+struct i40e_aqc_add_remove_cloud_filters_element_data {
+ u8 outer_mac[6];
+ u8 inner_mac[6];
+ __le16 inner_vlan;
+ union {
+ struct {
+ u8 reserved[12];
+ u8 data[4];
+ } v4;
+ struct {
+ u8 data[16];
+ } v6;
+ } ipaddr;
+ __le16 flags;
+#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE 0x0002
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE 0x0004
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL 0x0007
+/* 0x0000 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
+/* 0x0002 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
+/* 0x0005 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
+/* 0x0007 reserved */
+/* 0x0008 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+
+#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
+#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
+#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
+
+ __le32 tenant_id;
+ u8 reserved[4];
+ __le16 queue_number;
+#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
+ u8 reserved2[14];
+ /* response section */
+ u8 allocation_result;
+#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
+#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
+ u8 response_reserved[7];
+};
+
+struct i40e_aqc_remove_cloud_filters_completion {
+ __le16 perfect_ovlan_used;
+ __le16 perfect_ovlan_free;
+ __le16 vlan_used;
+ __le16 vlan_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
+
+/* Add Mirror Rule (indirect or direct 0x0260)
+ * Delete Mirror Rule (indirect or direct 0x0261)
+ * note: some rule types (4,5) do not use an external buffer.
+ * take care to set the flags correctly.
+ */
+struct i40e_aqc_add_delete_mirror_rule {
+ __le16 seid;
+ __le16 rule_type;
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
+ I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
+ __le16 num_entries;
+ __le16 destination; /* VSI for add, rule id for delete */
+ __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
+
+struct i40e_aqc_add_delete_mirror_rule_completion {
+ u8 reserved[2];
+ __le16 rule_id; /* only used on add */
+ __le16 mirror_rules_used;
+ __le16 mirror_rules_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
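+
+/* Usage sketch (illustrative): build an add-VLAN-mirror-rule descriptor.
+ * Rule types 4 and 5 mirror all traffic and take no buffer, so for them
+ * num_entries would be 0 and the addresses unused; the VLAN id list here
+ * is assumed to be DMA-mapped by the caller.
+ */
+static inline void
+i40e_example_mirror_vlans(struct i40e_aqc_add_delete_mirror_rule *cmd,
+ u16 sw_seid, u16 dst_vsi_seid, u16 num_vlans,
+ u64 vlan_list_dma)
+{
+ cmd->seid = cpu_to_le16(sw_seid);
+ cmd->rule_type = cpu_to_le16(I40E_AQC_MIRROR_RULE_TYPE_VLAN <<
+ I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
+ cmd->num_entries = cpu_to_le16(num_vlans);
+ cmd->destination = cpu_to_le16(dst_vsi_seid);
+ cmd->addr_high = cpu_to_le32(upper_32_bits(vlan_list_dma));
+ cmd->addr_low = cpu_to_le32(lower_32_bits(vlan_list_dma));
+}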
+
+/* Set Storm Control Configuration (direct 0x0280)
+ * Get Storm Control Configuration (direct 0x0281)
+ * the command and response use the same descriptor structure
+ */
+struct i40e_aqc_set_get_storm_control_config {
+ __le32 broadcast_threshold;
+ __le32 multicast_threshold;
+ __le32 control_flags;
+#define I40E_AQC_STORM_CONTROL_MDIPW 0x01
+#define I40E_AQC_STORM_CONTROL_MDICW 0x02
+#define I40E_AQC_STORM_CONTROL_BDIPW 0x04
+#define I40E_AQC_STORM_CONTROL_BDICW 0x08
+#define I40E_AQC_STORM_CONTROL_BIDU 0x10
+#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT 8
+#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK (0x3FF << \
+ I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
+
+/* DCB 0x03xx */
+
+/* PFC Ignore (direct 0x0301)
+ * the command and response use the same descriptor structure
+ */
+struct i40e_aqc_pfc_ignore {
+ u8 tc_bitmap;
+ u8 command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET 0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
+
+/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure
+ * with no parameters
+ */
+
+/* TX scheduler 0x04xx */
+
+/* Almost all the indirect commands use
+ * this generic struct to pass the SEID in param0
+ */
+struct i40e_aqc_tx_sched_ind {
+ __le16 vsi_seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
+
+/* Several commands respond with a set of queue set handles */
+struct i40e_aqc_qs_handles_resp {
+ __le16 qs_handles[8];
+};
+
+/* Configure VSI BW limits (direct 0x0400) */
+struct i40e_aqc_configure_vsi_bw_limit {
+ __le16 vsi_seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_credit; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
+
+/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_ets_sla_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credits[8]; /* FW writes back QS handles here */
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_tc_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 tc_bw_credits[8];
+ u8 reserved1[4];
+ __le16 qs_handles[8];
+};
+
+/* Query VSI BW configuration (indirect 0x0408) */
+struct i40e_aqc_query_vsi_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 tc_suspended_bits;
+ u8 reserved[14];
+ __le16 qs_handles[8];
+ u8 reserved1[4];
+ __le16 port_bw_limit;
+ u8 reserved2[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved3[23];
+};
+
+/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
+struct i40e_aqc_query_vsi_ets_sla_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 share_credits[8];
+ __le16 credits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
+struct i40e_aqc_configure_switching_comp_bw_limit {
+ __le16 seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
+
+/* Enable Physical Port ETS (indirect 0x0413)
+ * Modify Physical Port ETS (indirect 0x0414)
+ * Disable Physical Port ETS (indirect 0x0415)
+ */
+struct i40e_aqc_configure_switching_comp_ets_data {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_flags;
+ u8 reserved2[17];
+ u8 tc_bw_share_credits[8];
+ u8 reserved3[96];
+};
+
+/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
+struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credit[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+/* Configure Switching Component Bandwidth Allocation per Tc
+ * (indirect 0x0417)
+ */
+struct i40e_aqc_configure_switching_comp_bw_config_data {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits; /* bool */
+ u8 tc_bw_share_credits[8];
+ u8 reserved1[20];
+};
+
+/* Query Switching Component Configuration (indirect 0x0418) */
+struct i40e_aqc_query_switching_comp_ets_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[35];
+ __le16 port_bw_limit;
+ u8 reserved1[2];
+ u8 tc_bw_max; /* 0-3, limit = 2^max */
+ u8 reserved2[23];
+};
+
+/* Query Physical Port ETS Configuration (indirect 0x0419) */
+struct i40e_aqc_query_port_ets_config_resp {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_bits;
+ u8 reserved2;
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved3[32];
+};
+
+/* Query Switching Component Bandwidth Allocation per Traffic Type
+ * (indirect 0x041A)
+ */
+struct i40e_aqc_query_switching_comp_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits_enable; /* bool */
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+/* Suspend/resume port TX traffic
+ * (direct 0x041B and 0x041C) uses the generic SEID struct
+ */
+
+/* Get and set the active HMC resource profile and status.
+ * (direct 0x0500) and (direct 0x0501)
+ */
+struct i40e_aq_get_set_hmc_resource_profile {
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
+
+enum i40e_aq_hmc_profile {
+ /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
+};
+
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
+
+/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
+
+/* set in param0 for get phy abilities to report qualified modules */
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
+
+enum i40e_aq_phy_type {
+ I40E_PHY_TYPE_SGMII = 0x0,
+ I40E_PHY_TYPE_1000BASE_KX = 0x1,
+ I40E_PHY_TYPE_10GBASE_KX4 = 0x2,
+ I40E_PHY_TYPE_10GBASE_KR = 0x3,
+ I40E_PHY_TYPE_40GBASE_KR4 = 0x4,
+ I40E_PHY_TYPE_XAUI = 0x5,
+ I40E_PHY_TYPE_XFI = 0x6,
+ I40E_PHY_TYPE_SFI = 0x7,
+ I40E_PHY_TYPE_XLAUI = 0x8,
+ I40E_PHY_TYPE_XLPPI = 0x9,
+ I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
+ I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+ I40E_PHY_TYPE_100BASE_TX = 0x11,
+ I40E_PHY_TYPE_1000BASE_T = 0x12,
+ I40E_PHY_TYPE_10GBASE_T = 0x13,
+ I40E_PHY_TYPE_10GBASE_SR = 0x14,
+ I40E_PHY_TYPE_10GBASE_LR = 0x15,
+ I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16,
+ I40E_PHY_TYPE_10GBASE_CR1 = 0x17,
+ I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
+ I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
+ I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
+ I40E_PHY_TYPE_20GBASE_KR2 = 0x1B,
+ I40E_PHY_TYPE_MAX
+};
+
+#define I40E_LINK_SPEED_100MB_SHIFT 0x1
+#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
+#define I40E_LINK_SPEED_10GB_SHIFT 0x3
+#define I40E_LINK_SPEED_40GB_SHIFT 0x4
+#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+
+enum i40e_aq_link_speed {
+ I40E_LINK_SPEED_UNKNOWN = 0,
+ I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
+ I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
+ I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
+ I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+};
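+
+/* Helper sketch (illustrative): the link_speed values above are single
+ * bits, so a reported speed maps to Mb/s with a simple switch.
+ */
+static inline u32 i40e_example_speed_mbps(enum i40e_aq_link_speed speed)
+{
+ switch (speed) {
+ case I40E_LINK_SPEED_100MB:
+ return 100;
+ case I40E_LINK_SPEED_1GB:
+ return 1000;
+ case I40E_LINK_SPEED_10GB:
+ return 10000;
+ case I40E_LINK_SPEED_20GB:
+ return 20000;
+ case I40E_LINK_SPEED_40GB:
+ return 40000;
+ default:
+ return 0;
+ }
+}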
+
+struct i40e_aqc_module_desc {
+ u8 oui[3];
+ u8 reserved1;
+ u8 part_number[16];
+ u8 revision[4];
+ u8 reserved2[8];
+};
+
+struct i40e_aq_get_phy_abilities_resp {
+ __le32 phy_type; /* bitmap using the above enum for offsets */
+ u8 link_speed; /* bitmap using the above enum bit patterns */
+ u8 abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
+#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
+#define I40E_AQ_PHY_FLAG_AN_SHIFT 3
+#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
+#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */
+#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
+#define I40E_AQ_PHY_FLAG_AN_ON 0x02
+#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+ __le16 eee_capability;
+#define I40E_AQ_EEE_100BASE_TX 0x0002
+#define I40E_AQ_EEE_1000BASE_T 0x0004
+#define I40E_AQ_EEE_10GBASE_T 0x0008
+#define I40E_AQ_EEE_1000BASE_KX 0x0010
+#define I40E_AQ_EEE_10GBASE_KX4 0x0020
+#define I40E_AQ_EEE_10GBASE_KR 0x0040
+ __le32 eeer_val;
+ u8 d3_lpan;
+#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
+ u8 reserved[3];
+ u8 phy_id[4];
+ u8 module_type[3];
+ u8 qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS 16
+ struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
+};
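+
+/* Usage sketch (illustrative): phy_type in the response is a bitmap
+ * indexed by the i40e_aq_phy_type enum values, so support for a given
+ * PHY is a single bit test after byte-swapping.
+ */
+static inline bool
+i40e_example_phy_supported(struct i40e_aq_get_phy_abilities_resp *resp,
+ enum i40e_aq_phy_type type)
+{
+ return !!(le32_to_cpu(resp->phy_type) & (1u << type));
+}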
+
+/* Set PHY Config (direct 0x0601) */
+struct i40e_aq_set_phy_config { /* same bits as above in all */
+ __le32 phy_type;
+ u8 link_speed;
+ u8 abilities;
+/* bits 0-2 use the values from get_phy_abilities_resp */
+#define I40E_AQ_PHY_ENABLE_LINK 0x08
+#define I40E_AQ_PHY_ENABLE_AN 0x10
+#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
+ __le16 eee_capability;
+ __le32 eeer;
+ u8 low_power_ctrl;
+ u8 reserved[3];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
+
+/* Set MAC Config command data structure (direct 0x0603) */
+struct i40e_aq_set_mac_config {
+ __le16 max_frame_size;
+ u8 params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+ u8 tx_timer_priority; /* bitmap */
+ __le16 tx_timer_value;
+ __le16 fc_refresh_threshold;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
+
+/* Restart Auto-Negotiation (direct 0x605) */
+struct i40e_aqc_set_link_restart_an {
+ u8 command;
+#define I40E_AQ_PHY_RESTART_AN 0x02
+#define I40E_AQ_PHY_LINK_ENABLE 0x04
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
+
+/* Get Link Status cmd & response data structure (direct 0x0607) */
+struct i40e_aqc_get_link_status {
+ __le16 command_flags; /* only field set on command */
+#define I40E_AQ_LSE_MASK 0x3
+#define I40E_AQ_LSE_NOP 0x0
+#define I40E_AQ_LSE_DISABLE 0x2
+#define I40E_AQ_LSE_ENABLE 0x3
+/* only response uses this flag */
+#define I40E_AQ_LSE_IS_ENABLED 0x1
+ u8 phy_type; /* i40e_aq_phy_type */
+ u8 link_speed; /* i40e_aq_link_speed */
+ u8 link_info;
+#define I40E_AQ_LINK_UP 0x01
+#define I40E_AQ_LINK_FAULT 0x02
+#define I40E_AQ_LINK_FAULT_TX 0x04
+#define I40E_AQ_LINK_FAULT_RX 0x08
+#define I40E_AQ_LINK_FAULT_REMOTE 0x10
+#define I40E_AQ_MEDIA_AVAILABLE 0x40
+#define I40E_AQ_SIGNAL_DETECT 0x80
+ u8 an_info;
+#define I40E_AQ_AN_COMPLETED 0x01
+#define I40E_AQ_LP_AN_ABILITY 0x02
+#define I40E_AQ_PD_FAULT 0x04
+#define I40E_AQ_FEC_EN 0x08
+#define I40E_AQ_PHY_LOW_POWER 0x10
+#define I40E_AQ_LINK_PAUSE_TX 0x20
+#define I40E_AQ_LINK_PAUSE_RX 0x40
+#define I40E_AQ_QUALIFIED_MODULE 0x80
+ u8 ext_info;
+#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
+#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
+#define I40E_AQ_LINK_TX_SHIFT 0x02
+#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
+#define I40E_AQ_LINK_TX_ACTIVE 0x00
+#define I40E_AQ_LINK_TX_DRAINED 0x01
+#define I40E_AQ_LINK_TX_FLUSHED 0x03
+ u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+ __le16 max_frame_size;
+ u8 config;
+#define I40E_AQ_CONFIG_CRC_ENA 0x04
+#define I40E_AQ_CONFIG_PACING_MASK 0x78
+ u8 reserved[5];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
+
+/* Set event mask command (direct 0x613) */
+struct i40e_aqc_set_phy_int_mask {
+ u8 reserved[8];
+ __le16 event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
+#define I40E_AQ_EVENT_MEDIA_NA 0x0004
+#define I40E_AQ_EVENT_LINK_FAULT 0x0008
+#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
+#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
+#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
+#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
+#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
+ u8 reserved1[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
+
+/* Get Local AN advt register (direct 0x0614)
+ * Set Local AN advt register (direct 0x0615)
+ * Get Link Partner AN advt register (direct 0x0616)
+ */
+struct i40e_aqc_an_advt_reg {
+ __le32 local_an_reg0;
+ __le16 local_an_reg1;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
+
+/* Set Loopback mode (0x0618) */
+struct i40e_aqc_set_lb_mode {
+ __le16 lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL 0x01
+#define I40E_AQ_LB_PHY_REMOTE 0x02
+#define I40E_AQ_LB_MAC_LOCAL 0x04
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
+
+/* Set PHY Reset command (0x0622) */
+struct i40e_aqc_set_phy_reset {
+ u8 reset_flags;
+#define I40E_AQ_PHY_RESET_REQUEST 0x02
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset);
+
+enum i40e_aq_phy_reg_type {
+ I40E_AQC_PHY_REG_INTERNAL = 0x1,
+ I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
+ I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
+};
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Update commands (indirect 0x0703)
+ */
+struct i40e_aqc_nvm_update {
+ u8 command_flags;
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+ u8 module_pointer;
+ __le16 length;
+ __le32 offset;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
+
+/* Send to PF command (indirect 0x0801); the id field is used only by the PF
+ * Send to VF command (indirect 0x0802); the id field is used only by the PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct i40e_aqc_pf_vf_message {
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
+
+/* Alternate structure */
+
+/* Direct write (direct 0x0900)
+ * Direct read (direct 0x0902)
+ */
+struct i40e_aqc_alternate_write {
+ __le32 address0;
+ __le32 data0;
+ __le32 address1;
+ __le32 data1;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
+
+/* Indirect write (indirect 0x0901)
+ * Indirect read (indirect 0x0903)
+ */
+
+struct i40e_aqc_alternate_ind_write {
+ __le32 address;
+ __le32 length;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
+
+/* Done alternate write (direct 0x0904)
+ * uses i40e_aq_desc
+ */
+struct i40e_aqc_alternate_write_done {
+ __le16 cmd_flags;
+#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
+#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
+#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
+#define I40E_AQ_ALTERNATE_RESET_NEEDED 2
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
+
+/* Set OEM mode (direct 0x0905) */
+struct i40e_aqc_alternate_set_mode {
+ __le32 mode;
+#define I40E_AQ_ALTERNATE_MODE_NONE 0
+#define I40E_AQ_ALTERNATE_MODE_OEM 1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
+
+/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
+
+/* async events 0x10xx */
+
+/* Lan Queue Overflow Event (direct, 0x1001) */
+struct i40e_aqc_lan_overflow {
+ __le32 prtdcb_rupto;
+ __le32 otx_ctl;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
+
+/* Get LLDP MIB (indirect 0x0A00) */
+struct i40e_aqc_lldp_get_mib {
+ u8 type;
+ u8 reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
+#define I40E_AQ_LLDP_MIB_LOCAL 0x0
+#define I40E_AQ_LLDP_MIB_REMOTE 0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
+#define I40E_AQ_LLDP_TX_SHIFT 0x4
+#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
+/* TX pause flags use I40E_AQ_LINK_TX_* above */
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
+
+/* Configure LLDP MIB Change Event (direct 0x0A01)
+ * also used for the event (with type in the command field)
+ */
+struct i40e_aqc_lldp_update_mib {
+ u8 command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
+
+/* Add LLDP TLV (indirect 0x0A02)
+ * Delete LLDP TLV (indirect 0x0A04)
+ */
+struct i40e_aqc_lldp_add_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved1[1];
+ __le16 len;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
+
+/* Update LLDP TLV (indirect 0x0A03) */
+struct i40e_aqc_lldp_update_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved;
+ __le16 old_len;
+ __le16 new_offset;
+ __le16 new_len;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
+
+/* Stop LLDP (direct 0x0A05) */
+struct i40e_aqc_lldp_stop {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_STOP 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
+
+/* Start LLDP (direct 0x0A06) */
+
+struct i40e_aqc_lldp_start {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_START 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+
+/* Apply MIB changes (0x0A07)
+ * uses the generic struct as it contains no data
+ */
+
+/* Add UDP Tunnel command and completion (direct 0x0B00) */
+struct i40e_aqc_add_udp_tunnel {
+ __le16 udp_port;
+ u8 header_len; /* in DWords, 1 to 15 */
+ u8 protocol_type;
+#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x0
+#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x2
+#define I40E_AQC_TUNNEL_TYPE_NGE 0x3
+ u8 variable_udp_length;
+#define I40E_AQC_TUNNEL_FIXED_UDP_LENGTH 0x0
+#define I40E_AQC_TUNNEL_VARIABLE_UDP_LENGTH 0x1
+ u8 udp_key_index;
+#define I40E_AQC_TUNNEL_KEY_INDEX_VXLAN 0x0
+#define I40E_AQC_TUNNEL_KEY_INDEX_NGE 0x1
+#define I40E_AQC_TUNNEL_KEY_INDEX_PROPRIETARY_UDP 0x2
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
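+
+/* Usage sketch (illustrative): request a filter for the IANA-assigned
+ * VXLAN port. The port number is an assumption about the caller's tunnel
+ * setup; header_len (in DWords) is left for the caller to fill in from
+ * its encapsulation layout.
+ */
+static inline void
+i40e_example_add_vxlan_port(struct i40e_aqc_add_udp_tunnel *cmd)
+{
+ cmd->udp_port = cpu_to_le16(4789); /* IANA VXLAN port */
+ cmd->protocol_type = I40E_AQC_TUNNEL_TYPE_VXLAN;
+ cmd->udp_key_index = I40E_AQC_TUNNEL_KEY_INDEX_VXLAN;
+ cmd->variable_udp_length = I40E_AQC_TUNNEL_FIXED_UDP_LENGTH;
+}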
+
+struct i40e_aqc_add_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 filter_entry_index;
+ u8 multiple_pfs;
+#define I40E_AQC_SINGLE_PF 0x0
+#define I40E_AQC_MULTIPLE_PFS 0x1
+ u8 total_filters;
+ u8 reserved[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
+
+/* Remove UDP Tunnel command (0x0B01) */
+struct i40e_aqc_remove_udp_tunnel {
+ u8 reserved[2];
+ u8 index; /* 0 to 15 */
+ u8 pf_filters;
+ u8 total_filters;
+ u8 reserved2[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
+
+struct i40e_aqc_del_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 index; /* 0 to 15 */
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved;
+ u8 tunnels_free;
+ u8 reserved1[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+
+/* tunnel key structure 0x0B10 */
+
+struct i40e_aqc_tunnel_key_structure_A0 {
+ __le16 key1_off;
+ __le16 key1_len;
+ __le16 key2_off;
+ __le16 key2_len;
+ __le16 flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+ u8 reserved[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure_A0);
+
+struct i40e_aqc_tunnel_key_structure {
+ u8 key1_off;
+ u8 key2_off;
+ u8 key1_len; /* 0 to 15 */
+ u8 key2_len; /* 0 to 15 */
+ u8 flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+ u8 network_key_index;
+#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
+#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
+#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2
+#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
+
+/* OEM mode commands (direct 0xFE0x) */
+struct i40e_aqc_oem_param_change {
+ __le32 param_type;
+#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
+#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
+#define I40E_AQ_OEM_PARAM_MAC 2
+ __le32 param_value1;
+ u8 param_value2[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
+
+struct i40e_aqc_oem_state_change {
+ __le32 state;
+#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
+#define I40E_AQ_OEM_STATE_LINK_UP 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
+
+/* debug commands */
+
+/* get device id (0xFF00) uses the generic structure */
+
+/* set test mode (0xFF01, internal) */
+
+struct i40e_acq_set_test_mode {
+ u8 mode;
+#define I40E_AQ_TEST_PARTIAL 0
+#define I40E_AQ_TEST_FULL 1
+#define I40E_AQ_TEST_NVM 2
+ u8 reserved[3];
+ u8 command;
+#define I40E_AQ_TEST_OPEN 0
+#define I40E_AQ_TEST_CLOSE 1
+#define I40E_AQ_TEST_INC 2
+ u8 reserved2[3];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
+
+/* Debug Read Register command (0xFF03)
+ * Debug Write Register command (0xFF04)
+ */
+struct i40e_aqc_debug_reg_read_write {
+ __le32 reserved;
+ __le32 address;
+ __le32 value_high;
+ __le32 value_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write);
+
+/* Scatter/gather Reg Read (indirect 0xFF05)
+ * Scatter/gather Reg Write (indirect 0xFF06)
+ */
+
+/* i40e_aq_desc is used for the command */
+struct i40e_aqc_debug_reg_sg_element_data {
+ __le32 address;
+ __le32 value;
+};
+
+/* Debug Modify register (direct 0xFF07) */
+struct i40e_aqc_debug_modify_reg {
+ __le32 address;
+ __le32 value;
+ __le32 clear_mask;
+ __le32 set_mask;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
+
+/* dump internal data (0xFF08, indirect) */
+
+#define I40E_AQ_CLUSTER_ID_AUX 0
+#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1
+#define I40E_AQ_CLUSTER_ID_TXSCHED 2
+#define I40E_AQ_CLUSTER_ID_HMC 3
+#define I40E_AQ_CLUSTER_ID_MAC0 4
+#define I40E_AQ_CLUSTER_ID_MAC1 5
+#define I40E_AQ_CLUSTER_ID_MAC2 6
+#define I40E_AQ_CLUSTER_ID_MAC3 7
+#define I40E_AQ_CLUSTER_ID_DCB 8
+#define I40E_AQ_CLUSTER_ID_EMP_MEM 9
+#define I40E_AQ_CLUSTER_ID_PKT_BUF 10
+#define I40E_AQ_CLUSTER_ID_ALTRAM 11
+
+struct i40e_aqc_debug_dump_internals {
+ u8 cluster_id;
+ u8 table_id;
+ __le16 data_size;
+ __le32 idx;
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
+
+struct i40e_aqc_debug_modify_internals {
+ u8 cluster_id;
+ u8 cluster_specific_params[7];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
+
+#endif
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
new file mode 100644
index 000000000000..d8654fb9e525
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
@@ -0,0 +1,55 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ALLOC_H_
+#define _I40E_ALLOC_H_
+
+struct i40e_hw;
+
+/* Memory allocation types */
+enum i40e_memory_type {
+ i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */
+ i40e_mem_asq_buf = 1,
+ i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */
+ i40e_mem_arq_ring = 3, /* ARQ descriptor ring */
+ i40e_mem_atq_ring = 4, /* ATQ descriptor ring */
+ i40e_mem_pd = 5, /* Page Descriptor */
+ i40e_mem_bp = 6, /* Backing Page - 4KB */
+ i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
+ i40e_mem_reserved
+};
+
+/* prototype for functions used for dynamic memory allocation */
+i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ enum i40e_memory_type type,
+ u64 size, u32 alignment);
+i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem);
+i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem,
+ u32 size);
+i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem);
+
+#endif /* _I40E_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
new file mode 100644
index 000000000000..44b90b347bfd
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -0,0 +1,254 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_type.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_virtchnl.h"
+
+/**
+ * i40e_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+i40e_status i40e_set_mac_type(struct i40e_hw *hw)
+{
+ i40e_status status = 0;
+
+ if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (hw->device_id) {
+ case I40E_SFP_XL710_DEVICE_ID:
+ case I40E_SFP_X710_DEVICE_ID:
+ case I40E_QEMU_DEVICE_ID:
+ case I40E_KX_A_DEVICE_ID:
+ case I40E_KX_B_DEVICE_ID:
+ case I40E_KX_C_DEVICE_ID:
+ case I40E_KX_D_DEVICE_ID:
+ case I40E_QSFP_A_DEVICE_ID:
+ case I40E_QSFP_B_DEVICE_ID:
+ case I40E_QSFP_C_DEVICE_ID:
+ hw->mac.type = I40E_MAC_XL710;
+ break;
+ case I40E_VF_DEVICE_ID:
+ case I40E_VF_HV_DEVICE_ID:
+ hw->mac.type = I40E_MAC_VF;
+ break;
+ default:
+ hw->mac.type = I40E_MAC_GENERIC;
+ break;
+ }
+ } else {
+ status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, status);
+ return status;
+}
+
+/**
+ * i40evf_debug_aq
+ * @hw: pointer to the hw struct
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
+ * @buffer: pointer to command buffer
+ *
+ * Dumps debug log about adminq command with descriptor contents.
+ **/
+void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
+ void *buffer)
+{
+ struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ u8 *aq_buffer = (u8 *)buffer;
+ u32 data[4];
+ u32 i = 0;
+
+ if ((!(mask & hw->debug_mask)) || (desc == NULL))
+ return;
+
+ i40e_debug(hw, mask,
+ "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ aq_desc->opcode, aq_desc->flags, aq_desc->datalen,
+ aq_desc->retval);
+ i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ aq_desc->cookie_high, aq_desc->cookie_low);
+ i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
+ aq_desc->params.internal.param0,
+ aq_desc->params.internal.param1);
+ i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
+ aq_desc->params.external.addr_high,
+ aq_desc->params.external.addr_low);
+
+ if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ memset(data, 0, sizeof(data));
+ i40e_debug(hw, mask, "AQ CMD Buffer:\n");
+ for (i = 0; i < le16_to_cpu(aq_desc->datalen); i++) {
+ data[((i % 16) / 4)] |=
+ ((u32)aq_buffer[i]) << (8 * (i % 4));
+ if ((i % 16) == 15) {
+ i40e_debug(hw, mask,
+ "\t0x%04X %08X %08X %08X %08X\n",
+ i - 15, data[0], data[1], data[2],
+ data[3]);
+ memset(data, 0, sizeof(data));
+ }
+ }
+ if ((i % 16) != 0)
+ i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n",
+ i - (i % 16), data[0], data[1], data[2],
+ data[3]);
+ }
+}
+
+/**
+ * i40evf_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the admin send queue is enabled, else false.
+ **/
+bool i40evf_check_asq_alive(struct i40e_hw *hw)
+{
+ return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
+}
+
+/**
+ * i40evf_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_queue_shutdown *cmd =
+ (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ i40e_status status;
+
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+ status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_send_msg_to_pf
+ * @hw: pointer to the hardware structure
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send message to PF driver using admin queue. By default, this message
+ * is sent asynchronously, i.e. i40evf_asq_send_command() does not wait for
+ * completion before returning.
+ **/
+i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ i40e_status status;
+
+ i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
+ desc.cookie_high = cpu_to_le32(v_opcode);
+ desc.cookie_low = cpu_to_le32(v_retval);
+ if (msglen) {
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF
+ | I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = cpu_to_le16(msglen);
+ }
+ if (!cmd_details) {
+ struct i40e_asq_cmd_details details;
+ memset(&details, 0, sizeof(details));
+ details.async = true;
+ cmd_details = &details;
+ }
+ status = i40evf_asq_send_command(hw, (struct i40e_aq_desc *)&desc, msg,
+ msglen, cmd_details);
+ return status;
+}
+
+/**
+ * i40e_vf_parse_hw_config
+ * @hw: pointer to the hardware structure
+ * @msg: pointer to the virtual channel VF resource structure
+ *
+ * Given a VF resource message from the PF, populate the hw struct
+ * with appropriate information.
+ **/
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ struct i40e_virtchnl_vf_resource *msg)
+{
+ struct i40e_virtchnl_vsi_resource *vsi_res;
+ int i;
+
+ vsi_res = &msg->vsi_res[0];
+
+ hw->dev_caps.num_vsis = msg->num_vsis;
+ hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
+ hw->dev_caps.dcb = msg->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_L2;
+ hw->dev_caps.fcoe = (msg->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
+ for (i = 0; i < msg->num_vsis; i++) {
+ if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
+ memcpy(hw->mac.perm_addr, vsi_res->default_mac_addr,
+ ETH_ALEN);
+ memcpy(hw->mac.addr, vsi_res->default_mac_addr,
+ ETH_ALEN);
+ }
+ vsi_res++;
+ }
+}
+
+/**
+ * i40e_vf_reset
+ * @hw: pointer to the hardware structure
+ *
+ * Send a VF_RESET message to the PF. Does not wait for response from PF
+ * as none will be forthcoming. Immediately after calling this function,
+ * the admin queue should be shut down and (optionally) reinitialized.
+ **/
+i40e_status i40e_vf_reset(struct i40e_hw *hw)
+{
+ return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
+ 0, NULL, 0, NULL);
+}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
new file mode 100644
index 000000000000..cb97b3eed440
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
@@ -0,0 +1,238 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_HMC_H_
+#define _I40E_HMC_H_
+
+#define I40E_HMC_MAX_BP_COUNT 512
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
+#define I40E_HMC_PD_CNT_IN_SD 512
+#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
+#define I40E_HMC_PAGED_BP_SIZE 4096
+#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096
+#define I40E_FIRST_VF_FPM_ID 16
+
+struct i40e_hmc_obj_info {
+ u64 base; /* base addr in FPM */
+ u32 max_cnt; /* max count available for this hmc func */
+ u32 cnt; /* count of objects driver actually wants to create */
+ u64 size; /* size in bytes of one object */
+};
+
+enum i40e_sd_entry_type {
+ I40E_SD_TYPE_INVALID = 0,
+ I40E_SD_TYPE_PAGED = 1,
+ I40E_SD_TYPE_DIRECT = 2
+};
+
+struct i40e_hmc_bp {
+ enum i40e_sd_entry_type entry_type;
+ struct i40e_dma_mem addr; /* populate to be used by hw */
+ u32 sd_pd_index;
+ u32 ref_cnt;
+};
+
+struct i40e_hmc_pd_entry {
+ struct i40e_hmc_bp bp;
+ u32 sd_index;
+ bool valid;
+};
+
+struct i40e_hmc_pd_table {
+ struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
+ struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw bookkeeping */
+ struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
+
+ u32 ref_cnt;
+ u32 sd_index;
+};
+
+struct i40e_hmc_sd_entry {
+ enum i40e_sd_entry_type entry_type;
+ bool valid;
+
+ union {
+ struct i40e_hmc_pd_table pd_table;
+ struct i40e_hmc_bp bp;
+ } u;
+};
+
+struct i40e_hmc_sd_table {
+ struct i40e_virt_mem addr; /* used to track sd_entry allocations */
+ u32 sd_cnt;
+ u32 ref_cnt;
+ struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
+};
+
+struct i40e_hmc_info {
+ u32 signature;
+ /* equals the PCI function number for the PF; dynamically allocated for VFs */
+ u8 hmc_fn_id;
+ u16 first_sd_index; /* index of the first available SD */
+
+ /* hmc objects */
+ struct i40e_hmc_obj_info *hmc_obj;
+ struct i40e_virt_mem hmc_obj_virt_mem;
+ struct i40e_hmc_sd_table sd_table;
+};
+
+#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
+#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
+#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
+
+#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
+#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
+#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
+
+/**
+ * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
+ * @hw: pointer to our hw struct
+ * @pa: pointer to physical address
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
+{ \
+ u32 val1, val2, val3; \
+ val1 = (u32)(upper_32_bits(pa)); \
+ val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
+ (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
+ val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
+{ \
+ u32 val2, val3; \
+ val2 = (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
+ val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ **/
+#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
+ wr32((hw), I40E_PFHMC_PDINV, \
+ (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
+ wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
+ (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+/**
+ * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @index: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the maximum number of segment descriptors
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
+{ \
+ u64 fpm_addr, fpm_limit; \
+ fpm_addr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (index); \
+ fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
+ *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \
+ *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(sd_limit) += 1; \
+}
+
+/**
+ * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_index: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
+{ \
+ u64 fpm_adr, fpm_limit; \
+ fpm_adr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (idx); \
+ fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \
+ *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \
+ *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(pd_limit) += 1; \
+}
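+
+/* Usage sketch (illustrative): both macros return their results through
+ * the pointer arguments, e.g. sizing the SD window that backs "cnt" TX
+ * queue contexts starting at "idx" (the I40E_HMC_LAN_TX resource type is
+ * defined by the LAN HMC layer and is an assumption here):
+ *
+ * u32 sd_idx, sd_limit;
+ * I40E_FIND_SD_INDEX_LIMIT(hmc_info, I40E_HMC_LAN_TX, idx, cnt,
+ * &sd_idx, &sd_limit);
+ */
+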
+i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz);
+
+i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index);
+i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+
+#endif /* _I40E_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
new file mode 100644
index 000000000000..17e42ca26d0b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
@@ -0,0 +1,165 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_LAN_HMC_H_
+#define _I40E_LAN_HMC_H_
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+/* HMC element context information */
+
+/* Rx queue context data */
+struct i40e_hmc_obj_rxq {
+ u16 head;
+ u8 cpuid;
+ u64 base;
+ u16 qlen;
+#define I40E_RXQ_CTX_DBUFF_SHIFT 7
+ u8 dbuff;
+#define I40E_RXQ_CTX_HBUFF_SHIFT 6
+ u8 hbuff;
+ u8 dtype;
+ u8 dsize;
+ u8 crcstrip;
+ u8 fc_ena;
+ u8 l2tsel;
+ u8 hsplit_0;
+ u8 hsplit_1;
+ u8 showiv;
+ u16 rxmax;
+ u8 tphrdesc_ena;
+ u8 tphwdesc_ena;
+ u8 tphdata_ena;
+ u8 tphhead_ena;
+ u8 lrxqthresh;
+};
+
+/* Tx queue context data */
+struct i40e_hmc_obj_txq {
+ u16 head;
+ u8 new_context;
+ u64 base;
+ u8 fc_ena;
+ u8 timesync_ena;
+ u8 fd_ena;
+ u8 alt_vlan_ena;
+ u16 thead_wb;
+ u16 cpuid;
+ u8 head_wb_ena;
+ u16 qlen;
+ u8 tphrdesc_ena;
+ u8 tphrpacket_ena;
+ u8 tphwdesc_ena;
+ u64 head_wb_addr;
+ u32 crc;
+ u16 rdylist;
+ u8 rdylist_act;
+};
+
+/* for hsplit_0 field of Rx HMC context */
+enum i40e_hmc_obj_rx_hsplit_0 {
+ I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8,
+};
+
+/* fcoe_cntx and fcoe_filt are for debugging purposes only */
+struct i40e_hmc_obj_fcoe_cntx {
+ u32 rsv[32];
+};
+
+struct i40e_hmc_obj_fcoe_filt {
+ u32 rsv[8];
+};
+
+/* Context sizes for LAN objects */
+enum i40e_hmc_lan_object_size {
+ I40E_HMC_LAN_OBJ_SZ_8 = 0x3,
+ I40E_HMC_LAN_OBJ_SZ_16 = 0x4,
+ I40E_HMC_LAN_OBJ_SZ_32 = 0x5,
+ I40E_HMC_LAN_OBJ_SZ_64 = 0x6,
+ I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
+ I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
+ I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
+};
+
+#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
+#define I40E_HMC_OBJ_SIZE_TXQ 128
+#define I40E_HMC_OBJ_SIZE_RXQ 32
+#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128
+#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64
+
+enum i40e_hmc_lan_rsrc_type {
+ I40E_HMC_LAN_FULL = 0,
+ I40E_HMC_LAN_TX = 1,
+ I40E_HMC_LAN_RX = 2,
+ I40E_HMC_FCOE_CTX = 3,
+ I40E_HMC_FCOE_FILT = 4,
+ I40E_HMC_LAN_MAX = 5
+};
+
+enum i40e_hmc_model {
+ I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
+ I40E_HMC_MODEL_DIRECT_ONLY = 1,
+ I40E_HMC_MODEL_PAGED_ONLY = 2,
+ I40E_HMC_MODEL_UNKNOWN,
+};
+
+struct i40e_hmc_lan_create_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ enum i40e_sd_entry_type entry_type;
+ u64 direct_mode_sz;
+};
+
+struct i40e_hmc_lan_delete_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+};
+
+i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num);
+i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model);
+i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
+
+i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s);
+i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s);
+
+#endif /* _I40E_LAN_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
new file mode 100644
index 000000000000..622f373b745d
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
@@ -0,0 +1,72 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_OSDEP_H_
+#define _I40E_OSDEP_H_
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/tcp.h>
+#include <linux/pci.h>
+
+/* get readq/writeq support for 32-bit kernels, use the low-first version */
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+
+/* This file is the shim between the shared code and
+ * the actual OS primitives.
+ */
+
+#define hw_dbg(hw, S, A...) do {} while (0)
+
+#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+#define rd32(a, reg) readl((a)->hw_addr + (reg))
+
+#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
+#define rd64(a, reg) readq((a)->hw_addr + (reg))
+#define i40e_flush(a) readl((a)->hw_addr + I40E_VFGEN_RSTAT)
+
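+/*
+ * A minimal usage sketch (assumptions: "hw" is a struct i40e_hw * with a
+ * mapped hw_addr, and I40E_VFGEN_RSTAT is defined in i40e_register.h):
+ *
+ *	u32 rstat = rd32(hw, I40E_VFGEN_RSTAT);
+ *	wr32(hw, I40E_VFGEN_RSTAT, rstat);
+ *	i40e_flush(hw);		(the read-back flushes posted writes)
+ */
+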
+/* memory allocation tracking */
+struct i40e_dma_mem {
+ void *va;
+ dma_addr_t pa;
+ u32 size;
+} __packed;
+
+#define i40e_allocate_dma_mem(h, m, unused, s, a) \
+ i40evf_allocate_dma_mem_d(h, m, s, a)
+#define i40e_free_dma_mem(h, m) i40evf_free_dma_mem_d(h, m)
+
+struct i40e_virt_mem {
+ void *va;
+ u32 size;
+} __packed;
+#define i40e_allocate_virt_mem(h, m, s) i40evf_allocate_virt_mem_d(h, m, s)
+#define i40e_free_virt_mem(h, m) i40evf_free_virt_mem_d(h, m)
+
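+/*
+ * A hedged usage sketch of the allocation wrappers above (the *_d
+ * backing functions live elsewhere in the driver; a zero return is
+ * assumed to mean success):
+ *
+ *	struct i40e_dma_mem mem;
+ *
+ *	if (!i40e_allocate_dma_mem(hw, &mem, 0, 4096, 4096))
+ *		...use mem.va (kernel address) and mem.pa (bus address)...
+ *	i40e_free_dma_mem(hw, &mem);
+ */
+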
+#define i40e_debug(h, m, s, ...) i40evf_debug_d(h, m, s, ##__VA_ARGS__)
+extern void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
+ __attribute__ ((format(gnu_printf, 3, 4)));
+
+typedef enum i40e_status_code i40e_status;
+#endif /* _I40E_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
new file mode 100644
index 000000000000..7841573a58c9
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -0,0 +1,84 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_PROTOTYPE_H_
+#define _I40E_PROTOTYPE_H_
+
+#include "i40e_type.h"
+#include "i40e_alloc.h"
+#include "i40e_virtchnl.h"
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures. They exist
+ * mostly because they are needed even before init has
+ * happened, and they assist in the early SW and FW
+ * setup.
+ */
+
+/* adminq functions */
+i40e_status i40evf_init_adminq(struct i40e_hw *hw);
+i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw);
+void i40e_adminq_init_ring_data(struct i40e_hw *hw);
+i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *events_pending);
+i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+bool i40evf_asq_done(struct i40e_hw *hw);
+
+/* debug function for adminq */
+void i40evf_debug_aq(struct i40e_hw *hw,
+ enum i40e_debug_mask mask,
+ void *desc,
+ void *buffer);
+
+void i40e_idle_aq(struct i40e_hw *hw);
+void i40evf_resume_aq(struct i40e_hw *hw);
+bool i40evf_check_asq_alive(struct i40e_hw *hw);
+i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading);
+
+i40e_status i40e_set_mac_type(struct i40e_hw *hw);
+
+/* prototypes for functions used for SW locks */
+
+/* i40e_common for VF drivers */
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ struct i40e_virtchnl_vf_resource *msg);
+i40e_status i40e_vf_reset(struct i40e_hw *hw);
+i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings);
+i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details);
+#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h
new file mode 100644
index 000000000000..30af953cf106
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h
@@ -0,0 +1,4667 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_REGISTER_H_
+#define _I40E_REGISTER_H_
+
+#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */
+#define I40E_GL_GP_FUSE_MAX_INDEX 28
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK (0xFFFFFFFF << I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
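+
+/*
+ * A short illustration of the SHIFT/MASK convention used throughout this
+ * file (a sketch, not part of the register spec): a field is read by
+ * masking first and then shifting down, e.g. the admin receive queue
+ * length field defined further below:
+ *
+ *	u32 val = rd32(hw, I40E_PF_ARQLEN);
+ *	u16 len = (val & I40E_PF_ARQLEN_ARQLEN_MASK) >>
+ *		  I40E_PF_ARQLEN_ARQLEN_SHIFT;
+ */
+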
+#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
+#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE 0x0009C600
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+
+#define I40E_PF_ARQBAH 0x00080180
+#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_PF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_PF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_PF_ARQBAL 0x00080080
+#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_PF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_PF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_PF_ARQH 0x00080380
+#define I40E_PF_ARQH_ARQH_SHIFT 0
+#define I40E_PF_ARQH_ARQH_MASK (0x3FF << I40E_PF_ARQH_ARQH_SHIFT)
+#define I40E_PF_ARQLEN 0x00080280
+#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_PF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_PF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_PF_ARQLEN_ARQVFE_MASK (0x1 << I40E_PF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_PF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_PF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_PF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_PF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_PF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQT 0x00080480
+#define I40E_PF_ARQT_ARQT_SHIFT 0
+#define I40E_PF_ARQT_ARQT_MASK (0x3FF << I40E_PF_ARQT_ARQT_SHIFT)
+#define I40E_PF_ATQBAH 0x00080100
+#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_PF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_PF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_PF_ATQBAL 0x00080000
+#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_PF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_PF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_PF_ATQH 0x00080300
+#define I40E_PF_ATQH_ATQH_SHIFT 0
+#define I40E_PF_ATQH_ATQH_MASK (0x3FF << I40E_PF_ATQH_ATQH_SHIFT)
+#define I40E_PF_ATQLEN 0x00080200
+#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_PF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_PF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_PF_ATQLEN_ATQVFE_MASK (0x1 << I40E_PF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_PF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_PF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_PF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_PF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_PF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQT 0x00080400
+#define I40E_PF_ATQT_ATQT_SHIFT 0
+#define I40E_PF_ATQT_ATQT_MASK (0x3FF << I40E_PF_ATQT_ATQT_SHIFT)
+#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAH_MAX_INDEX 127
+#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAL_MAX_INDEX 127
+#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQH_MAX_INDEX 127
+#define I40E_VF_ARQH_ARQH_SHIFT 0
+#define I40E_VF_ARQH_ARQH_MASK (0x3FF << I40E_VF_ARQH_ARQH_SHIFT)
+#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQLEN_MAX_INDEX 127
+#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQT_MAX_INDEX 127
+#define I40E_VF_ARQT_ARQT_SHIFT 0
+#define I40E_VF_ARQT_ARQT_MASK (0x3FF << I40E_VF_ARQT_ARQT_SHIFT)
+#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAH_MAX_INDEX 127
+#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAL_MAX_INDEX 127
+#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQH_MAX_INDEX 127
+#define I40E_VF_ATQH_ATQH_SHIFT 0
+#define I40E_VF_ATQH_ATQH_MASK (0x3FF << I40E_VF_ATQH_ATQH_SHIFT)
+#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQLEN_MAX_INDEX 127
+#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQT_MAX_INDEX 127
+#define I40E_VF_ATQT_ATQT_SHIFT 0
+#define I40E_VF_ATQT_ATQT_MASK (0x3FF << I40E_VF_ATQT_ATQT_SHIFT)
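+
+/*
+ * A hedged example of the parameterized per-VF macros above: each takes
+ * a VF index (0 through the matching *_MAX_INDEX) and expands to a byte
+ * offset, e.g. reading VF 5's admin receive queue head:
+ *
+ *	u32 head = rd32(hw, I40E_VF_ARQH(5)) & I40E_VF_ARQH_ARQH_MASK;
+ */
+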
+#define I40E_PRT_L2TAGSEN 0x001C0B20
+#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
+#define I40E_PRT_L2TAGSEN_ENABLE_MASK (0xFF << I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA 0x0010C080
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK (0xFFF << I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO 0x0010C000
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LANCTXCTL(_pf) (0x0010C300 + ((_pf) * 4)) /* _pf=0..15 */
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK (0xFFF << I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK (0x7 << I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK (0x3 << I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
+#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK (0x3 << I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
+#define I40E_PFCM_LANCTXDATA(_i, _pf) (0x0010C100 + ((_i) * 4) + ((_pf) * 16)) /* _i=0...3 _pf=0..15 */
+#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
+#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
+#define I40E_PFCM_LANCTXDATA_DATA_MASK (0xFFFFFFFF << I40E_PFCM_LANCTXDATA_DATA_SHIFT)
+#define I40E_PFCM_LANCTXSTAT(_pf) (0x0010C380 + ((_pf) * 4)) /* _pf=0..15 */
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA 0x00138D00
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO 0x00138C80
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
+#define I40E_GLDCB_GENC 0x00083044
+#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
+#define I40E_GLDCB_GENC_PCIRTT_MASK (0xFFFF << I40E_GLDCB_GENC_PCIRTT_SHIFT)
+#define I40E_GLDCB_RUPTI 0x00122618
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK (0xFFFFFFFF << I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
+#define I40E_PRTDCB_FCCFG 0x001E4640
+#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
+#define I40E_PRTDCB_FCCFG_TFCE_MASK (0x3 << I40E_PRTDCB_FCCFG_TFCE_SHIFT)
+#define I40E_PRTDCB_FCRTV 0x001E4600
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK (0xFFFF << I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
+#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
+#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
+#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
+#define I40E_PRTDCB_GENC 0x00083000
+#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
+#define I40E_PRTDCB_GENC_RESERVED_1_MASK (0x3 << I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
+#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
+#define I40E_PRTDCB_GENC_NUMTC_MASK (0xF << I40E_PRTDCB_GENC_NUMTC_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
+#define I40E_PRTDCB_GENC_FCOEUP_MASK (0x7 << I40E_PRTDCB_GENC_FCOEUP_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK (0x1 << I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
+#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
+#define I40E_PRTDCB_GENC_PFCLDA_MASK (0xFFFF << I40E_PRTDCB_GENC_PFCLDA_SHIFT)
+#define I40E_PRTDCB_GENS 0x00083020
+#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
+#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK (0x7 << I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
+#define I40E_PRTDCB_MFLCN 0x001E2400
+#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
+#define I40E_PRTDCB_MFLCN_PMCF_MASK (0x1 << I40E_PRTDCB_MFLCN_PMCF_SHIFT)
+#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
+#define I40E_PRTDCB_MFLCN_DPF_MASK (0x1 << I40E_PRTDCB_MFLCN_DPF_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
+#define I40E_PRTDCB_MFLCN_RPFCM_MASK (0x1 << I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
+#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
+#define I40E_PRTDCB_MFLCN_RFCE_MASK (0x1 << I40E_PRTDCB_MFLCN_RFCE_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
+#define I40E_PRTDCB_MFLCN_RPFCE_MASK (0xFF << I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
+#define I40E_PRTDCB_RETSC 0x001223E0
+#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
+#define I40E_PRTDCB_RETSC_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK (0xF << I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
+#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
+#define I40E_PRTDCB_RETSC_LLTC_MASK (0xFF << I40E_PRTDCB_RETSC_LLTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
+#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
+#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK (0x7F << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK (0x1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK (0x1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RPPMC 0x001223A0
+#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
+#define I40E_PRTDCB_RPPMC_LANRPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
+#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK (0xFF << I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
+#define I40E_PRTDCB_RUP 0x001C0B00
+#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
+#define I40E_PRTDCB_RUP_NOVLANUP_MASK (0x7 << I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
+#define I40E_PRTDCB_RUP2TC 0x001C09A0
+#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
+#define I40E_PRTDCB_RUP2TC_UP0TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
+#define I40E_PRTDCB_RUP2TC_UP1TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
+#define I40E_PRTDCB_RUP2TC_UP2TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
+#define I40E_PRTDCB_RUP2TC_UP4TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
+#define I40E_PRTDCB_RUP2TC_UP5TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
+#define I40E_PRTDCB_RUP2TC_UP6TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
+#define I40E_PRTDCB_RUP2TC_UP7TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_TC2PFC 0x001C0980
+#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK (0xFF << I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCPMC 0x000A21A0
+#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_CPM_MASK (0x1FFF << I40E_PRTDCB_TCPMC_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_LLTC_MASK (0xFF << I40E_PRTDCB_TCPMC_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TDPMC 0x000A0180
+#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
+#define I40E_PRTDCB_TDPMC_DPM_MASK (0xFF << I40E_PRTDCB_TDPMC_DPM_SHIFT)
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TDPUC 0x00044100
+#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT 0
+#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_MASK (0xFFFF << I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB 0x000AE060
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB 0x00098060
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
+#define I40E_PRTDCB_TFCS 0x001E4560
+#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
+#define I40E_PRTDCB_TFCS_TXOFF_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
+#define I40E_PRTDCB_TFCS_TXOFF0_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
+#define I40E_PRTDCB_TFCS_TXOFF1_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
+#define I40E_PRTDCB_TFCS_TXOFF2_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
+#define I40E_PRTDCB_TFCS_TXOFF3_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
+#define I40E_PRTDCB_TFCS_TXOFF4_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
+#define I40E_PRTDCB_TFCS_TXOFF5_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
+#define I40E_PRTDCB_TFCS_TXOFF6_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
+#define I40E_PRTDCB_TFCS_TXOFF7_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
+#define I40E_PRTDCB_TFWSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TFWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TFWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TFWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TFWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK (0x3FFF << I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
+#define I40E_GLFCOE_RCTL 0x00269B94
+#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
+#define I40E_GLFCOE_RCTL_FCOEVER_MASK (0xF << I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
+#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
+#define I40E_GLFCOE_RCTL_SAVBAD_MASK (0x1 << I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
+#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
+#define I40E_GLFCOE_RCTL_ICRC_MASK (0x1 << I40E_GLFCOE_RCTL_ICRC_SHIFT)
+#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
+#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK (0x3FFF << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
+#define I40E_GL_FWSTS 0x00083048
+#define I40E_GL_FWSTS_FWS0B_SHIFT 0
+#define I40E_GL_FWSTS_FWS0B_MASK (0xFF << I40E_GL_FWSTS_FWS0B_SHIFT)
+#define I40E_GL_FWSTS_FWRI_SHIFT 9
+#define I40E_GL_FWSTS_FWRI_MASK (0x1 << I40E_GL_FWSTS_FWRI_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_SHIFT 16
+#define I40E_GL_FWSTS_FWS1B_MASK (0xFF << I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GLGEN_CLKSTAT 0x000B8184
+#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
+#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK (0x1 << I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK (0x3 << I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */
+#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK (0x3 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK (0x7 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK (0xF << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK (0x3 << I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK (0x3F << I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_SET 0x00088184
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK (0x1F << I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK (0x1 << I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK (0x1 << I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
+#define I40E_GLGEN_GPIO_STAT 0x0008817C
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
+#define I40E_GLGEN_GPIO_TRANSIT 0x00088180
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
+#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_I2CCMD_MAX_INDEX 3
+#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
+#define I40E_GLGEN_I2CCMD_DATA_MASK (0xFFFF << I40E_GLGEN_I2CCMD_DATA_SHIFT)
+#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
+#define I40E_GLGEN_I2CCMD_REGADD_MASK (0xFF << I40E_GLGEN_I2CCMD_REGADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
+#define I40E_GLGEN_I2CCMD_PHYADD_MASK (0x7 << I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_OP_SHIFT 27
+#define I40E_GLGEN_I2CCMD_OP_MASK (0x1 << I40E_GLGEN_I2CCMD_OP_SHIFT)
+#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
+#define I40E_GLGEN_I2CCMD_RESET_MASK (0x1 << I40E_GLGEN_I2CCMD_RESET_SHIFT)
+#define I40E_GLGEN_I2CCMD_R_SHIFT 29
+#define I40E_GLGEN_I2CCMD_R_MASK (0x1 << I40E_GLGEN_I2CCMD_R_SHIFT)
+#define I40E_GLGEN_I2CCMD_E_SHIFT 31
+#define I40E_GLGEN_I2CCMD_E_MASK (0x1 << I40E_GLGEN_I2CCMD_E_SHIFT)
+#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK (0x1F << I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK (0x7 << I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
+#define I40E_GLGEN_I2CPARAMS_CLK_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
+#define I40E_GLGEN_LED_CTL 0x00088178
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK (0x1 << I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK (0x1FFFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK (0x1 << I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK (0x3FFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
+#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MSCA_MAX_INDEX 3
+#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
+#define I40E_GLGEN_MSCA_MDIADD_MASK (0xFFFF << I40E_GLGEN_MSCA_MDIADD_SHIFT)
+#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
+#define I40E_GLGEN_MSCA_DEVADD_MASK (0x1F << I40E_GLGEN_MSCA_DEVADD_SHIFT)
+#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
+#define I40E_GLGEN_MSCA_PHYADD_MASK (0x1F << I40E_GLGEN_MSCA_PHYADD_SHIFT)
+#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
+#define I40E_GLGEN_MSCA_OPCODE_MASK (0x3 << I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
+#define I40E_GLGEN_MSCA_STCODE_MASK (0x3 << I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
+#define I40E_GLGEN_MSCA_MDICMD_MASK (0x1 << I40E_GLGEN_MSCA_MDICMD_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK (0x1 << I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MSRWD_MAX_INDEX 3
+#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
+#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
+#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
+#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK (0x1F << I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK (0xFF << I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
+#define I40E_GLGEN_PE_ENA 0x000B81A0
+#define I40E_GLGEN_PE_ENA_PE_ENA_SHIFT 0
+#define I40E_GLGEN_PE_ENA_PE_ENA_MASK (0x1 << I40E_GLGEN_PE_ENA_PE_ENA_SHIFT)
+#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT 1
+#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_MASK (0x3 << I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT)
+#define I40E_GLGEN_RSTAT 0x000B8188
+#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
+#define I40E_GLGEN_RSTAT_DEVSTATE_MASK (0x3 << I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
+#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
+#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK (0x3 << I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
+#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
+#define I40E_GLGEN_RSTAT_CORERCNT_MASK (0x3 << I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
+#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
+#define I40E_GLGEN_RSTAT_EMPRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK (0x3F << I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
+#define I40E_GLGEN_RSTCTL 0x000B8180
+#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
+#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK (0x3F << I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
+#define I40E_GLGEN_RSTENA_EMP 0x000B818C
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
+#define I40E_GLGEN_RTRIG 0x000B8190
+#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
+#define I40E_GLGEN_RTRIG_CORER_MASK (0x1 << I40E_GLGEN_RTRIG_CORER_SHIFT)
+#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
+#define I40E_GLGEN_RTRIG_GLOBR_MASK (0x1 << I40E_GLGEN_RTRIG_GLOBR_SHIFT)
+#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
+#define I40E_GLGEN_RTRIG_EMPFWR_MASK (0x1 << I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
+#define I40E_GLGEN_STAT 0x000B612C
+#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
+#define I40E_GLGEN_STAT_HWRSVD0_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD0_SHIFT)
+#define I40E_GLGEN_STAT_DCBEN_SHIFT 2
+#define I40E_GLGEN_STAT_DCBEN_MASK (0x1 << I40E_GLGEN_STAT_DCBEN_SHIFT)
+#define I40E_GLGEN_STAT_VTEN_SHIFT 3
+#define I40E_GLGEN_STAT_VTEN_MASK (0x1 << I40E_GLGEN_STAT_VTEN_SHIFT)
+#define I40E_GLGEN_STAT_FCOEN_SHIFT 4
+#define I40E_GLGEN_STAT_FCOEN_MASK (0x1 << I40E_GLGEN_STAT_FCOEN_SHIFT)
+#define I40E_GLGEN_STAT_EVBEN_SHIFT 5
+#define I40E_GLGEN_STAT_EVBEN_MASK (0x1 << I40E_GLGEN_STAT_EVBEN_SHIFT)
+#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
+#define I40E_GLGEN_STAT_HWRSVD1_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD1_SHIFT)
+#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
+#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
+#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK (0xFFFFFFFF << I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
+#define I40E_GLVFGEN_TIMER 0x000881BC
+#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
+#define I40E_GLVFGEN_TIMER_GTIME_MASK (0xFFFFFFFF << I40E_GLVFGEN_TIMER_GTIME_SHIFT)
+#define I40E_PFGEN_CTRL 0x00092400
+#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
+#define I40E_PFGEN_CTRL_PFSWR_MASK (0x1 << I40E_PFGEN_CTRL_PFSWR_SHIFT)
+#define I40E_PFGEN_DRUN 0x00092500
+#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
+#define I40E_PFGEN_DRUN_DRVUNLD_MASK (0x1 << I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
+#define I40E_PFGEN_PORTNUM 0x001C0480
+#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_STATE 0x00088000
+#define I40E_PFGEN_STATE_PFPEEN_SHIFT 0
+#define I40E_PFGEN_STATE_PFPEEN_MASK (0x1 << I40E_PFGEN_STATE_PFPEEN_SHIFT)
+#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
+#define I40E_PFGEN_STATE_PFFCEN_MASK (0x1 << I40E_PFGEN_STATE_PFFCEN_SHIFT)
+#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
+#define I40E_PFGEN_STATE_PFLINKEN_MASK (0x1 << I40E_PFGEN_STATE_PFLINKEN_SHIFT)
+#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
+#define I40E_PFGEN_STATE_PFSCEN_MASK (0x1 << I40E_PFGEN_STATE_PFSCEN_SHIFT)
+#define I40E_PRTGEN_CNF 0x000B8120
+#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
+#define I40E_PRTGEN_CNF_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF2 0x000B8160
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK (0x1 << I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
+#define I40E_PRTGEN_STATUS 0x000B8100
+#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
+#define I40E_PRTGEN_STATUS_PORT_VALID_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
+#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFGEN_RSTAT1_MAX_INDEX 127
+#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
+#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
+#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
+#define I40E_VPGEN_VFRSTAT_VFRD_MASK (0x1 << I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
+#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
+#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
+#define I40E_VPGEN_VFRTRIG_VFSWR_MASK (0x1 << I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
+#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSIGEN_RSTAT_MAX_INDEX 383
+#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
+#define I40E_VSIGEN_RSTAT_VMRD_MASK (0x1 << I40E_VSIGEN_RSTAT_VMRD_SHIFT)
+#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSIGEN_RTRIG_MAX_INDEX 383
+#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
+#define I40E_VSIGEN_RTRIG_VMSWR_MASK (0x1 << I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
+#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_CEQPART_MAX_INDEX 15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
+#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK (0xFFFFF << I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
+#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK (0xF << I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
+#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK (0x7FFFFF << I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
+#define I40E_GLHMC_FCOEFMAX 0x000C20D0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK (0xFFFF << I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
+#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK (0xF << I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEMAX 0x000C2014
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK (0x1FFF << I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
+#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_FSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_FSIAVMAX 0x000C2068
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK (0x1FFFF << I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
+#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK (0xF << I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
+#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
+#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK (0x1FFFFFFF << I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
+#define I40E_GLHMC_FSIMCMAX 0x000C2060
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK (0x3FFF << I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
+#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK (0xF << I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
+#define I40E_GLHMC_LANQMAX 0x000C2008
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK (0x7FF << I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
+#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
+#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK (0x7FF << I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
+#define I40E_GLHMC_LANRXOBJSZ 0x000C200c
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK (0xF << I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
+#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
+#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
+#define I40E_GLHMC_LANTXBASE_RSVD_MASK (0xFF << I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
+#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK (0x7FF << I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
+#define I40E_GLHMC_LANTXOBJSZ 0x000C2004
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK (0xF << I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX 0x000C2038
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK (0x1FFFF << I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ 0x000C2034
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK (0x7 << I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ 0x000C2020
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK (0xF << I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK (0xF << I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX 0x000C2030
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK (0x1FFFFF << I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX 0x000C2040
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK (0x7FFFFF << I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ 0x000C203c
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK (0xF << I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX 0x000C206c
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLCNT(_i) (0x000C5500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1FLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
+#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX 0x000C2058
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX 0x000C2054
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK (0xF << I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ 0x000C201c
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK (0xF << I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX 0x000C2028
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK (0xFFFF << I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK (0xF << I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT 4
+#define I40E_GLHMC_PESRQOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX 0x000C2084
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK (0xF << I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLCNT(_i) (0x000C5100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFFLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
+#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX 0x000C204c
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x1FFFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX 0x000C2048
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ 0x000C2044
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK (0xF << I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT 4
+#define I40E_GLHMC_PEXFOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT)
+#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK (0xF << I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
+#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_SDPART_MAX_INDEX 15
+#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_SDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_SDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_VFFSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK (0xFFF << I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK (0x1FF << I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLCNT(_i) (0x000Cd500 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1FLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFFLCNT(_i) (0x000Cd100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFFLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
+#define I40E_PFHMC_ERRORDATA 0x000C0500
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK (0x3FFFFFFF << I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
+#define I40E_PFHMC_ERRORINFO 0x000C0400
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK (0x1F << I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK (0x1 << I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK (0xF << I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK (0x1F << I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK (0x1 << I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
+#define I40E_PFHMC_PDINV 0x000C0300
+#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_PDINV_PMSDIDX_MASK (0xFFF << I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
+#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
+#define I40E_PFHMC_PDINV_PMPDIDX_MASK (0x1FF << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD 0x000C0000
+#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_SDCMD_PMSDIDX_MASK (0xFFF << I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK (0x1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
+#define I40E_PFHMC_SDDATAHIGH 0x000C0200
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK (0xFFFFFFFF << I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
+#define I40E_PFHMC_SDDATALOW 0x000C0100
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK (0x3FF << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK (0xFFFFF << I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
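Taken together, the PFHMC_SD* registers above form the PF's interface for programming one HMC segment descriptor: the backing address is split across SDDATAHIGH and the PMSDDATALOW field, SDDATALOW also carries the valid bit, descriptor type and backing-page count, and a write to SDCMD with PMSDWR set commits the staged data into the descriptor selected by PMSDIDX. A minimal sketch of that sequence, under those assumptions (the function name, the `hw_addr` MMIO mapping and the use of plain writel() are illustrative, not part of this patch):

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative sketch only: stage and latch one HMC segment descriptor.
 * @sd_type: PMSDTYPE encoding (paged vs. direct backing, assumed). */
static void sketch_hmc_set_sd(void __iomem *hw_addr, u32 sd_index,
			      u64 pa, u32 sd_type, u32 bp_count)
{
	u32 low = ((u32)pa & I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK) |
		  ((bp_count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) &
		   I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK) |
		  ((sd_type << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) &
		   I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK) |
		  I40E_PFHMC_SDDATALOW_PMSDVALID_MASK;

	/* stage the descriptor contents */
	writel((u32)(pa >> 32), hw_addr + I40E_PFHMC_SDDATAHIGH);
	writel(low, hw_addr + I40E_PFHMC_SDDATALOW);
	/* writing SDCMD with PMSDWR set commits the staged descriptor */
	writel(I40E_PFHMC_SDCMD_PMSDWR_MASK |
	       ((sd_index << I40E_PFHMC_SDCMD_PMSDIDX_SHIFT) &
		I40E_PFHMC_SDCMD_PMSDIDX_MASK),
	       hw_addr + I40E_PFHMC_SDCMD);
}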
+#define I40E_GL_UFUSE 0x00094008
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK (0x1 << I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
+#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_NIC_ID_MASK (0x1 << I40E_GL_UFUSE_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
+#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
+#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
+#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
+#define I40E_EMPINT_GPIO_ENA 0x00088188
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK (0x1 << I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL 0x00038700
+#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _INTPF=0...511 */
+#define I40E_PFINT_CEQCTL_MAX_INDEX 511
+#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_DYN_CTL0 0x00038480
+#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _INTPF=0...511 */
+#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
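The PFINT_DYN_CTL0/CTLN pair above is the per-vector doorbell a driver writes from its interrupt path: INTENA re-arms the vector, CLEARPBA clears its pending bit, and ITR_INDX selects which throttling timer applies to the write (index 0x3 conventionally meaning "no ITR"). A hedged sketch of the usual re-enable write; the helper name and `hw_addr` mapping are assumptions:

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative sketch only: re-arm PF MSI-X vector @vector after
 * servicing it, clearing its pending-bit-array entry on the way. */
static void sketch_irq_reenable(void __iomem *hw_addr, u32 vector)
{
	u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
		  I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
		  (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); /* no ITR */

	writel(val, hw_addr + I40E_PFINT_DYN_CTLN(vector));
}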
+#define I40E_PFINT_GPIO_ENA 0x00088080
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFINT_ICR0 0x00038780
+#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_PFINT_ICR0_INTEVENT_MASK (0x1 << I40E_PFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_PFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_PFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_PFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_PFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
+#define I40E_PFINT_ICR0_QUEUE_4_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_4_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
+#define I40E_PFINT_ICR0_QUEUE_5_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_5_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
+#define I40E_PFINT_ICR0_QUEUE_6_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_6_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
+#define I40E_PFINT_ICR0_QUEUE_7_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_7_SHIFT)
+#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_GRST_MASK (0x1 << I40E_PFINT_ICR0_GRST_SHIFT)
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_VFLR_MASK (0x1 << I40E_PFINT_ICR0_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_SWINT_SHIFT 31
+#define I40E_PFINT_ICR0_SWINT_MASK (0x1 << I40E_PFINT_ICR0_SWINT_SHIFT)
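PFINT_ICR0 above is the PF's "other cause" interrupt register, and its ICR0_ENA twin below gates which of the same causes may assert. A handler typically reads ICR0 once (the read is assumed here to clear the latched causes) and tests the cause bits, roughly as in this sketch; the function name and pr_info() reporting are illustrative only:

#include <linux/io.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Illustrative sketch only: one pass over the PF "other cause"
 * register, dispatching on a few of the cause bits defined above. */
static void sketch_handle_icr0(void __iomem *hw_addr)
{
	u32 icr0 = readl(hw_addr + I40E_PFINT_ICR0); /* assumed read-to-clear */

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
		pr_info("sketch: admin queue event\n");
	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
		pr_info("sketch: VF function-level reset\n");
	if (icr0 & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)
		pr_info("sketch: link status change\n");
}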
+#define I40E_PFINT_ICR0_ENA 0x00038800
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_ENA_GRST_MASK (0x1 << I40E_PFINT_ICR0_ENA_GRST_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_ENA_VFLR_MASK (0x1 << I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_PFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */
+#define I40E_PFINT_ITR0_MAX_INDEX 2
+#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_PFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */
+#define I40E_PFINT_ITRN_MAX_INDEX 2
+#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_PFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_PFINT_LNKLST0 0x00038500
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _INTPF=0...511 */
+#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_RATE0 0x00038580
+#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATE0_INTERVAL_MASK (0x3F << I40E_PFINT_RATE0_INTERVAL_SHIFT)
+#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _INTPF=0...511 */
+#define I40E_PFINT_RATEN_MAX_INDEX 511
+#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATEN_INTERVAL_MASK (0x3F << I40E_PFINT_RATEN_INTERVAL_SHIFT)
+#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_PFINT_STAT_CTL0 0x00038400
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _Q=0...1535 */
+#define I40E_QINT_RQCTL_MAX_INDEX 1535
+#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_RQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_RQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_RQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_RQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_RQCTL_INTEVENT_MASK (0x1 << I40E_QINT_RQCTL_INTEVENT_SHIFT)
+#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _Q=0...1535 */
+#define I40E_QINT_TQCTL_MAX_INDEX 1535
+#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_TQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_TQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_TQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_TQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_TQCTL_INTEVENT_MASK (0x1 << I40E_QINT_TQCTL_INTEVENT_SHIFT)
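QINT_RQCTL/TQCTL above, together with the PFINT_LNKLST* registers earlier, describe the per-queue interrupt linkage: each queue names its MSI-X vector and ITR index and points at the next queue on the same vector via NEXTQ_INDX. A sketch of mapping a single Tx queue to a vector; the Tx ITR index of 1 and the all-ones (0x7FF) end-of-list encoding used here are assumptions consistent with the field widths, not statements from this patch:

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative sketch only: point Tx queue @queue at MSI-X vector
 * @vector, make it the last queue on that vector, enable the cause. */
static void sketch_map_tx_queue(void __iomem *hw_addr, u32 queue, u32 vector)
{
	u32 val = (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
		  (1 << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |	/* assumed Tx ITR */
		  (0x7FF << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |	/* end of list */
		  I40E_QINT_TQCTL_CAUSE_ENA_MASK;

	writel(val, hw_addr + I40E_QINT_TQCTL(queue));
}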
+#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
+#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _INTVF=0...511 */
+#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFINT_ICR0_MAX_INDEX 127
+#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR0_INTEVENT_MASK (0x1 << I40E_VFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_SWINT_SHIFT 31
+#define I40E_VFINT_ICR0_SWINT_MASK (0x1 << I40E_VFINT_ICR0_SWINT_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */
+#define I40E_VFINT_ITR0_MAX_INDEX 2
+#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */
+#define I40E_VFINT_ITRN_MAX_INDEX 2
+#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPINT_AEQCTL_MAX_INDEX 127
+#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _INTVF=0...511 */
+#define I40E_VPINT_CEQCTL_MAX_INDEX 511
+#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPINT_LNKLST0_MAX_INDEX 127
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _INTVF=0...511 */
+#define I40E_VPINT_LNKLSTN_MAX_INDEX 511
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPINT_RATE0_MAX_INDEX 127
+#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATE0_INTERVAL_MASK (0x3F << I40E_VPINT_RATE0_INTERVAL_SHIFT)
+#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _INTVF=0...511 */
+#define I40E_VPINT_RATEN_MAX_INDEX 511
+#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATEN_INTERVAL_MASK (0x3F << I40E_VPINT_RATEN_INTERVAL_SHIFT)
+#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_GL_RDPU_CNTRL 0x00051060
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK (0x1 << I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
+#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
+#define I40E_GL_RDPU_CNTRL_ECO_MASK (0x7FFFFFFF << I40E_GL_RDPU_CNTRL_ECO_SHIFT)
+#define I40E_GLLAN_RCTL_0 0x0012A500
+#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
+#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK (0x1 << I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
+#define I40E_GLLAN_TSOMSK_F 0x000442D8
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK (0xFFF << I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
+#define I40E_GLLAN_TSOMSK_L 0x000442E0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK (0xFFF << I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
+#define I40E_GLLAN_TSOMSK_M 0x000442DC
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_PFLAN_QALLOC 0x001C0400
+#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
+#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
+#define I40E_PFLAN_QALLOC_LASTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
+#define I40E_PFLAN_QALLOC_VALID_MASK (0x1 << I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _Q=0...1535 */
+#define I40E_QRX_ENA_MAX_INDEX 1535
+#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QRX_ENA_QENA_REQ_MASK (0x1 << I40E_QRX_ENA_QENA_REQ_SHIFT)
+#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QRX_ENA_FAST_QDIS_MASK (0x1 << I40E_QRX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QRX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QRX_ENA_QENA_STAT_MASK (0x1 << I40E_QRX_ENA_QENA_STAT_SHIFT)
+#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _Q=0...1535 */
+#define I40E_QRX_TAIL_MAX_INDEX 1535
+#define I40E_QRX_TAIL_TAIL_SHIFT 0
+#define I40E_QRX_TAIL_TAIL_MASK (0x1FFF << I40E_QRX_TAIL_TAIL_SHIFT)
+#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _Q=0...1535 */
+#define I40E_QTX_CTL_MAX_INDEX 1535
+#define I40E_QTX_CTL_PFVF_Q_SHIFT 0
+#define I40E_QTX_CTL_PFVF_Q_MASK (0x3 << I40E_QTX_CTL_PFVF_Q_SHIFT)
+#define I40E_QTX_CTL_PF_INDX_SHIFT 2
+#define I40E_QTX_CTL_PF_INDX_MASK (0xF << I40E_QTX_CTL_PF_INDX_SHIFT)
+#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
+#define I40E_QTX_CTL_VFVM_INDX_MASK (0x1FF << I40E_QTX_CTL_VFVM_INDX_SHIFT)
+#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _Q=0...1535 */
+#define I40E_QTX_ENA_MAX_INDEX 1535
+#define I40E_QTX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QTX_ENA_QENA_REQ_MASK (0x1 << I40E_QTX_ENA_QENA_REQ_SHIFT)
+#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QTX_ENA_FAST_QDIS_MASK (0x1 << I40E_QTX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QTX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QTX_ENA_QENA_STAT_MASK (0x1 << I40E_QTX_ENA_QENA_STAT_SHIFT)
+#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _Q=0...1535 */
+#define I40E_QTX_HEAD_MAX_INDEX 1535
+#define I40E_QTX_HEAD_HEAD_SHIFT 0
+#define I40E_QTX_HEAD_HEAD_MASK (0x1FFF << I40E_QTX_HEAD_HEAD_SHIFT)
+#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
+#define I40E_QTX_HEAD_RS_PENDING_MASK (0x1 << I40E_QTX_HEAD_RS_PENDING_SHIFT)
+#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _Q=0...1535 */
+#define I40E_QTX_TAIL_MAX_INDEX 1535
+#define I40E_QTX_TAIL_TAIL_SHIFT 0
+#define I40E_QTX_TAIL_TAIL_MASK (0x1FFF << I40E_QTX_TAIL_TAIL_SHIFT)
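QRX_TAIL/QTX_TAIL above are the producer doorbells: after posting descriptors, the driver writes its next-to-use ring index to the queue's tail register so hardware knows how far it may fetch. A sketch under the same assumed `hw_addr` mapping as the earlier examples:

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative sketch only: publish newly posted Tx descriptors by
 * moving the queue's tail pointer to the ring's next-to-use index. */
static void sketch_bump_tx_tail(void __iomem *hw_addr, u32 queue,
				u32 next_to_use)
{
	writel(next_to_use & I40E_QTX_TAIL_TAIL_MASK,
	       hw_addr + I40E_QTX_TAIL(queue));
}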
+#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPLAN_MAPENA_MAX_INDEX 127
+#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
+#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK (0x1 << I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
+#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VPLAN_QTABLE_MAX_INDEX 15
+#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
+#define I40E_VPLAN_QTABLE_QINDEX_MASK (0x7FF << I40E_VPLAN_QTABLE_QINDEX_SHIFT)
+#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _VSI=0...383 */
+#define I40E_VSILAN_QBASE_MAX_INDEX 383
+#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
+#define I40E_VSILAN_QBASE_VSIBASE_MASK (0x7FF << I40E_VSILAN_QBASE_VSIBASE_SHIFT)
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK (0x1 << I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
+#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */
+#define I40E_VSILAN_QTABLE_MAX_INDEX 7
+#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
+#define I40E_VSILAN_QTABLE_QINDEX_0_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
+#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
+#define I40E_VSILAN_QTABLE_QINDEX_1_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
+#define I40E_PRTGL_SAH 0x001E2140
+#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
+#define I40E_PRTGL_SAH_FC_SAH_MASK (0xFFFF << I40E_PRTGL_SAH_FC_SAH_SHIFT)
+#define I40E_PRTGL_SAH_MFS_SHIFT 16
+#define I40E_PRTGL_SAH_MFS_MASK (0xFFFF << I40E_PRTGL_SAH_MFS_SHIFT)
+#define I40E_PRTGL_SAL 0x001E2120
+#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
+#define I40E_PRTGL_SAL_FC_SAL_MASK (0xFFFFFFFF << I40E_PRTGL_SAL_FC_SAL_SHIFT)
+#define I40E_PRTMAC_HLCTLA 0x001E4760
+#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT 0
+#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT)
+#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT 1
+#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_MASK (0x1 << I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT)
+#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT 2
+#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT)
+#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT 4
+#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT)
+#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT 7
+#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP 0x001E3130
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP 0x001E3290
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP 0x001E3310
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP 0x001E3100
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP 0x001E3280
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP 0x001E3300
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE 0x001E3000
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSECTL1 0x001E3560
+#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT 0
+#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT)
+#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT 3
+#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT)
+#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT 4
+#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT)
+#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT 7
+#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT)
+#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT 30
+#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT)
+#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT 31
+#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
+#define I40E_GL_MNG_FWSM 0x000B6134
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 1
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x7 << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 6
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK (0x1 << I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK (0xF << I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK (0x1 << I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
+#define I40E_GL_MNG_FWSM_RESET_CNT_MASK (0x7 << I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK (0x3F << I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_RSVD_SHIFT 25
+#define I40E_GL_MNG_FWSM_RSVD_MASK (0x1 << I40E_GL_MNG_FWSM_RSVD_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_HWARB_CTRL 0x000B6130
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK (0x1 << I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
+#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */
+#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
+#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK (0xFF << I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
+#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
+#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK (0xFFFF << I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
+#define I40E_PRT_MNG_MANC 0x00256A20
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
+#define I40E_PRT_MNG_MANC_RCV_ALL_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
+#define I40E_PRT_MNG_MANC_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
+#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
+#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
+#define I40E_PRT_MNG_MAVTV_VID_MASK (0xFFF << I40E_PRT_MNG_MAVTV_VID_SHIFT)
+#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MDEF_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
+#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK (0xFF << I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK (0xFFFF << I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
+#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
+#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_METF_MAX_INDEX 3
+#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
+#define I40E_PRT_MNG_METF_ETYPE_MASK (0xFFFF << I40E_PRT_MNG_METF_ETYPE_SHIFT)
+#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
+#define I40E_PRT_MNG_METF_POLARITY_MASK (0x1 << I40E_PRT_MNG_METF_POLARITY_SHIFT)
+#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK (0xFFFF << I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
+#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
+#define I40E_PRT_MNG_MFUTP_UDP_MASK (0x1 << I40E_PRT_MNG_MFUTP_UDP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
+#define I40E_PRT_MNG_MFUTP_TCP_MASK (0x1 << I40E_PRT_MNG_MFUTP_TCP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK (0x1 << I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
+#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
+#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
+#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MMAH_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
+#define I40E_PRT_MNG_MMAH_MMAH_MASK (0xFFFF << I40E_PRT_MNG_MMAH_MMAH_SHIFT)
+#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MMAL_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
+#define I40E_PRT_MNG_MMAL_MMAL_MASK (0xFFFFFFFF << I40E_PRT_MNG_MMAL_MMAL_SHIFT)
+#define I40E_PRT_MNG_MNGONLY 0x00256A60
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK (0xFF << I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
+#define I40E_PRT_MNG_MSFM 0x00256AA0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
+#define I40E_MSIX_PBA(_i) (0x00004900 + ((_i) * 4)) /* _i=0...5 */
+#define I40E_MSIX_PBA_MAX_INDEX 5
+#define I40E_MSIX_PBA_PENBIT_SHIFT 0
+#define I40E_MSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_MSIX_PBA_PENBIT_SHIFT)
+#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TADD_MAX_INDEX 128
+#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_MSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_MSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_MSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_MSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TMSG_MAX_INDEX 128
+#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_MSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TUADD_MAX_INDEX 128
+#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_MSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TVCTRL_MAX_INDEX 128
+#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_MSIX_TVCTRL_MASK_MASK (0x1 << I40E_MSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFMSIX_PBA1(_i) (0x00004944 + ((_i) * 4)) /* _i=0...19 */
+#define I40E_VFMSIX_PBA1_MAX_INDEX 19
+#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA1_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA1_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD1_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TMSG1_MAX_INDEX 639
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TUADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
+#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL1_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108
+#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
+#define I40E_GLNVM_FLA_FL_SCK_MASK (0x1 << I40E_GLNVM_FLA_FL_SCK_SHIFT)
+#define I40E_GLNVM_FLA_FL_CE_SHIFT 1
+#define I40E_GLNVM_FLA_FL_CE_MASK (0x1 << I40E_GLNVM_FLA_FL_CE_SHIFT)
+#define I40E_GLNVM_FLA_FL_SI_SHIFT 2
+#define I40E_GLNVM_FLA_FL_SI_MASK (0x1 << I40E_GLNVM_FLA_FL_SI_SHIFT)
+#define I40E_GLNVM_FLA_FL_SO_SHIFT 3
+#define I40E_GLNVM_FLA_FL_SO_MASK (0x1 << I40E_GLNVM_FLA_FL_SO_SHIFT)
+#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
+#define I40E_GLNVM_FLA_FL_REQ_MASK (0x1 << I40E_GLNVM_FLA_FL_REQ_SHIFT)
+#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
+#define I40E_GLNVM_FLA_FL_GNT_MASK (0x1 << I40E_GLNVM_FLA_FL_GNT_SHIFT)
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK (0x1 << I40E_GLNVM_FLA_LOCKED_SHIFT)
+#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
+#define I40E_GLNVM_FLA_FL_SADDR_MASK (0x7FF << I40E_GLNVM_FLA_FL_SADDR_SHIFT)
+#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
+#define I40E_GLNVM_FLA_FL_BUSY_MASK (0x1 << I40E_GLNVM_FLA_FL_BUSY_SHIFT)
+#define I40E_GLNVM_FLA_FL_DER_SHIFT 31
+#define I40E_GLNVM_FLA_FL_DER_MASK (0x1 << I40E_GLNVM_FLA_FL_DER_SHIFT)
+#define I40E_GLNVM_FLASHID 0x000B6104
+#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
+#define I40E_GLNVM_FLASHID_FLASHID_MASK (0xFFFFFF << I40E_GLNVM_FLASHID_FLASHID_SHIFT)
+#define I40E_GLNVM_GENS 0x000B6100
+#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
+#define I40E_GLNVM_GENS_NVM_PRES_MASK (0x1 << I40E_GLNVM_GENS_NVM_PRES_SHIFT)
+#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
+#define I40E_GLNVM_GENS_SR_SIZE_MASK (0x7 << I40E_GLNVM_GENS_SR_SIZE_SHIFT)
+#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
+#define I40E_GLNVM_GENS_BANK1VAL_MASK (0x1 << I40E_GLNVM_GENS_BANK1VAL_SHIFT)
+#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
+#define I40E_GLNVM_GENS_ALT_PRST_MASK (0x1 << I40E_GLNVM_GENS_ALT_PRST_SHIFT)
+#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
+#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK (0x1 << I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
+#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */
+#define I40E_GLNVM_PROTCSR_MAX_INDEX 59
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK (0xFFFFFF << I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
+#define I40E_GLNVM_SRCTL 0x000B6110
+#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
+#define I40E_GLNVM_SRCTL_SRBUSY_MASK (0x1 << I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
+#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
+#define I40E_GLNVM_SRCTL_ADDR_MASK (0x7FFF << I40E_GLNVM_SRCTL_ADDR_SHIFT)
+#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
+#define I40E_GLNVM_SRCTL_WRITE_MASK (0x1 << I40E_GLNVM_SRCTL_WRITE_SHIFT)
+#define I40E_GLNVM_SRCTL_START_SHIFT 30
+#define I40E_GLNVM_SRCTL_START_MASK (0x1 << I40E_GLNVM_SRCTL_START_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
+#define I40E_GLNVM_SRCTL_DONE_MASK (0x1 << I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRDATA 0x000B6114
+#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
+#define I40E_GLNVM_SRDATA_WRDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_WRDATA_SHIFT)
+#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
+#define I40E_GLNVM_SRDATA_RDDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_RDDATA_SHIFT)
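+/* Usage sketch (illustrative, not part of this patch): a single shadow-RAM
+ * word is read by latching the word offset and START into GLNVM_SRCTL,
+ * polling for DONE, then pulling the result out of GLNVM_SRDATA.  Assumes
+ * the rd32()/wr32() accessors this driver uses elsewhere:
+ *
+ *	wr32(hw, I40E_GLNVM_SRCTL,
+ *	     (offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+ *	     I40E_GLNVM_SRCTL_START_MASK);
+ *	while (!(rd32(hw, I40E_GLNVM_SRCTL) & I40E_GLNVM_SRCTL_DONE_MASK))
+ *		udelay(5);	/* a real caller would bound this loop */
+ *	data = (u16)((rd32(hw, I40E_GLNVM_SRDATA) &
+ *		      I40E_GLNVM_SRDATA_RDDATA_MASK) >>
+ *		     I40E_GLNVM_SRDATA_RDDATA_SHIFT);
+ */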
+#define I40E_GLNVM_ULD 0x000B6008
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
+#define I40E_GLPCI_BYTCTH 0x0009C484
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_BYTCTL 0x0009C488
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_CAPCTRL 0x000BE4A4
+#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
+#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK (0x1 << I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP 0x000BE4A8
+#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
+#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK (0x1 << I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
+#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
+#define I40E_GLPCI_CAPSUP_LTR_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
+#define I40E_GLPCI_CAPSUP_TPH_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
+#define I40E_GLPCI_CAPSUP_ARI_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
+#define I40E_GLPCI_CAPSUP_IOV_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
+#define I40E_GLPCI_CAPSUP_ACS_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
+#define I40E_GLPCI_CAPSUP_SEC_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
+#define I40E_GLPCI_CAPSUP_IDO_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
+#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK (0x1 << I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
+#define I40E_GLPCI_CNF 0x000BE4C0
+#define I40E_GLPCI_CNF_FLEX10_SHIFT 1
+#define I40E_GLPCI_CNF_FLEX10_MASK (0x1 << I40E_GLPCI_CNF_FLEX10_SHIFT)
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK (0x1 << I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
+#define I40E_GLPCI_CNF2 0x000BE494
+#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
+#define I40E_GLPCI_CNF2_RO_DIS_MASK (0x1 << I40E_GLPCI_CNF2_RO_DIS_SHIFT)
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK (0x1 << I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
+#define I40E_GLPCI_DREVID 0x0009C480
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK (0xFF << I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
+#define I40E_GLPCI_GSCL_1 0x0009C48C
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
+#define I40E_GLPCI_GSCL_2 0x0009C490
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
+#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
+#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK (0xFFFFFFFF << I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
+#define I40E_GLPCI_LATCT 0x0009C4B4
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK (0xFFFFFFFF << I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
+#define I40E_GLPCI_LBARCTRL 0x000BE484
+#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
+#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK (0x1 << I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
+#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
+#define I40E_GLPCI_LBARCTRL_BAR32_MASK (0x1 << I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK (0x1 << I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK (0x3 << I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK (0x1 << I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
+#define I40E_GLPCI_LINKCAP 0x000BE4AC
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK (0x3F << I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK (0x7 << I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK (0xF << I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
+#define I40E_GLPCI_PCIERR 0x000BE4FC
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK (0xFFFFFFFF << I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PCITEST2 0x000BE4BC
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT 0
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_MASK (0x1 << I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT)
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT 1
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_MASK (0x1 << I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT)
+#define I40E_GLPCI_PKTCT 0x0009C4BC
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK (0xFFFFFFFF << I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
+#define I40E_GLPCI_PMSUP 0x000BE4B0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
+#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK (0x1 << I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
+#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
+#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
+#define I40E_GLPCI_PWRDATA 0x000BE490
+#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
+#define I40E_GLPCI_PWRDATA_D0_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
+#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
+#define I40E_GLPCI_PWRDATA_D3_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK (0x3 << I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
+#define I40E_GLPCI_REVID 0x000BE4B4
+#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
+#define I40E_GLPCI_REVID_NVM_REVID_MASK (0xFF << I40E_GLPCI_REVID_NVM_REVID_SHIFT)
+#define I40E_GLPCI_SERH 0x000BE49C
+#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
+#define I40E_GLPCI_SERH_SER_NUM_H_MASK (0xFFFF << I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
+#define I40E_GLPCI_SERL 0x000BE498
+#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
+#define I40E_GLPCI_SERL_SER_NUM_L_MASK (0xFFFFFFFF << I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
+#define I40E_GLPCI_SUBSYSID 0x000BE48C
+#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT 0
+#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT)
+#define I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT 16
+#define I40E_GLPCI_SUBSYSID_SUB_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT)
+#define I40E_GLPCI_UPADD 0x000BE4F8
+#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
+#define I40E_GLPCI_UPADD_ADDRESS_MASK (0x7FFFFFFF << I40E_GLPCI_UPADD_ADDRESS_SHIFT)
+#define I40E_GLPCI_VFSUP 0x000BE4B8
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK (0x1 << I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK (0x1 << I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_PF_FUNC_RID 0x0009C000
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK (0x7 << I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK (0x1F << I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
+#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK (0xFF << I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
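+/* Usage sketch (illustrative, not part of this patch): PF_FUNC_RID packs
+ * the PF's PCI requester ID; the function number is recovered with the
+ * usual mask-and-shift idiom, assuming the rd32() accessor this driver
+ * uses elsewhere:
+ *
+ *	u32 rid = rd32(hw, I40E_PF_FUNC_RID);
+ *	u8 func = (rid & I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK) >>
+ *		  I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT;
+ */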
+#define I40E_PF_PCI_CIAA 0x0009C080
+#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
+#define I40E_PF_PCI_CIAA_ADDRESS_MASK (0xFFF << I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
+#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
+#define I40E_PF_PCI_CIAA_VF_NUM_MASK (0x7F << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
+#define I40E_PF_PCI_CIAD 0x0009C100
+#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
+#define I40E_PF_PCI_CIAD_DATA_MASK (0xFFFFFFFF << I40E_PF_PCI_CIAD_DATA_SHIFT)
+#define I40E_PFPCI_CLASS 0x000BE400
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK (0x1 << I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
+#define I40E_PFPCI_CNF 0x000BE000
+#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
+#define I40E_PFPCI_CNF_MSI_EN_MASK (0x1 << I40E_PFPCI_CNF_MSI_EN_SHIFT)
+#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
+#define I40E_PFPCI_CNF_EXROM_DIS_MASK (0x1 << I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
+#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
+#define I40E_PFPCI_CNF_IO_BAR_MASK (0x1 << I40E_PFPCI_CNF_IO_BAR_SHIFT)
+#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
+#define I40E_PFPCI_CNF_INT_PIN_MASK (0x3 << I40E_PFPCI_CNF_INT_PIN_SHIFT)
+#define I40E_PFPCI_FACTPS 0x0009C180
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK (0x3 << I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK (0x1 << I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
+#define I40E_PFPCI_FUNC 0x000BE200
+#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK (0x1 << I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
+#define I40E_PFPCI_FUNC2 0x000BE180
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_ICAUSE 0x0009C200
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK (0xFFFFFFFF << I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
+#define I40E_PFPCI_IENA 0x0009C280
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK (0xFFFFFFFF << I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
+#define I40E_PFPCI_PFDEVID 0x000BE080
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT 0
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT)
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT 16
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT)
+#define I40E_PFPCI_PM 0x000BE300
+#define I40E_PFPCI_PM_PME_EN_SHIFT 0
+#define I40E_PFPCI_PM_PME_EN_MASK (0x1 << I40E_PFPCI_PM_PME_EN_SHIFT)
+#define I40E_PFPCI_STATUS1 0x000BE280
+#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
+#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK (0x1 << I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
+#define I40E_PFPCI_VFDEVID 0x000BE100
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT 0
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT)
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT 16
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT)
+#define I40E_PFPCI_VMINDEX 0x0009C300
+#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
+#define I40E_PFPCI_VMINDEX_VMINDEX_MASK (0x1FF << I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
+#define I40E_PFPCI_VMPEND 0x0009C380
+#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
+#define I40E_PFPCI_VMPEND_PENDING_MASK (0x1 << I40E_PFPCI_VMPEND_PENDING_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_PFFLMOBJCTRL(_i) (0x0000D480 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPE_PFFLMOBJCTRL_MAX_INDEX 15
+#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK (0x1 << I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK 0x00008600
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK (0x1 << I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8))
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8))
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8))
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8))
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8))
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8))
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8))
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8))
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8))
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8))
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8))
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8))
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8))
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8))
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8))
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8))
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4))
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
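+/* Usage sketch (illustrative only): the ...SLO/...SHI pairs above split one
+ * wide counter across two registers (32 valid bits in LO, 16 in HI, per the
+ * masks), so a 48-bit statistic is assembled by reading LO and then HI; idx
+ * is a hypothetical index in 0...15:
+ *
+ *	u64 octs = rd32(hw, I40E_GLPES_PFIP4RXOCTSLO(idx));
+ *	octs |= (u64)(rd32(hw, I40E_GLPES_PFIP4RXOCTSHI(idx)) &
+ *		      I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK) << 32;
+ */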
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXUNEXPERR 0x0001E008
+#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT 0
+#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_MASK (0xFFFFFF << I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 4))
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 4))
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 4))
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 4))
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 4))
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 4))
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 4))
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 4))
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 4))
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 4))
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 4))
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 4))
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 4))
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 4))
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 4))
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 4))
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4))
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
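+/* Usage sketch (illustrative only): each _MAX_INDEX bounds the argument of
+ * its indexed macro, so a per-VF array such as VFTCPRTXSEG can be walked
+ * directly (total_rtx is a hypothetical u64 accumulator):
+ *
+ *	int i;
+ *
+ *	for (i = 0; i <= I40E_GLPES_VFTCPRTXSEG_MAX_INDEX; i++)
+ *		total_rtx += rd32(hw, I40E_GLPES_VFTCPRTXSEG(i));
+ */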
+#define I40E_PRTPM_EEE_STAT 0x001E4320
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK (0x1 << I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEEC 0x001E4380
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK (0x3F << I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK (0x3 << I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
+#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
+#define I40E_PRTPM_EEEC_TEEE_DLY_MASK (0x3F << I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
+#define I40E_PRTPM_EEEFWD 0x001E4400
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK (0x1 << I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
+#define I40E_PRTPM_EEER 0x001E4360
+#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
+#define I40E_PRTPM_EEER_TW_SYSTEM_MASK (0xFFFF << I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
+#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
+#define I40E_PRTPM_EEER_TX_LPI_EN_MASK (0x1 << I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
+#define I40E_PRTPM_EEETXC 0x001E43E0
+#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
+#define I40E_PRTPM_EEETXC_TW_PHY_MASK (0xFFFF << I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
+#define I40E_PRTPM_GC 0x000B8140
+#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
+#define I40E_PRTPM_GC_EMP_LINK_ON_MASK (0x1 << I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
+#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
+#define I40E_PRTPM_GC_MNG_VETO_MASK (0x1 << I40E_PRTPM_GC_MNG_VETO_SHIFT)
+#define I40E_PRTPM_GC_RATD_SHIFT 2
+#define I40E_PRTPM_GC_RATD_MASK (0x1 << I40E_PRTPM_GC_RATD_SHIFT)
+#define I40E_PRTPM_GC_LCDMP_SHIFT 3
+#define I40E_PRTPM_GC_LCDMP_MASK (0x1 << I40E_PRTPM_GC_LCDMP_SHIFT)
+#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
+#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK (0x1 << I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
+#define I40E_PRTPM_RLPIC 0x001E43A0
+#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
+#define I40E_PRTPM_RLPIC_ERLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
+#define I40E_PRTPM_TLPIC 0x001E43C0
+#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
+#define I40E_PRTPM_TLPIC_ETLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
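+/* Usage sketch (illustrative only): single-bit fields such as the EEE LPI
+ * status flags above are tested directly against their masks:
+ *
+ *	u32 stat = rd32(hw, I40E_PRTPM_EEE_STAT);
+ *	bool tx_lpi = !!(stat & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK);
+ *	bool rx_lpi = !!(stat & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK);
+ */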
+#define I40E_GLRPB_DPSS 0x000AC828
+#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
+#define I40E_GLRPB_DPSS_DPS_TCN_MASK (0xFFFFF << I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
+#define I40E_GLRPB_GHW 0x000AC830
+#define I40E_GLRPB_GHW_GHW_SHIFT 0
+#define I40E_GLRPB_GHW_GHW_MASK (0xFFFFF << I40E_GLRPB_GHW_GHW_SHIFT)
+#define I40E_GLRPB_GLW 0x000AC834
+#define I40E_GLRPB_GLW_GLW_SHIFT 0
+#define I40E_GLRPB_GLW_GLW_MASK (0xFFFFF << I40E_GLRPB_GLW_GLW_SHIFT)
+#define I40E_GLRPB_PHW 0x000AC844
+#define I40E_GLRPB_PHW_PHW_SHIFT 0
+#define I40E_GLRPB_PHW_PHW_MASK (0xFFFFF << I40E_GLRPB_PHW_PHW_SHIFT)
+#define I40E_GLRPB_PLW 0x000AC848
+#define I40E_GLRPB_PLW_PLW_SHIFT 0
+#define I40E_GLRPB_PLW_PLW_MASK (0xFFFFF << I40E_GLRPB_PLW_PLW_SHIFT)
+#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DHW_MAX_INDEX 7
+#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
+#define I40E_PRTRPB_DHW_DHW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
+#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DLW_MAX_INDEX 7
+#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
+#define I40E_PRTRPB_DLW_DLW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
+#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DPS_MAX_INDEX 7
+#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
+#define I40E_PRTRPB_DPS_DPS_TCN_MASK (0xFFFFF << I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
+#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_SHT_MAX_INDEX 7
+#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
+#define I40E_PRTRPB_SHT_SHT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
+#define I40E_PRTRPB_SHW 0x000AC580
+#define I40E_PRTRPB_SHW_SHW_SHIFT 0
+#define I40E_PRTRPB_SHW_SHW_MASK (0xFFFFF << I40E_PRTRPB_SHW_SHW_SHIFT)
+#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_SLT_MAX_INDEX 7
+#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
+#define I40E_PRTRPB_SLT_SLT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
+#define I40E_PRTRPB_SLW 0x000AC6A0
+#define I40E_PRTRPB_SLW_SLW_SHIFT 0
+#define I40E_PRTRPB_SLW_SLW_MASK (0xFFFFF << I40E_PRTRPB_SLW_SLW_SHIFT)
+#define I40E_PRTRPB_SPS 0x000AC7C0
+#define I40E_PRTRPB_SPS_SPS_SHIFT 0
+#define I40E_PRTRPB_SPS_SPS_MASK (0xFFFFF << I40E_PRTRPB_SPS_SPS_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK (0xFFFFFFFF << I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_CTL 0x00269BA4
+#define I40E_GLQF_CTL_HTOEP_SHIFT 1
+#define I40E_GLQF_CTL_HTOEP_MASK (0x1 << I40E_GLQF_CTL_HTOEP_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
+#define I40E_GLQF_CTL_HTOEP_FCOE_MASK (0x1 << I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
+#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
+#define I40E_GLQF_CTL_PCNT_ALLOC_MASK (0x7 << I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_RSVD_SHIFT 7
+#define I40E_GLQF_CTL_RSVD_MASK (0x1 << I40E_GLQF_CTL_RSVD_SHIFT)
+#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
+#define I40E_GLQF_CTL_MAXPEBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
+#define I40E_GLQF_CTL_MAXFCBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
+#define I40E_GLQF_CTL_MAXFDBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
+#define I40E_GLQF_CTL_FDBEST_SHIFT 17
+#define I40E_GLQF_CTL_FDBEST_MASK (0xFF << I40E_GLQF_CTL_FDBEST_SHIFT)
+#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
+#define I40E_GLQF_CTL_PROGPRIO_MASK (0x1 << I40E_GLQF_CTL_PROGPRIO_SHIFT)
+#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
+#define I40E_GLQF_CTL_INVALPRIO_MASK (0x1 << I40E_GLQF_CTL_INVALPRIO_SHIFT)
+#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
+#define I40E_GLQF_CTL_IGNORE_IP_MASK (0x1 << I40E_GLQF_CTL_IGNORE_IP_SHIFT)
+#define I40E_GLQF_FDCNT_0 0x00269BAC
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
+#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
+#define I40E_GLQF_FDCNT_0_BESTCNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
+#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */
+#define I40E_GLQF_HSYM_MAX_INDEX 63
+#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
+#define I40E_GLQF_HSYM_SYMH_ENA_MASK (0x1 << I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
+#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */
+#define I40E_GLQF_PCNT_MAX_INDEX 511
+#define I40E_GLQF_PCNT_PCNT_SHIFT 0
+#define I40E_GLQF_PCNT_PCNT_MASK (0xFFFFFFFF << I40E_GLQF_PCNT_PCNT_SHIFT)
+#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */
+#define I40E_GLQF_SWAP_MAX_INDEX 1
+#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
+#define I40E_GLQF_SWAP_OFF0_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
+#define I40E_GLQF_SWAP_OFF0_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
+#define I40E_GLQF_SWAP_FLEN0_MASK (0xF << I40E_GLQF_SWAP_FLEN0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
+#define I40E_GLQF_SWAP_OFF1_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
+#define I40E_GLQF_SWAP_OFF1_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
+#define I40E_GLQF_SWAP_FLEN1_MASK (0xF << I40E_GLQF_SWAP_FLEN1_SHIFT)
+#define I40E_PFQF_CTL_0 0x001C0AC0
+#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_0_PEHSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_0_PEDSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
+#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
+#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK (0x1 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
+#define I40E_PFQF_CTL_0_FD_ENA_MASK (0x1 << I40E_PFQF_CTL_0_FD_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
+#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK (0x1 << I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK (0x1 << I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
+#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
+#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_1 0x00245D80
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK (0x1 << I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
+#define I40E_PFQF_FDALLOC 0x00246280
+#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
+#define I40E_PFQF_FDALLOC_FDALLOC_MASK (0xFF << I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
+#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
+#define I40E_PFQF_FDALLOC_FDBEST_MASK (0xFF << I40E_PFQF_FDALLOC_FDBEST_SHIFT)
+#define I40E_PFQF_FDSTAT 0x00246380
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
+#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
+#define I40E_PFQF_FDSTAT_BEST_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
+#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */
+#define I40E_PFQF_HENA_MAX_INDEX 1
+#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_PFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */
+#define I40E_PFQF_HKEY_MAX_INDEX 12
+#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_PFQF_HKEY_KEY_0_MASK (0xFF << I40E_PFQF_HKEY_KEY_0_SHIFT)
+#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_PFQF_HKEY_KEY_1_MASK (0xFF << I40E_PFQF_HKEY_KEY_1_SHIFT)
+#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_PFQF_HKEY_KEY_2_MASK (0xFF << I40E_PFQF_HKEY_KEY_2_SHIFT)
+#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_PFQF_HKEY_KEY_3_MASK (0xFF << I40E_PFQF_HKEY_KEY_3_SHIFT)
+#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */
+#define I40E_PFQF_HLUT_MAX_INDEX 127
+#define I40E_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_PFQF_HLUT_LUT0_MASK (0x3F << I40E_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_PFQF_HLUT_LUT1_MASK (0x3F << I40E_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_PFQF_HLUT_LUT2_MASK (0x3F << I40E_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_PFQF_HLUT_LUT3_MASK (0x3F << I40E_PFQF_HLUT_LUT3_SHIFT)
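The HKEY and HLUT arrays above carry the PF's RSS hash key and queue lookup table one 32-bit word at a time. As a hedged illustration of how these macros compose (modeled loosely on the driver's RSS setup path; wr32() is assumed to be the driver's 32-bit register write helper, and the function name and seed/lut buffers are illustrative, not part of this patch):

static void pf_rss_config(struct i40e_hw *hw, const u32 *seed, const u8 *lut)
{
	int i;

	/* thirteen 32-bit words make up the 52-byte hash key (_i = 0...12) */
	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
		wr32(hw, I40E_PFQF_HKEY(i), seed[i]);

	/* each HLUT word packs four queue indices via the LUT0...LUT3 fields */
	for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
		wr32(hw, I40E_PFQF_HLUT(i),
		     ((u32)lut[4 * i] << I40E_PFQF_HLUT_LUT0_SHIFT) |
		     ((u32)lut[4 * i + 1] << I40E_PFQF_HLUT_LUT1_SHIFT) |
		     ((u32)lut[4 * i + 2] << I40E_PFQF_HLUT_LUT2_SHIFT) |
		     ((u32)lut[4 * i + 3] << I40E_PFQF_HLUT_LUT3_SHIFT));
}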
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK (0x7 << I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK (0x7 << I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK (0x7 << I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK (0x7 << I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK (0x7 << I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK (0x7 << I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK (0x7 << I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK (0x7 << I40E_PFQF_HREGION_REGION_7_SHIFT)
+#define I40E_PRTQF_CTL_0 0x00256E60
+#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
+#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK (0x1 << I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
+#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */
+#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_FLXINSET_INSET_MASK (0xFF << I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */
+#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
+#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
+#define I40E_PRTQF_FD_MSK_MASK_MASK (0xFFFF << I40E_PRTQF_FD_MSK_MASK_SHIFT)
+#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
+#define I40E_PRTQF_FD_MSK_OFFSET_MASK (0x3F << I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */
+#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x1F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0x1F << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
+#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */
+#define I40E_VFQF_HENA1_MAX_INDEX 1
+#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA1_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */
+#define I40E_VFQF_HKEY1_MAX_INDEX 12
+#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY1_KEY_0_MASK (0xFF << I40E_VFQF_HKEY1_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY1_KEY_1_MASK (0xFF << I40E_VFQF_HKEY1_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY1_KEY_2_MASK (0xFF << I40E_VFQF_HKEY1_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY1_KEY_3_MASK (0xFF << I40E_VFQF_HKEY1_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VFQF_HLUT1_MAX_INDEX 15
+#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT1_LUT0_MASK (0xF << I40E_VFQF_HLUT1_LUT0_SHIFT)
+#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT1_LUT1_MASK (0xF << I40E_VFQF_HLUT1_LUT1_SHIFT)
+#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT1_LUT2_MASK (0xF << I40E_VFQF_HLUT1_LUT2_SHIFT)
+#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT1_LUT3_MASK (0xF << I40E_VFQF_HLUT1_LUT3_SHIFT)
+#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */
+#define I40E_VFQF_HREGION1_MAX_INDEX 7
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION1_REGION_0_MASK (0x7 << I40E_VFQF_HREGION1_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION1_REGION_1_MASK (0x7 << I40E_VFQF_HREGION1_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION1_REGION_2_MASK (0x7 << I40E_VFQF_HREGION1_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION1_REGION_3_MASK (0x7 << I40E_VFQF_HREGION1_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION1_REGION_4_MASK (0x7 << I40E_VFQF_HREGION1_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION1_REGION_5_MASK (0x7 << I40E_VFQF_HREGION1_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION1_REGION_6_MASK (0x7 << I40E_VFQF_HREGION1_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION1_REGION_7_MASK (0x7 << I40E_VFQF_HREGION1_REGION_7_SHIFT)
+#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPQF_CTL_MAX_INDEX 127
+#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
+#define I40E_VPQF_CTL_PEHSIZE_MASK (0x1F << I40E_VPQF_CTL_PEHSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
+#define I40E_VPQF_CTL_PEDSIZE_MASK (0x1F << I40E_VPQF_CTL_PEDSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
+#define I40E_VPQF_CTL_FCHSIZE_MASK (0xF << I40E_VPQF_CTL_FCHSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
+#define I40E_VPQF_CTL_FCDSIZE_MASK (0x3 << I40E_VPQF_CTL_FCDSIZE_SHIFT)
+#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _VSI=0...383 */
+#define I40E_VSIQF_CTL_MAX_INDEX 383
+#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
+#define I40E_VSIQF_CTL_FCOE_ENA_MASK (0x1 << I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
+#define I40E_VSIQF_CTL_PETCP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
+#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
+#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
+#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */
+#define I40E_VSIQF_TCREGION_MAX_INDEX 3
+#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
+#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
+#define I40E_VSIQF_TCREGION_TC_SIZE_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
+#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
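Each TCREGION word describes two traffic-class queue regions as offset/size pairs. A minimal sketch of packing one word with the shift/mask pairs above (the helper and its parameter names are hypothetical):

static u32 tcregion_word(u32 off0, u32 size0, u32 off1, u32 size1)
{
	u32 val = 0;

	/* first region: 9-bit queue offset, 3-bit size exponent */
	val |= (off0 << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT) &
	       I40E_VSIQF_TCREGION_TC_OFFSET_MASK;
	val |= (size0 << I40E_VSIQF_TCREGION_TC_SIZE_SHIFT) &
	       I40E_VSIQF_TCREGION_TC_SIZE_MASK;
	/* second region uses the OFFSET2/SIZE2 fields of the same word */
	val |= (off1 << I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT) &
	       I40E_VSIQF_TCREGION_TC_OFFSET2_MASK;
	val |= (size1 << I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT) &
	       I40E_VSIQF_TCREGION_TC_SIZE2_MASK;

	return val;
}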
+#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOECRC_MAX_INDEX 143
+#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
+#define I40E_GL_FCOECRC_FCOECRC_MASK (0xFFFFFFFF << I40E_GL_FCOECRC_FCOECRC_SHIFT)
+#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDDPC_MAX_INDEX 143
+#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
+#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
+#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFRC(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFRC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFRC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
+#define I40E_GL_FCOEDIXAC(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXAC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT 0
+#define I40E_GL_FCOEDIXAC_FCOEDIXAC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT)
+#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
+#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
+#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK (0xFFFF << I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
+#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
+#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK (0xFFFF << I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
+#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
+#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOELAST_MAX_INDEX 143
+#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
+#define I40E_GL_FCOELAST_FCOELAST_MASK (0xFFFFFFFF << I40E_GL_FCOELAST_FCOELAST_SHIFT)
+#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPRC_MAX_INDEX 143
+#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
+#define I40E_GL_FCOEPRC_FCOEPRC_MASK (0xFFFFFFFF << I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
+#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPTC_MAX_INDEX 143
+#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
+#define I40E_GL_FCOEPTC_FCOEPTC_MASK (0xFFFFFFFF << I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
+#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOERPDC_MAX_INDEX 143
+#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
+#define I40E_GL_FCOERPDC_FCOERPDC_MASK (0xFFFFFFFF << I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
+#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCH_MAX_INDEX 3
+#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCL_MAX_INDEX 3
+#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPRCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCH_MAX_INDEX 3
+#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPTCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPTCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCL_MAX_INDEX 3
+#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPTCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPTCL_UPRCH_SHIFT)
+#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
+#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
+#define I40E_GLPRT_CRCERRS_CRCERRS_MASK (0xFFFFFFFF << I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
+#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCH_MAX_INDEX 3
+#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
+#define I40E_GLPRT_GORCH_GORCH_MASK (0xFFFF << I40E_GLPRT_GORCH_GORCH_SHIFT)
+#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCL_MAX_INDEX 3
+#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
+#define I40E_GLPRT_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLPRT_GORCL_GORCL_SHIFT)
+#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCH_MAX_INDEX 3
+#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLPRT_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLPRT_GOTCH_GOTCH_SHIFT)
+#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCL_MAX_INDEX 3
+#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLPRT_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLPRT_GOTCL_GOTCL_SHIFT)
+#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
+#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
+#define I40E_GLPRT_ILLERRC_ILLERRC_MASK (0xFFFFFFFF << I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
+#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LDPC_MAX_INDEX 3
+#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
+#define I40E_GLPRT_LDPC_LDPC_MASK (0xFFFFFFFF << I40E_GLPRT_LDPC_LDPC_SHIFT)
+#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
+#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
+#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
+#define I40E_GLPRT_LXONTXC_LXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
+#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MLFC_MAX_INDEX 3
+#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
+#define I40E_GLPRT_MLFC_MLFC_MASK (0xFFFFFFFF << I40E_GLPRT_MLFC_MLFC_SHIFT)
+#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCH_MAX_INDEX 3
+#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLPRT_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLPRT_MPRCH_MPRCH_SHIFT)
+#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCL_MAX_INDEX 3
+#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLPRT_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPRCL_MPRCL_SHIFT)
+#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCH_MAX_INDEX 3
+#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLPRT_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLPRT_MPTCH_MPTCH_SHIFT)
+#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCL_MAX_INDEX 3
+#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLPRT_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPTCL_MPTCL_SHIFT)
+#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MRFC_MAX_INDEX 3
+#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
+#define I40E_GLPRT_MRFC_MRFC_MASK (0xFFFFFFFF << I40E_GLPRT_MRFC_MRFC_SHIFT)
+#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
+#define I40E_GLPRT_PRC1023H_PRC1023H_MASK (0xFFFF << I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
+#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
+#define I40E_GLPRT_PRC1023L_PRC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
+#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127H_MAX_INDEX 3
+#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
+#define I40E_GLPRT_PRC127H_PRC127H_MASK (0xFFFF << I40E_GLPRT_PRC127H_PRC127H_SHIFT)
+#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127L_MAX_INDEX 3
+#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
+#define I40E_GLPRT_PRC127L_PRC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC127L_PRC127L_SHIFT)
+#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC1522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC1522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255H_MAX_INDEX 3
+#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
+#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK (0xFFFF << I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
+#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255L_MAX_INDEX 3
+#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
+#define I40E_GLPRT_PRC255L_PRC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC255L_PRC255L_SHIFT)
+#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511H_MAX_INDEX 3
+#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
+#define I40E_GLPRT_PRC511H_PRC511H_MASK (0xFFFF << I40E_GLPRT_PRC511H_PRC511H_SHIFT)
+#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511L_MAX_INDEX 3
+#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
+#define I40E_GLPRT_PRC511L_PRC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC511L_PRC511L_SHIFT)
+#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64H_MAX_INDEX 3
+#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
+#define I40E_GLPRT_PRC64H_PRC64H_MASK (0xFFFF << I40E_GLPRT_PRC64H_PRC64H_SHIFT)
+#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64L_MAX_INDEX 3
+#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
+#define I40E_GLPRT_PRC64L_PRC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC64L_PRC64L_SHIFT)
+#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC9522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC9522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
+#define I40E_GLPRT_PTC1023H_PTC1023H_MASK (0xFFFF << I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
+#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
+#define I40E_GLPRT_PTC1023L_PTC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
+#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127H_MAX_INDEX 3
+#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
+#define I40E_GLPRT_PTC127H_PTC127H_MASK (0xFFFF << I40E_GLPRT_PTC127H_PTC127H_SHIFT)
+#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127L_MAX_INDEX 3
+#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
+#define I40E_GLPRT_PTC127L_PTC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC127L_PTC127L_SHIFT)
+#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
+#define I40E_GLPRT_PTC1522H_PTC1522H_MASK (0xFFFF << I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
+#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
+#define I40E_GLPRT_PTC1522L_PTC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
+#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255H_MAX_INDEX 3
+#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
+#define I40E_GLPRT_PTC255H_PTC255H_MASK (0xFFFF << I40E_GLPRT_PTC255H_PTC255H_SHIFT)
+#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255L_MAX_INDEX 3
+#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
+#define I40E_GLPRT_PTC255L_PTC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC255L_PTC255L_SHIFT)
+#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511H_MAX_INDEX 3
+#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
+#define I40E_GLPRT_PTC511H_PTC511H_MASK (0xFFFF << I40E_GLPRT_PTC511H_PTC511H_SHIFT)
+#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511L_MAX_INDEX 3
+#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
+#define I40E_GLPRT_PTC511L_PTC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC511L_PTC511L_SHIFT)
+#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64H_MAX_INDEX 3
+#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
+#define I40E_GLPRT_PTC64H_PTC64H_MASK (0xFFFF << I40E_GLPRT_PTC64H_PTC64H_SHIFT)
+#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64L_MAX_INDEX 3
+#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
+#define I40E_GLPRT_PTC64L_PTC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC64L_PTC64L_SHIFT)
+#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
+#define I40E_GLPRT_PTC9522H_PTC9522H_MASK (0xFFFF << I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
+#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
+#define I40E_GLPRT_PTC9522L_PTC9522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
+#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
+#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
+#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
+#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RDPC_MAX_INDEX 3
+#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
+#define I40E_GLPRT_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLPRT_RDPC_RDPC_SHIFT)
+#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RFC_MAX_INDEX 3
+#define I40E_GLPRT_RFC_RFC_SHIFT 0
+#define I40E_GLPRT_RFC_RFC_MASK (0xFFFFFFFF << I40E_GLPRT_RFC_RFC_SHIFT)
+#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RJC_MAX_INDEX 3
+#define I40E_GLPRT_RJC_RJC_SHIFT 0
+#define I40E_GLPRT_RJC_RJC_MASK (0xFFFFFFFF << I40E_GLPRT_RJC_RJC_SHIFT)
+#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RLEC_MAX_INDEX 3
+#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
+#define I40E_GLPRT_RLEC_RLEC_MASK (0xFFFFFFFF << I40E_GLPRT_RLEC_RLEC_SHIFT)
+#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ROC_MAX_INDEX 3
+#define I40E_GLPRT_ROC_ROC_SHIFT 0
+#define I40E_GLPRT_ROC_ROC_MASK (0xFFFFFFFF << I40E_GLPRT_ROC_ROC_SHIFT)
+#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RUC_MAX_INDEX 3
+#define I40E_GLPRT_RUC_RUC_SHIFT 0
+#define I40E_GLPRT_RUC_RUC_MASK (0xFFFFFFFF << I40E_GLPRT_RUC_RUC_SHIFT)
+#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RUPP_MAX_INDEX 3
+#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
+#define I40E_GLPRT_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLPRT_RUPP_RUPP_SHIFT)
+#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK (0xFFFFFFFF << I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
+#define I40E_GLPRT_STDC(_i) (0x00300640 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_STDC_MAX_INDEX 3
+#define I40E_GLPRT_STDC_STDC_SHIFT 0
+#define I40E_GLPRT_STDC_STDC_MASK (0xFFFFFFFF << I40E_GLPRT_STDC_STDC_SHIFT)
+#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDOLD_MAX_INDEX 3
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK (0xFFFFFFFF << I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
+#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDPC_MAX_INDEX 3
+#define I40E_GLPRT_TDPC_TDPC_SHIFT 0
+#define I40E_GLPRT_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLPRT_TDPC_TDPC_SHIFT)
+#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCH_MAX_INDEX 3
+#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_UPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCL_MAX_INDEX 3
+#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLPRT_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_UPRCL_UPRCL_SHIFT)
+#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPTCH_MAX_INDEX 3
+#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLPRT_UPTCH_UPTCH_SHIFT)
+#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPTCL_MAX_INDEX 3
+#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCL_VUPTCH_MASK (0xFFFFFFFF << I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
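The *CL/*CH pairs in this port-statistics block split one counter across a 32-bit low word and a 16-bit high word. A minimal sketch of sampling such a pair, assuming rd32() is the driver's 32-bit register read helper (the read_stat48 name is illustrative, and rollover handling between the two reads is omitted for brevity):

static u64 read_stat48(struct i40e_hw *hw, u32 loreg, u32 hireg)
{
	u64 lo = rd32(hw, loreg);
	u64 hi = rd32(hw, hireg);

	return (hi << 32) | lo;		/* 48 significant bits in total */
}

For example, read_stat48(hw, I40E_GLPRT_GORCL(port), I40E_GLPRT_GORCH(port)) would sample the good-octets-received counter for one of the four ports.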
+#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCH_MAX_INDEX 15
+#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLSW_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLSW_BPRCH_BPRCH_SHIFT)
+#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCL_MAX_INDEX 15
+#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLSW_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLSW_BPRCL_BPRCL_SHIFT)
+#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCH_MAX_INDEX 15
+#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLSW_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLSW_BPTCH_BPTCH_SHIFT)
+#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCL_MAX_INDEX 15
+#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLSW_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLSW_BPTCL_BPTCL_SHIFT)
+#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCH_MAX_INDEX 15
+#define I40E_GLSW_GORCH_GORCH_SHIFT 0
+#define I40E_GLSW_GORCH_GORCH_MASK (0xFFFF << I40E_GLSW_GORCH_GORCH_SHIFT)
+#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCL_MAX_INDEX 15
+#define I40E_GLSW_GORCL_GORCL_SHIFT 0
+#define I40E_GLSW_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLSW_GORCL_GORCL_SHIFT)
+#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCH_MAX_INDEX 15
+#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLSW_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLSW_GOTCH_GOTCH_SHIFT)
+#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCL_MAX_INDEX 15
+#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLSW_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLSW_GOTCL_GOTCL_SHIFT)
+#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCH_MAX_INDEX 15
+#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLSW_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLSW_MPRCH_MPRCH_SHIFT)
+#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCL_MAX_INDEX 15
+#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLSW_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLSW_MPRCL_MPRCL_SHIFT)
+#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCH_MAX_INDEX 15
+#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLSW_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLSW_MPTCH_MPTCH_SHIFT)
+#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCL_MAX_INDEX 15
+#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLSW_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLSW_MPTCL_MPTCL_SHIFT)
+#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_RUPP_MAX_INDEX 15
+#define I40E_GLSW_RUPP_RUPP_SHIFT 0
+#define I40E_GLSW_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLSW_RUPP_RUPP_SHIFT)
+#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_TDPC_MAX_INDEX 15
+#define I40E_GLSW_TDPC_TDPC_SHIFT 0
+#define I40E_GLSW_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLSW_TDPC_TDPC_SHIFT)
+#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCH_MAX_INDEX 15
+#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLSW_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLSW_UPRCH_UPRCH_SHIFT)
+#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCL_MAX_INDEX 15
+#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLSW_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLSW_UPRCL_UPRCL_SHIFT)
+#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPTCH_MAX_INDEX 15
+#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLSW_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLSW_UPTCH_UPTCH_SHIFT)
+#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPTCL_MAX_INDEX 15
+#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLSW_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLSW_UPTCL_UPTCL_SHIFT)
+#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCH_MAX_INDEX 383
+#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLV_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLV_BPRCH_BPRCH_SHIFT)
+#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCL_MAX_INDEX 383
+#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLV_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLV_BPRCL_BPRCL_SHIFT)
+#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCH_MAX_INDEX 383
+#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLV_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLV_BPTCH_BPTCH_SHIFT)
+#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCL_MAX_INDEX 383
+#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLV_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLV_BPTCL_BPTCL_SHIFT)
+#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCH_MAX_INDEX 383
+#define I40E_GLV_GORCH_GORCH_SHIFT 0
+#define I40E_GLV_GORCH_GORCH_MASK (0xFFFF << I40E_GLV_GORCH_GORCH_SHIFT)
+#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCL_MAX_INDEX 383
+#define I40E_GLV_GORCL_GORCL_SHIFT 0
+#define I40E_GLV_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLV_GORCL_GORCL_SHIFT)
+#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCH_MAX_INDEX 383
+#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLV_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLV_GOTCH_GOTCH_SHIFT)
+#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCL_MAX_INDEX 383
+#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLV_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLV_GOTCL_GOTCL_SHIFT)
+#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCH_MAX_INDEX 383
+#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLV_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLV_MPRCH_MPRCH_SHIFT)
+#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCL_MAX_INDEX 383
+#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLV_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLV_MPRCL_MPRCL_SHIFT)
+#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCH_MAX_INDEX 383
+#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLV_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLV_MPTCH_MPTCH_SHIFT)
+#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCL_MAX_INDEX 383
+#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLV_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLV_MPTCL_MPTCL_SHIFT)
+#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_RDPC_MAX_INDEX 383
+#define I40E_GLV_RDPC_RDPC_SHIFT 0
+#define I40E_GLV_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLV_RDPC_RDPC_SHIFT)
+#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_RUPP_MAX_INDEX 383
+#define I40E_GLV_RUPP_RUPP_SHIFT 0
+#define I40E_GLV_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLV_RUPP_RUPP_SHIFT)
+#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 8)) /* _VSI=0...383 */
+#define I40E_GLV_TEPC_MAX_INDEX 383
+#define I40E_GLV_TEPC_TEPC_SHIFT 0
+#define I40E_GLV_TEPC_TEPC_MASK (0xFFFFFFFF << I40E_GLV_TEPC_TEPC_SHIFT)
+#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCH_MAX_INDEX 383
+#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLV_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLV_UPRCH_UPRCH_SHIFT)
+#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCL_MAX_INDEX 383
+#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLV_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLV_UPRCL_UPRCL_SHIFT)
+#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPTCH_MAX_INDEX 383
+#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
+#define I40E_GLV_UPTCH_GLVUPTCH_MASK (0xFFFF << I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
+#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPTCL_MAX_INDEX 383
+#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLV_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLV_UPTCL_UPTCL_SHIFT)
+#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_RBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_RBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_RPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_RPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
+#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_TBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_TBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_TPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_TPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
+#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
+#define I40E_GLVEBVL_BPCH_VLBPCH_MASK (0xFFFF << I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
+#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
+#define I40E_GLVEBVL_BPCL_VLBPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
+#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GORCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GORCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GOTCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GOTCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
+#define I40E_GLVEBVL_MPCH_VLMPCH_MASK (0xFFFF << I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
+#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
+#define I40E_GLVEBVL_MPCL_VLMPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
+#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
+#define I40E_GLVEBVL_UPCH_VLUPCH_MASK (0xFFFF << I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
+#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
+#define I40E_GLVEBVL_UPCL_VLUPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK (0xFFFF << I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_L 0x00269F44
+#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_MASK (0xFFFFFFFF << I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT)
+#define I40E_GL_SWR_DEF_ACT(_i) (0x0026CF00 + ((_i) * 4)) /* _i=0...25 */
+#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 25
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
+#define I40E_GL_SWR_DEF_ACT_EN 0x0026CF84
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
+#define I40E_PRT_MSCCNT 0x00256BA0
+#define I40E_PRT_MSCCNT_CCOUNT_SHIFT 0
+#define I40E_PRT_MSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_MSCCNT_CCOUNT_SHIFT)
+#define I40E_PRT_SCSTS 0x00256C20
+#define I40E_PRT_SCSTS_BSCA_SHIFT 0
+#define I40E_PRT_SCSTS_BSCA_MASK (0x1 << I40E_PRT_SCSTS_BSCA_SHIFT)
+#define I40E_PRT_SCSTS_BSCAP_SHIFT 1
+#define I40E_PRT_SCSTS_BSCAP_MASK (0x1 << I40E_PRT_SCSTS_BSCAP_SHIFT)
+#define I40E_PRT_SCSTS_MSCA_SHIFT 2
+#define I40E_PRT_SCSTS_MSCA_MASK (0x1 << I40E_PRT_SCSTS_MSCA_SHIFT)
+#define I40E_PRT_SCSTS_MSCAP_SHIFT 3
+#define I40E_PRT_SCSTS_MSCAP_MASK (0x1 << I40E_PRT_SCSTS_MSCAP_SHIFT)
+#define I40E_PRT_SWT_BSCCNT 0x00256C60
+#define I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT 0
+#define I40E_PRT_SWT_BSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT)
+#define I40E_PRTTSYN_ADJ 0x001E4280
+#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
+#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK (0x7FFFFFFF << I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
+#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
+#define I40E_PRTTSYN_ADJ_SIGN_MASK (0x1 << I40E_PRTTSYN_ADJ_SIGN_SHIFT)
+#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
+#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK (0x3 << I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
+#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
+#define I40E_PRTTSYN_AUX_0_PULSEW_MASK (0xF << I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK (0x3 << I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
+#define I40E_PRTTSYN_AUX_1_INSTNT_MASK (0x1 << I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK (0x1 << I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
+#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK (0xFFFFFFFF << I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
+#define I40E_PRTTSYN_CTL0 0x001E4200
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK (0x1 << I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
+#define I40E_PRTTSYN_CTL0_PF_ID_MASK (0xF << I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
+#define I40E_PRTTSYN_CTL0_TSYNACT_MASK (0x3 << I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL0_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_CTL1 0x00085020
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK (0x3 << I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
+#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK (0x3 << I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL1_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
+#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
+#define I40E_PRTTSYN_INC_H 0x001E4060
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK (0x3F << I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
+#define I40E_PRTTSYN_INC_L 0x001E4040
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
+#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
+#define I40E_PRTTSYN_STAT_0 0x001E4220
+#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_0_EVENT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_0_EVENT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
+#define I40E_PRTTSYN_STAT_0_TGT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
+#define I40E_PRTTSYN_STAT_0_TGT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
+#define I40E_PRTTSYN_STAT_0_TXTIME_MASK (0x1 << I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
+#define I40E_PRTTSYN_STAT_1 0x00085140
+#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_1_RXT0_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_1_RXT1_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
+#define I40E_PRTTSYN_STAT_1_RXT2_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
+#define I40E_PRTTSYN_STAT_1_RXT3_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
+#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
+#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
+#define I40E_PRTTSYN_TIME_H 0x001E4120
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
+#define I40E_PRTTSYN_TIME_L 0x001E4100
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
+#define I40E_PRTTSYN_TXTIME_H 0x001E41E0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_TXTIME_L 0x001E41C0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
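The PRTTSYN block above is the port's IEEE 1588 machinery: clock increment and adjustment, target/event timestamps, per-message-type filtering, and the running TIME_L/TIME_H clock. As a hedged sketch of reading the clock, on the assumption (suggested by the driver's usual pattern) that the low word is read first so the pair is sampled coherently; rd32() as before, read_phc_time() is an illustrative name:

static u64 read_phc_time(struct i40e_hw *hw)
{
	u32 lo = rd32(hw, I40E_PRTTSYN_TIME_L);	/* read low word first */
	u32 hi = rd32(hw, I40E_PRTTSYN_TIME_H);

	return ((u64)hi << 32) | lo;
}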
+#define I40E_GLSCD_QUANTA 0x000B2080
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK (0x7 << I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
+#define I40E_GL_MDET_RX 0x0012A510
+#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_RX_FUNCTION_MASK (0xFF << I40E_GL_MDET_RX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_RX_EVENT_SHIFT 8
+#define I40E_GL_MDET_RX_EVENT_MASK (0x1FF << I40E_GL_MDET_RX_EVENT_SHIFT)
+#define I40E_GL_MDET_RX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_RX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_RX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_VALID_SHIFT 31
+#define I40E_GL_MDET_RX_VALID_MASK (0x1 << I40E_GL_MDET_RX_VALID_SHIFT)
+#define I40E_GL_MDET_TX 0x000E6480
+#define I40E_GL_MDET_TX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_TX_FUNCTION_MASK (0xFF << I40E_GL_MDET_TX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_TX_EVENT_SHIFT 8
+#define I40E_GL_MDET_TX_EVENT_MASK (0x1FF << I40E_GL_MDET_TX_EVENT_SHIFT)
+#define I40E_GL_MDET_TX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_TX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_TX_VALID_SHIFT 31
+#define I40E_GL_MDET_TX_VALID_MASK (0x1 << I40E_GL_MDET_TX_VALID_SHIFT)
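GL_MDET_TX and GL_MDET_RX latch the first malicious-driver-detection event: which function triggered it, the event code, the queue, plus a VALID flag. A minimal decode sketch under the same rd32()/wr32() assumptions (the all-ones write-back to re-arm the latch mirrors the common driver pattern but is an assumption here, as is the helper name):

static void check_mdet_tx(struct i40e_hw *hw)
{
	u32 reg = rd32(hw, I40E_GL_MDET_TX);
	u8 func;
	u16 event, queue;

	if (!(reg & I40E_GL_MDET_TX_VALID_MASK))
		return;		/* no event latched */

	func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK) >>
	       I40E_GL_MDET_TX_FUNCTION_SHIFT;
	event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
		I40E_GL_MDET_TX_EVENT_SHIFT;
	queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
		I40E_GL_MDET_TX_QUEUE_SHIFT;

	pr_warn("TX MDD event %u on queue %u, function %u\n",
		event, queue, func);
	wr32(hw, I40E_GL_MDET_TX, 0xffffffff);	/* clear and re-arm the latch */
}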
+#define I40E_PF_MDET_RX 0x0012A400
+#define I40E_PF_MDET_RX_VALID_SHIFT 0
+#define I40E_PF_MDET_RX_VALID_MASK (0x1 << I40E_PF_MDET_RX_VALID_SHIFT)
+#define I40E_PF_MDET_TX 0x000E6400
+#define I40E_PF_MDET_TX_VALID_SHIFT 0
+#define I40E_PF_MDET_TX_VALID_MASK (0x1 << I40E_PF_MDET_TX_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC 0x001C0500
+#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
+#define I40E_PF_VT_PFALLOC_LASTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
+#define I40E_PF_VT_PFALLOC_VALID_MASK (0x1 << I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VP_MDET_RX_MAX_INDEX 127
+#define I40E_VP_MDET_RX_VALID_SHIFT 0
+#define I40E_VP_MDET_RX_VALID_MASK (0x1 << I40E_VP_MDET_RX_VALID_SHIFT)
+#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VP_MDET_TX_MAX_INDEX 127
+#define I40E_VP_MDET_TX_VALID_SHIFT 0
+#define I40E_VP_MDET_TX_VALID_MASK (0x1 << I40E_VP_MDET_TX_VALID_SHIFT)
+#define I40E_GLPM_WUMC 0x0006C800
+#define I40E_GLPM_WUMC_NOTCO_SHIFT 0
+#define I40E_GLPM_WUMC_NOTCO_MASK (0x1 << I40E_GLPM_WUMC_NOTCO_SHIFT)
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK (0x1 << I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
+#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
+#define I40E_GLPM_WUMC_ROL_MODE_MASK (0x1 << I40E_GLPM_WUMC_ROL_MODE_SHIFT)
+#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
+#define I40E_GLPM_WUMC_RESERVED_4_MASK (0x1FFF << I40E_GLPM_WUMC_RESERVED_4_SHIFT)
+#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
+#define I40E_GLPM_WUMC_MNG_WU_PF_MASK (0xFFFF << I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
+#define I40E_PFPM_APM 0x000B8080
+#define I40E_PFPM_APM_APME_SHIFT 0
+#define I40E_PFPM_APM_APME_MASK (0x1 << I40E_PFPM_APM_APME_SHIFT)
+#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */
+#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK (0xFF << I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PFPM_WUC 0x0006B200
+#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
+#define I40E_PFPM_WUC_EN_APM_D0_MASK (0x1 << I40E_PFPM_WUC_EN_APM_D0_SHIFT)
+#define I40E_PFPM_WUFC 0x0006B400
+#define I40E_PFPM_WUFC_LNKC_SHIFT 0
+#define I40E_PFPM_WUFC_LNKC_MASK (0x1 << I40E_PFPM_WUFC_LNKC_SHIFT)
+#define I40E_PFPM_WUFC_MAG_SHIFT 1
+#define I40E_PFPM_WUFC_MAG_MASK (0x1 << I40E_PFPM_WUFC_MAG_SHIFT)
+#define I40E_PFPM_WUFC_MNG_SHIFT 3
+#define I40E_PFPM_WUFC_MNG_MASK (0x1 << I40E_PFPM_WUFC_MNG_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
+#define I40E_PFPM_WUFC_FLX0_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
+#define I40E_PFPM_WUFC_FLX1_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
+#define I40E_PFPM_WUFC_FLX2_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
+#define I40E_PFPM_WUFC_FLX3_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
+#define I40E_PFPM_WUFC_FLX4_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
+#define I40E_PFPM_WUFC_FLX5_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
+#define I40E_PFPM_WUFC_FLX6_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
+#define I40E_PFPM_WUFC_FLX7_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_SHIFT 16
+#define I40E_PFPM_WUFC_FLX0_MASK (0x1 << I40E_PFPM_WUFC_FLX0_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_SHIFT 17
+#define I40E_PFPM_WUFC_FLX1_MASK (0x1 << I40E_PFPM_WUFC_FLX1_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_SHIFT 18
+#define I40E_PFPM_WUFC_FLX2_MASK (0x1 << I40E_PFPM_WUFC_FLX2_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_SHIFT 19
+#define I40E_PFPM_WUFC_FLX3_MASK (0x1 << I40E_PFPM_WUFC_FLX3_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_SHIFT 20
+#define I40E_PFPM_WUFC_FLX4_MASK (0x1 << I40E_PFPM_WUFC_FLX4_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_SHIFT 21
+#define I40E_PFPM_WUFC_FLX5_MASK (0x1 << I40E_PFPM_WUFC_FLX5_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_SHIFT 22
+#define I40E_PFPM_WUFC_FLX6_MASK (0x1 << I40E_PFPM_WUFC_FLX6_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_SHIFT 23
+#define I40E_PFPM_WUFC_FLX7_MASK (0x1 << I40E_PFPM_WUFC_FLX7_SHIFT)
+#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUFC_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
+#define I40E_PFPM_WUS 0x0006B600
+#define I40E_PFPM_WUS_LNKC_SHIFT 0
+#define I40E_PFPM_WUS_LNKC_MASK (0x1 << I40E_PFPM_WUS_LNKC_SHIFT)
+#define I40E_PFPM_WUS_MAG_SHIFT 1
+#define I40E_PFPM_WUS_MAG_MASK (0x1 << I40E_PFPM_WUS_MAG_SHIFT)
+#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
+#define I40E_PFPM_WUS_PME_STATUS_MASK (0x1 << I40E_PFPM_WUS_PME_STATUS_SHIFT)
+#define I40E_PFPM_WUS_MNG_SHIFT 3
+#define I40E_PFPM_WUS_MNG_MASK (0x1 << I40E_PFPM_WUS_MNG_SHIFT)
+#define I40E_PFPM_WUS_FLX0_SHIFT 16
+#define I40E_PFPM_WUS_FLX0_MASK (0x1 << I40E_PFPM_WUS_FLX0_SHIFT)
+#define I40E_PFPM_WUS_FLX1_SHIFT 17
+#define I40E_PFPM_WUS_FLX1_MASK (0x1 << I40E_PFPM_WUS_FLX1_SHIFT)
+#define I40E_PFPM_WUS_FLX2_SHIFT 18
+#define I40E_PFPM_WUS_FLX2_MASK (0x1 << I40E_PFPM_WUS_FLX2_SHIFT)
+#define I40E_PFPM_WUS_FLX3_SHIFT 19
+#define I40E_PFPM_WUS_FLX3_MASK (0x1 << I40E_PFPM_WUS_FLX3_SHIFT)
+#define I40E_PFPM_WUS_FLX4_SHIFT 20
+#define I40E_PFPM_WUS_FLX4_MASK (0x1 << I40E_PFPM_WUS_FLX4_SHIFT)
+#define I40E_PFPM_WUS_FLX5_SHIFT 21
+#define I40E_PFPM_WUS_FLX5_MASK (0x1 << I40E_PFPM_WUS_FLX5_SHIFT)
+#define I40E_PFPM_WUS_FLX6_SHIFT 22
+#define I40E_PFPM_WUS_FLX6_MASK (0x1 << I40E_PFPM_WUS_FLX6_SHIFT)
+#define I40E_PFPM_WUS_FLX7_SHIFT 23
+#define I40E_PFPM_WUS_FLX7_MASK (0x1 << I40E_PFPM_WUS_FLX7_SHIFT)
+#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUS_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUS_FW_RST_WK_SHIFT)
+#define I40E_PRTPM_FHFHR 0x0006C000
+#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
+#define I40E_PRTPM_FHFHR_UNICAST_MASK (0x1 << I40E_PRTPM_FHFHR_UNICAST_SHIFT)
+#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
+#define I40E_PRTPM_FHFHR_MULTICAST_MASK (0x1 << I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
+#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_SAH_MAX_INDEX 3
+#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
+#define I40E_PRTPM_SAH_PFPM_SAH_MASK (0xFFFF << I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
+#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
+#define I40E_PRTPM_SAH_PF_NUM_MASK (0xF << I40E_PRTPM_SAH_PF_NUM_SHIFT)
+#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
+#define I40E_PRTPM_SAH_MC_MAG_EN_MASK (0x1 << I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
+#define I40E_PRTPM_SAH_AV_SHIFT 31
+#define I40E_PRTPM_SAH_AV_MASK (0x1 << I40E_PRTPM_SAH_AV_SHIFT)
+#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_SAL_MAX_INDEX 3
+#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
+#define I40E_PRTPM_SAL_PFPM_SAL_MASK (0xFFFFFFFF << I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
+#define I40E_VF_ARQBAH1 0x00006000
+#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH1_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH1_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL1 0x00006C00
+#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL1_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL1_ARQBAL_SHIFT)
+#define I40E_VF_ARQH1 0x00007400
+#define I40E_VF_ARQH1_ARQH_SHIFT 0
+#define I40E_VF_ARQH1_ARQH_MASK (0x3FF << I40E_VF_ARQH1_ARQH_SHIFT)
+#define I40E_VF_ARQLEN1 0x00008000
+#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN1_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN1_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN1_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN1_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN1_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN1_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT1 0x00007000
+#define I40E_VF_ARQT1_ARQT_SHIFT 0
+#define I40E_VF_ARQT1_ARQT_MASK (0x3FF << I40E_VF_ARQT1_ARQT_SHIFT)
+#define I40E_VF_ATQBAH1 0x00007800
+#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH1_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH1_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL1 0x00007C00
+#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL1_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL1_ATQBAL_SHIFT)
+#define I40E_VF_ATQH1 0x00006400
+#define I40E_VF_ATQH1_ATQH_SHIFT 0
+#define I40E_VF_ATQH1_ATQH_MASK (0x3FF << I40E_VF_ATQH1_ATQH_SHIFT)
+#define I40E_VF_ATQLEN1 0x00006800
+#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN1_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN1_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN1_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN1_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN1_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN1_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT1 0x00008400
+#define I40E_VF_ATQT1_ATQT_SHIFT 0
+#define I40E_VF_ATQT1_ATQT_MASK (0x3FF << I40E_VF_ATQT1_ATQT_SHIFT)
+#define I40E_VFGEN_RSTAT 0x00008800
+#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define I40E_VFINT_DYN_CTL01 0x00005C00
+#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL01_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4))
+#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
+#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN1_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0_ENA1 0x00005000
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA1_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
+#define I40E_VFINT_ICR01 0x00004800
+#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR01_INTEVENT_MASK (0x1 << I40E_VFINT_ICR01_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR01_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR01_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR01_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR01_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR01_ADMINQ_MASK (0x1 << I40E_VFINT_ICR01_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR01_SWINT_SHIFT 31
+#define I40E_VFINT_ICR01_SWINT_MASK (0x1 << I40E_VFINT_ICR01_SWINT_SHIFT)
+#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */
+#define I40E_VFINT_ITR01_MAX_INDEX 2
+#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR01_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR01_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4))
+#define I40E_VFINT_ITRN1_MAX_INDEX 2
+#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN1_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN1_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL01 0x00005400
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
+#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */
+#define I40E_QRX_TAIL1_MAX_INDEX 15
+#define I40E_QRX_TAIL1_TAIL_SHIFT 0
+#define I40E_QRX_TAIL1_TAIL_MASK (0x1FFF << I40E_QRX_TAIL1_TAIL_SHIFT)
+#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */
+#define I40E_QTX_TAIL1_MAX_INDEX 15
+#define I40E_QTX_TAIL1_TAIL_SHIFT 0
+#define I40E_QTX_TAIL1_TAIL_MASK (0x1FFF << I40E_QTX_TAIL1_TAIL_SHIFT)
+#define I40E_VFMSIX_PBA 0x00002000
+#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TADD_MAX_INDEX 16
+#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TMSG_MAX_INDEX 16
+#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TUADD_MAX_INDEX 16
+#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
+#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFCM_PE_ERRDATA 0x0000DC00
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO 0x0000D800
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */
+#define I40E_VFQF_HENA_MAX_INDEX 1
+#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */
+#define I40E_VFQF_HKEY_MAX_INDEX 12
+#define I40E_VFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY_KEY_0_MASK (0xFF << I40E_VFQF_HKEY_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY_KEY_1_MASK (0xFF << I40E_VFQF_HKEY_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY_KEY_2_MASK (0xFF << I40E_VFQF_HKEY_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY_KEY_3_MASK (0xFF << I40E_VFQF_HKEY_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_VFQF_HLUT_MAX_INDEX 15
+#define I40E_VFQF_HLUT_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT_LUT0_MASK (0xF << I40E_VFQF_HLUT_LUT0_SHIFT)
+#define I40E_VFQF_HLUT_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT_LUT1_MASK (0xF << I40E_VFQF_HLUT_LUT1_SHIFT)
+#define I40E_VFQF_HLUT_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT_LUT2_MASK (0xF << I40E_VFQF_HLUT_LUT2_SHIFT)
+#define I40E_VFQF_HLUT_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT_LUT3_MASK (0xF << I40E_VFQF_HLUT_LUT3_SHIFT)
+#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */
+#define I40E_VFQF_HREGION_MAX_INDEX 7
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION_REGION_0_MASK (0x7 << I40E_VFQF_HREGION_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION_REGION_1_MASK (0x7 << I40E_VFQF_HREGION_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION_REGION_2_MASK (0x7 << I40E_VFQF_HREGION_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION_REGION_3_MASK (0x7 << I40E_VFQF_HREGION_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION_REGION_4_MASK (0x7 << I40E_VFQF_HREGION_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION_REGION_5_MASK (0x7 << I40E_VFQF_HREGION_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION_REGION_6_MASK (0x7 << I40E_VFQF_HREGION_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION_REGION_7_MASK (0x7 << I40E_VFQF_HREGION_REGION_7_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS 0x00270110
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT 0
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT 8
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT 16
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT 24
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_MASK (0x7 << I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT)
+#endif
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h
new file mode 100644
index 000000000000..7c08cc2e339b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_status.h
@@ -0,0 +1,97 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_STATUS_H_
+#define _I40E_STATUS_H_
+
+/* Error Codes */
+enum i40e_status_code {
+ I40E_SUCCESS = 0,
+ I40E_ERR_NVM = -1,
+ I40E_ERR_NVM_CHECKSUM = -2,
+ I40E_ERR_PHY = -3,
+ I40E_ERR_CONFIG = -4,
+ I40E_ERR_PARAM = -5,
+ I40E_ERR_MAC_TYPE = -6,
+ I40E_ERR_UNKNOWN_PHY = -7,
+ I40E_ERR_LINK_SETUP = -8,
+ I40E_ERR_ADAPTER_STOPPED = -9,
+ I40E_ERR_INVALID_MAC_ADDR = -10,
+ I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
+ I40E_ERR_MASTER_REQUESTS_PENDING = -12,
+ I40E_ERR_INVALID_LINK_SETTINGS = -13,
+ I40E_ERR_AUTONEG_NOT_COMPLETE = -14,
+ I40E_ERR_RESET_FAILED = -15,
+ I40E_ERR_SWFW_SYNC = -16,
+ I40E_ERR_NO_AVAILABLE_VSI = -17,
+ I40E_ERR_NO_MEMORY = -18,
+ I40E_ERR_BAD_PTR = -19,
+ I40E_ERR_RING_FULL = -20,
+ I40E_ERR_INVALID_PD_ID = -21,
+ I40E_ERR_INVALID_QP_ID = -22,
+ I40E_ERR_INVALID_CQ_ID = -23,
+ I40E_ERR_INVALID_CEQ_ID = -24,
+ I40E_ERR_INVALID_AEQ_ID = -25,
+ I40E_ERR_INVALID_SIZE = -26,
+ I40E_ERR_INVALID_ARP_INDEX = -27,
+ I40E_ERR_INVALID_FPM_FUNC_ID = -28,
+ I40E_ERR_QP_INVALID_MSG_SIZE = -29,
+ I40E_ERR_QP_TOOMANY_WRS_POSTED = -30,
+ I40E_ERR_INVALID_FRAG_COUNT = -31,
+ I40E_ERR_QUEUE_EMPTY = -32,
+ I40E_ERR_INVALID_ALIGNMENT = -33,
+ I40E_ERR_FLUSHED_QUEUE = -34,
+ I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35,
+ I40E_ERR_INVALID_IMM_DATA_SIZE = -36,
+ I40E_ERR_TIMEOUT = -37,
+ I40E_ERR_OPCODE_MISMATCH = -38,
+ I40E_ERR_CQP_COMPL_ERROR = -39,
+ I40E_ERR_INVALID_VF_ID = -40,
+ I40E_ERR_INVALID_HMCFN_ID = -41,
+ I40E_ERR_BACKING_PAGE_ERROR = -42,
+ I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
+ I40E_ERR_INVALID_PBLE_INDEX = -44,
+ I40E_ERR_INVALID_SD_INDEX = -45,
+ I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
+ I40E_ERR_INVALID_SD_TYPE = -47,
+ I40E_ERR_MEMCPY_FAILED = -48,
+ I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
+ I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
+ I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51,
+ I40E_ERR_SRQ_ENABLED = -52,
+ I40E_ERR_ADMIN_QUEUE_ERROR = -53,
+ I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
+ I40E_ERR_BUF_TOO_SHORT = -55,
+ I40E_ERR_ADMIN_QUEUE_FULL = -56,
+ I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
+ I40E_ERR_BAD_IWARP_CQE = -58,
+ I40E_ERR_NVM_BLANK_MODE = -59,
+ I40E_ERR_NOT_IMPLEMENTED = -60,
+ I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61,
+ I40E_ERR_DIAG_TEST_FAILED = -62,
+ I40E_ERR_NOT_READY = -63,
+ I40E_NOT_SUPPORTED = -64,
+ I40E_ERR_FIRMWARE_API_VERSION = -65,
+};
+
+#endif /* _I40E_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
new file mode 100644
index 000000000000..8f2b3b2a2a90
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -0,0 +1,1573 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40evf.h"
+
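+/**
+ * build_ctob - build the cmd/type/offset/size/tag field of a Tx data descriptor
+ * @td_cmd: descriptor command bits
+ * @td_offset: descriptor header offsets
+ * @size: size of the data buffer
+ * @td_tag: L2 tag value
+ *
+ * Packs the fields into the little-endian quad word layout the hardware
+ * expects, with the DATA descriptor type already set.
+ **/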
+static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
+ u32 td_tag)
+{
+ return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
+ ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
+ ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+ ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
+}
+
+#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+
+/**
+ * i40e_unmap_and_free_tx_resource - Release a Tx buffer
+ * @ring: the ring that owns the buffer
+ * @tx_buffer: the buffer to free
+ **/
+static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
+ struct i40e_tx_buffer *tx_buffer)
+{
+ if (tx_buffer->skb) {
+ dev_kfree_skb_any(tx_buffer->skb);
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_single(ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ } else if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
+ tx_buffer->next_to_watch = NULL;
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+ /* tx_buffer must be completely set up in the transmit path */
+}
+
+/**
+ * i40evf_clean_tx_ring - Free all Tx ring buffers
+ * @tx_ring: ring to be cleaned
+ **/
+void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
+{
+ unsigned long bi_size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!tx_ring->tx_bi)
+ return;
+
+ /* Free all the Tx ring sk_buffs */
+ for (i = 0; i < tx_ring->count; i++)
+ i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
+
+ bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+ memset(tx_ring->tx_bi, 0, bi_size);
+
+ /* Zero out the descriptor ring */
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ if (!tx_ring->netdev)
+ return;
+
+ /* cleanup Tx queue statistics */
+ netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index));
+}
+
+/**
+ * i40evf_free_tx_resources - Free Tx resources per queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
+{
+ i40evf_clean_tx_ring(tx_ring);
+ kfree(tx_ring->tx_bi);
+ tx_ring->tx_bi = NULL;
+
+ if (tx_ring->desc) {
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
+ tx_ring->desc = NULL;
+ }
+}
+
+/**
+ * i40e_get_tx_pending - how many Tx descriptors are not yet processed
+ * @ring: the ring of descriptors
+ *
+ * Since there is no access to the ring head register
+ * in XL710, we need to use our local copies
+ **/
+static u32 i40e_get_tx_pending(struct i40e_ring *ring)
+{
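+	/* when next_to_use has wrapped below next_to_clean, add the ring size */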
+ u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
+ ? ring->next_to_use
+ : ring->next_to_use + ring->count);
+ return ntu - ring->next_to_clean;
+}
+
+/**
+ * i40e_check_tx_hang - Is there a hang in the Tx queue
+ * @tx_ring: the ring of descriptors
+ **/
+static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
+{
+ u32 tx_pending = i40e_get_tx_pending(tx_ring);
+ bool ret = false;
+
+ clear_check_for_tx_hang(tx_ring);
+
+ /* Check for a hung queue, but be thorough. This verifies
+ * that a transmit has been completed since the previous
+ * check AND there is at least one packet pending. The
+ * ARMED bit is set to indicate a potential hang. The
+ * bit is cleared if a pause frame is received to remove
+ * false hang detection due to PFC or 802.3x frames. By
+ * requiring this to fail twice we avoid races with
+ * PFC clearing the ARMED bit and conditions where we
+ * run the check_tx_hang logic with a transmit completion
+ * pending but without time to complete it yet.
+ */
+ if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
+ tx_pending) {
+ /* make sure it is true for two checks in a row */
+ ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
+ &tx_ring->state);
+ } else {
+ /* update completed stats and disarm the hang check */
+ tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+ clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_clean_tx_irq - Reclaim resources after transmit completes
+ * @tx_ring: tx ring to clean
+ * @budget: how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+{
+ u16 i = tx_ring->next_to_clean;
+ struct i40e_tx_buffer *tx_buf;
+ struct i40e_tx_desc *tx_desc;
+ unsigned int total_packets = 0;
+ unsigned int total_bytes = 0;
+
+ tx_buf = &tx_ring->tx_bi[i];
+ tx_desc = I40E_TX_DESC(tx_ring, i);
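+	/* bias i below the ring count so a wrap is detected when i hits zero */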
+ i -= tx_ring->count;
+
+ do {
+ struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+
+ /* if the descriptor isn't done, no work yet to do */
+ if (!(eop_desc->cmd_type_offset_bsz &
+ cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buf->next_to_watch = NULL;
+
+ /* update the statistics for this packet */
+ total_bytes += tx_buf->bytecount;
+ total_packets += tx_buf->gso_segs;
+
+ /* free the skb */
+ dev_kfree_skb_any(tx_buf->skb);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+
+ /* clear tx_buffer data */
+ tx_buf->skb = NULL;
+ dma_unmap_len_set(tx_buf, len, 0);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buf, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ }
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
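+	/* undo the bias applied before the loop to recover the true ring index */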
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
+ tx_ring->q_vector->tx.total_bytes += total_bytes;
+ tx_ring->q_vector->tx.total_packets += total_packets;
+
+ if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
+ /* schedule immediate reset if we believe we hung */
+ dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
+ " VSI <%d>\n"
+ " Tx Queue <%d>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n",
+ tx_ring->vsi->seid,
+ tx_ring->queue_index,
+ tx_ring->next_to_use, i);
+ dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " jiffies <%lx>\n",
+ tx_ring->tx_bi[i].time_stamp, jiffies);
+
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+ dev_info(tx_ring->dev,
+ "tx hang detected on queue %d, resetting adapter\n",
+ tx_ring->queue_index);
+
+ tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
+
+ /* the adapter is about to reset, no point in enabling stuff */
+ return true;
+ }
+
+ netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index),
+ total_packets, total_bytes);
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+ if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
+ (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (__netif_subqueue_stopped(tx_ring->netdev,
+ tx_ring->queue_index) &&
+ !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
+ netif_wake_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ }
+ }
+
+ return budget > 0;
+}
+
+/**
+ * i40e_set_new_dynamic_itr - Find new ITR level
+ * @rc: structure containing ring performance data
+ *
+ * Stores a new ITR value based on packets and byte counts during
+ * the last interrupt. The advantage of per interrupt computation
+ * is faster updates and more accurate ITR for the current traffic
+ * pattern. Constants in this function were computed based on
+ * theoretical maximum wire speed and thresholds were set based on
+ * testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ **/
+static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+{
+ enum i40e_latency_range new_latency_range = rc->latency_range;
+ u32 new_itr = rc->itr;
+ int bytes_per_int;
+
+ if (rc->total_packets == 0 || !rc->itr)
+ return;
+
+	/* simple throttle rate management
+ * 0-10MB/s lowest (100000 ints/s)
+ * 10-20MB/s low (20000 ints/s)
+ * 20-1249MB/s bulk (8000 ints/s)
+ */
+ bytes_per_int = rc->total_bytes / rc->itr;
+	switch (new_latency_range) {
+	case I40E_LOWEST_LATENCY:
+		if (bytes_per_int > 10)
+			new_latency_range = I40E_LOW_LATENCY;
+		break;
+	case I40E_LOW_LATENCY:
+		if (bytes_per_int > 20)
+			new_latency_range = I40E_BULK_LATENCY;
+		else if (bytes_per_int <= 10)
+			new_latency_range = I40E_LOWEST_LATENCY;
+		break;
+	case I40E_BULK_LATENCY:
+		if (bytes_per_int <= 20)
+			new_latency_range = I40E_LOW_LATENCY;
+		break;
+	}
+	rc->latency_range = new_latency_range;
+
+ switch (new_latency_range) {
+ case I40E_LOWEST_LATENCY:
+ new_itr = I40E_ITR_100K;
+ break;
+ case I40E_LOW_LATENCY:
+ new_itr = I40E_ITR_20K;
+ break;
+ case I40E_BULK_LATENCY:
+ new_itr = I40E_ITR_8K;
+ break;
+ default:
+ break;
+ }
+
+ if (new_itr != rc->itr) {
+		/* do an exponential smoothing: a weighted harmonic
+		 * mean that favours the old value 9:1 over the new
+		 */
+ new_itr = (10 * new_itr * rc->itr) /
+ ((9 * new_itr) + rc->itr);
+ rc->itr = new_itr & I40E_MAX_ITR;
+ }
+
+ rc->total_bytes = 0;
+ rc->total_packets = 0;
+}
+
+/**
+ * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
+ * @q_vector: the vector to adjust
+ **/
+static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
+{
+ u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
+ struct i40e_hw *hw = &q_vector->vsi->back->hw;
+ u32 reg_addr;
+ u16 old_itr;
+
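+	/* the ITRN1 registers index traffic vectors from 1; vector 0 uses ITR01 */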
+ reg_addr = I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1);
+ old_itr = q_vector->rx.itr;
+ i40e_set_new_dynamic_itr(&q_vector->rx);
+ if (old_itr != q_vector->rx.itr)
+ wr32(hw, reg_addr, q_vector->rx.itr);
+
+ reg_addr = I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1);
+ old_itr = q_vector->tx.itr;
+ i40e_set_new_dynamic_itr(&q_vector->tx);
+ if (old_itr != q_vector->tx.itr)
+ wr32(hw, reg_addr, q_vector->tx.itr);
+}
+
+/**
+ * i40evf_setup_tx_descriptors - Allocate the Tx descriptors
+ * @tx_ring: the tx ring to set up
+ *
+ * Return 0 on success, negative on error
+ **/
+int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int bi_size;
+
+ if (!dev)
+ return -ENOMEM;
+
+ bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+ tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
+ if (!tx_ring->tx_bi)
+ goto err;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!tx_ring->desc) {
+ dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
+ tx_ring->size);
+ goto err;
+ }
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ return 0;
+
+err:
+ kfree(tx_ring->tx_bi);
+ tx_ring->tx_bi = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * i40evf_clean_rx_ring - Free Rx buffers
+ * @rx_ring: ring to be cleaned
+ **/
+void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ struct i40e_rx_buffer *rx_bi;
+ unsigned long bi_size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!rx_ring->rx_bi)
+ return;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ rx_bi = &rx_ring->rx_bi[i];
+ if (rx_bi->dma) {
+ dma_unmap_single(dev,
+ rx_bi->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ rx_bi->dma = 0;
+ }
+ if (rx_bi->skb) {
+ dev_kfree_skb(rx_bi->skb);
+ rx_bi->skb = NULL;
+ }
+ if (rx_bi->page) {
+ if (rx_bi->page_dma) {
+ dma_unmap_page(dev,
+ rx_bi->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ rx_bi->page_dma = 0;
+ }
+ __free_page(rx_bi->page);
+ rx_bi->page = NULL;
+ rx_bi->page_offset = 0;
+ }
+ }
+
+ bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+ memset(rx_ring->rx_bi, 0, bi_size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+}
+
+/**
+ * i40evf_free_rx_resources - Free Rx resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void i40evf_free_rx_resources(struct i40e_ring *rx_ring)
+{
+ i40evf_clean_rx_ring(rx_ring);
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+
+ if (rx_ring->desc) {
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
+ rx_ring->desc = NULL;
+ }
+}
+
+/**
+ * i40evf_setup_rx_descriptors - Allocate Rx descriptors
+ * @rx_ring: Rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ int bi_size;
+
+ bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+ rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
+ if (!rx_ring->rx_bi)
+ goto err;
+
+ /* Round up to nearest 4K */
+ rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
+ ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
+ : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+
+ if (!rx_ring->desc) {
+ dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
+ rx_ring->size);
+ goto err;
+ }
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ return 0;
+err:
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * i40e_release_rx_desc - Bump the tail to release descriptors to hardware
+ * @rx_ring: ring to bump
+ * @val: new tail index
+ **/
+static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
+{
+ rx_ring->next_to_use = val;
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(val, rx_ring->tail);
+}
+
+/**
+ * i40evf_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
+{
+ u16 i = rx_ring->next_to_use;
+ union i40e_rx_desc *rx_desc;
+ struct i40e_rx_buffer *bi;
+ struct sk_buff *skb;
+
+ /* do nothing if no valid netdev defined */
+ if (!rx_ring->netdev || !cleaned_count)
+ return;
+
+ while (cleaned_count--) {
+ rx_desc = I40E_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_bi[i];
+ skb = bi->skb;
+
+ if (!skb) {
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_buf_len);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ goto no_buffers;
+ }
+ /* initialize queue mapping */
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ bi->skb = skb;
+ }
+
+ if (!bi->dma) {
+ bi->dma = dma_map_single(rx_ring->dev,
+ skb->data,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev, bi->dma)) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ bi->dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ if (ring_is_ps_enabled(rx_ring)) {
+ if (!bi->page) {
+ bi->page = alloc_page(GFP_ATOMIC);
+ if (!bi->page) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ goto no_buffers;
+ }
+ }
+
+ if (!bi->page_dma) {
+ /* use a half page if we're re-using */
+ bi->page_offset ^= PAGE_SIZE / 2;
+ bi->page_dma = dma_map_page(rx_ring->dev,
+ bi->page,
+ bi->page_offset,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ bi->page_dma)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ bi->page_dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+ } else {
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ rx_desc->read.hdr_addr = 0;
+ }
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ }
+
+no_buffers:
+ if (rx_ring->next_to_use != i)
+ i40e_release_rx_desc(rx_ring, i);
+}
+
+/**
+ * i40e_receive_skb - Send a completed packet up the stack
+ * @rx_ring: rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
+ **/
+static void i40e_receive_skb(struct i40e_ring *rx_ring,
+ struct sk_buff *skb, u16 vlan_tag)
+{
+ struct i40e_q_vector *q_vector = rx_ring->q_vector;
+ struct i40e_vsi *vsi = rx_ring->vsi;
+ u64 flags = vsi->back->flags;
+
+ if (vlan_tag & VLAN_VID_MASK)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+ if (flags & I40E_FLAG_IN_NETPOLL)
+ netif_rx(skb);
+ else
+ napi_gro_receive(&q_vector->napi, skb);
+}
+
+/**
+ * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
+ * @vsi: the VSI we care about
+ * @skb: skb currently being received and modified
+ * @rx_status: status value of last descriptor in packet
+ * @rx_error: error value of last descriptor in packet
+ * @rx_ptype: ptype value of last descriptor in packet
+ **/
+static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
+ struct sk_buff *skb,
+ u32 rx_status,
+ u32 rx_error,
+ u16 rx_ptype)
+{
+ bool ipv4_tunnel, ipv6_tunnel;
+ __wsum rx_udp_csum;
+ __sum16 csum;
+ struct iphdr *iph;
+
+ ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
+ (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
+ ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
+ (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+
+ skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Rx csum enabled and ip headers found? */
+ if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
+ rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ return;
+
+	/* likely incorrect csum if alternate IP extension headers found */
+ if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ return;
+
+	/* IP or L4 or outermost IP checksum error */
+ if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) {
+ vsi->back->hw_csum_rx_error++;
+ return;
+ }
+
+ if (ipv4_tunnel &&
+ !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
+ /* If VXLAN traffic has an outer UDPv4 checksum we need to check
+		 * it in the driver; the hardware does not do it for us.
+ * Since L3L4P bit was set we assume a valid IHL value (>=5)
+ * so the total length of IPv4 header is IHL*4 bytes
+ */
+ skb->transport_header = skb->mac_header +
+ sizeof(struct ethhdr) +
+ (ip_hdr(skb)->ihl * 4);
+
+ /* Add 4 bytes for VLAN tagged packets */
+ skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
+ skb->protocol == htons(ETH_P_8021AD))
+ ? VLAN_HLEN : 0;
+
+ rx_udp_csum = udp_csum(skb);
+ iph = ip_hdr(skb);
+ csum = csum_tcpudp_magic(
+ iph->saddr, iph->daddr,
+ (skb->len - skb_transport_offset(skb)),
+ IPPROTO_UDP, rx_udp_csum);
+
+ if (udp_hdr(skb)->check != csum) {
+ vsi->back->hw_csum_rx_error++;
+ return;
+ }
+ }
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+/**
+ * i40e_rx_hash - returns the hash value from the Rx descriptor
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline u32 i40e_rx_hash(struct i40e_ring *ring,
+ union i40e_rx_desc *rx_desc)
+{
+ const __le64 rss_mask =
+ cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
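+	/* a hash is reported only when FLTSTAT reads back as RSS_HASH */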
+ if ((ring->netdev->features & NETIF_F_RXHASH) &&
+ (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
+ return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+ else
+ return 0;
+}
+
+/**
+ * i40e_clean_rx_irq - Reclaim resources after receive completes
+ * @rx_ring: rx ring to clean
+ * @budget: how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
+ u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+ const int current_node = numa_node_id();
+ struct i40e_vsi *vsi = rx_ring->vsi;
+ u16 i = rx_ring->next_to_clean;
+ union i40e_rx_desc *rx_desc;
+ u32 rx_error, rx_status;
+ u64 qword;
+ u16 rx_ptype;
+
+ rx_desc = I40E_RX_DESC(rx_ring, i);
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
+ >> I40E_RXD_QW1_STATUS_SHIFT;
+
+ while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
+ union i40e_rx_desc *next_rxd;
+ struct i40e_rx_buffer *rx_bi;
+ struct sk_buff *skb;
+ u16 vlan_tag;
+ rx_bi = &rx_ring->rx_bi[i];
+ skb = rx_bi->skb;
+ prefetch(skb->data);
+
+ rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
+ rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
+ I40E_RXD_QW1_LENGTH_SPH_SHIFT;
+
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+ I40E_RXD_QW1_ERROR_SHIFT;
+ rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
+ rx_bi->skb = NULL;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * STATUS_DD bit is set
+ */
+ rmb();
+
+ /* Get the header and possibly the whole packet
+		 * If this is an skb from a previous receive, dma will be 0
+ */
+ if (rx_bi->dma) {
+ u16 len;
+
+ if (rx_hbo)
+ len = I40E_RX_HDR_SIZE;
+ else if (rx_sph)
+ len = rx_header_len;
+ else if (rx_packet_len)
+ len = rx_packet_len; /* 1buf/no split found */
+ else
+ len = rx_header_len; /* split always mode */
+
+ skb_put(skb, len);
+ dma_unmap_single(rx_ring->dev,
+ rx_bi->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ rx_bi->dma = 0;
+ }
+
+ /* Get the rest of the data if this was a header split */
+ if (ring_is_ps_enabled(rx_ring) && rx_packet_len) {
+
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_bi->page,
+ rx_bi->page_offset,
+ rx_packet_len);
+
+ skb->len += rx_packet_len;
+ skb->data_len += rx_packet_len;
+ skb->truesize += rx_packet_len;
+
+ if ((page_count(rx_bi->page) == 1) &&
+ (page_to_nid(rx_bi->page) == current_node))
+ get_page(rx_bi->page);
+ else
+ rx_bi->page = NULL;
+
+ dma_unmap_page(rx_ring->dev,
+ rx_bi->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ rx_bi->page_dma = 0;
+ }
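+		/* advance i (wrapping at the ring end) and prefetch the next desc */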
+ I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
+
+ if (unlikely(
+ !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+ struct i40e_rx_buffer *next_buffer;
+
+ next_buffer = &rx_ring->rx_bi[i];
+
+ if (ring_is_ps_enabled(rx_ring)) {
+ rx_bi->skb = next_buffer->skb;
+ rx_bi->dma = next_buffer->dma;
+ next_buffer->skb = skb;
+ next_buffer->dma = 0;
+ }
+ rx_ring->rx_stats.non_eop_descs++;
+ goto next_desc;
+ }
+
+ /* ERR_MASK will only have valid bits if EOP set */
+ if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ dev_kfree_skb_any(skb);
+ goto next_desc;
+ }
+
+ skb->rxhash = i40e_rx_hash(rx_ring, rx_desc);
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+ i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+
+ vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+ ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
+ : 0;
+ i40e_receive_skb(rx_ring, skb, vlan_tag);
+
+ rx_ring->netdev->last_rx = jiffies;
+ budget--;
+next_desc:
+ rx_desc->wb.qword1.status_error_len = 0;
+ if (!budget)
+ break;
+
+ cleaned_count++;
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
+ i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+ }
+
+ rx_ring->next_to_clean = i;
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
+ rx_ring->q_vector->rx.total_packets += total_rx_packets;
+ rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+
+ if (cleaned_count)
+ i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
+
+ return budget > 0;
+}
+
+/**
+ * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean all queues associated with a q_vector.
+ *
+ * Returns the amount of work done
+ **/
+int i40evf_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct i40e_q_vector *q_vector =
+ container_of(napi, struct i40e_q_vector, napi);
+ struct i40e_vsi *vsi = q_vector->vsi;
+ struct i40e_ring *ring;
+ bool clean_complete = true;
+ int budget_per_ring;
+
+ if (test_bit(__I40E_DOWN, &vsi->state)) {
+ napi_complete(napi);
+ return 0;
+ }
+
+ /* Since the actual Tx work is minimal, we can give the Tx a larger
+ * budget and be more aggressive about cleaning up the Tx descriptors.
+ */
+ i40e_for_each_ring(ring, q_vector->tx)
+ clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+
+ /* We attempt to distribute budget to each Rx queue fairly, but don't
+ * allow the budget to go below 1 because that would exit polling early.
+ */
+ budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
+
+ i40e_for_each_ring(ring, q_vector->rx)
+ clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
+
+ /* If work not completed, return budget and polling will return */
+ if (!clean_complete)
+ return budget;
+
+ /* Work is done so exit the polling mode and re-enable the interrupt */
+ napi_complete(napi);
+ if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
+ ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+ i40e_update_dynamic_itr(q_vector);
+
+ if (!test_bit(__I40E_DOWN, &vsi->state))
+ i40evf_irq_enable_queues(vsi->back, 1 << q_vector->v_idx);
+
+ return 0;
+}
+
+/**
+ * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ * @flags: the tx flags to be set
+ *
+ * Checks the skb and sets up the corresponding generic transmit flags
+ * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
+ *
+ * Returns an error code if the frame should be dropped, otherwise
+ * returns 0 to indicate the flags have been set properly.
+ **/
+static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+ struct i40e_ring *tx_ring,
+ u32 *flags)
+{
+ __be16 protocol = skb->protocol;
+ u32 tx_flags = 0;
+
+ /* if we have a HW VLAN tag being added, default to the HW one */
+ if (vlan_tx_tag_present(skb)) {
+ tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= I40E_TX_FLAGS_HW_VLAN;
+ /* else if it is a SW VLAN, check the next protocol and store the tag */
+ } else if (protocol == htons(ETH_P_8021Q)) {
+ struct vlan_hdr *vhdr, _vhdr;
+ vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
+ if (!vhdr)
+ return -EINVAL;
+
+ protocol = vhdr->h_vlan_encapsulated_proto;
+ tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= I40E_TX_FLAGS_SW_VLAN;
+ }
+
+ *flags = tx_flags;
+ return 0;
+}
+
+/**
+ * i40e_tso - set up the tso context descriptor
+ * @tx_ring: ptr to the ring to send
+ * @skb: ptr to the skb we're sending
+ * @tx_flags: the collected send information
+ * @protocol: the send protocol
+ * @hdr_len: ptr to the size of the packet header
+ * @cd_type_cmd_tso_mss: ptr to the context descriptor cmd/TSO len/MSS field
+ * @cd_tunneling: ptr to context descriptor bits
+ *
+ * Returns 0 if no TSO is needed, 1 if TSO is set up, or a negative error code
+ **/
+static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, __be16 protocol, u8 *hdr_len,
+ u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+{
+ u32 cd_cmd, cd_tso_len, cd_mss;
+ struct tcphdr *tcph;
+ struct iphdr *iph;
+ u32 l4len;
+ int err;
+ struct ipv6hdr *ipv6h;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+
+ if (protocol == htons(ETH_P_IP)) {
+ iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ 0, IPPROTO_TCP, 0);
+ } else if (skb_is_gso_v6(skb)) {
+ ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
+ : ipv6_hdr(skb);
+ tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
+ ipv6h->payload_len = 0;
+ tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+
+ l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
+ *hdr_len = (skb->encapsulation
+ ? (skb_inner_transport_header(skb) - skb->data)
+ : skb_transport_offset(skb)) + l4len;
+
+ /* find the field values */
+ cd_cmd = I40E_TX_CTX_DESC_TSO;
+ cd_tso_len = skb->len - *hdr_len;
+ cd_mss = skb_shinfo(skb)->gso_size;
+ *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+ ((u64)cd_tso_len <<
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ return 1;
+}
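+
+/* A worked sketch of the quadword packing above (hypothetical values):
+ * for a TSO frame with 60000 bytes of payload and an MSS of 1460,
+ * cd_type_cmd_tso_mss ends up as
+ * ((u64)I40E_TX_CTX_DESC_TSO << 4) | ((u64)60000 << 30) | ((u64)1460 << 50)
+ * per the I40E_TXD_CTX_QW1_* shifts, so the command, TSO length and MSS
+ * occupy disjoint bit ranges of one 64-bit descriptor word.
+ */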
+
+/**
+ * i40e_tx_enable_csum - Enable Tx checksum offloads
+ * @skb: send buffer
+ * @tx_flags: Tx flags currently set
+ * @td_cmd: Tx descriptor command bits to set
+ * @td_offset: Tx descriptor header offsets to set
+ * @tx_ring: Tx descriptor ring
+ * @cd_tunneling: ptr to context desc bits
+ **/
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+ u32 *td_cmd, u32 *td_offset,
+ struct i40e_ring *tx_ring,
+ u32 *cd_tunneling)
+{
+ struct ipv6hdr *this_ipv6_hdr;
+ unsigned int this_tcp_hdrlen;
+ struct iphdr *this_ip_hdr;
+ u32 network_hdr_len;
+ u8 l4_hdr = 0;
+
+ if (skb->encapsulation) {
+ network_hdr_len = skb_inner_network_header_len(skb);
+ this_ip_hdr = inner_ip_hdr(skb);
+ this_ipv6_hdr = inner_ipv6_hdr(skb);
+ this_tcp_hdrlen = inner_tcp_hdrlen(skb);
+
+ if (tx_flags & I40E_TX_FLAGS_IPV4) {
+ if (tx_flags & I40E_TX_FLAGS_TSO) {
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
+ ip_hdr(skb)->check = 0;
+ } else {
+ *cd_tunneling |=
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+ }
+ } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ /* the outer IPv6 header carries no checksum, so there
+ * is nothing for the hardware to recompute, even for TSO
+ */
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ }
+
+ /* Now set the ctx descriptor fields */
+ *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
+ I40E_TXD_CTX_UDP_TUNNELING |
+ ((skb_inner_network_offset(skb) -
+ skb_transport_offset(skb)) >> 1) <<
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+
+ } else {
+ network_hdr_len = skb_network_header_len(skb);
+ this_ip_hdr = ip_hdr(skb);
+ this_ipv6_hdr = ipv6_hdr(skb);
+ this_tcp_hdrlen = tcp_hdrlen(skb);
+ }
+
+ /* Enable IP checksum offloads */
+ if (tx_flags & I40E_TX_FLAGS_IPV4) {
+ l4_hdr = this_ip_hdr->protocol;
+ /* the stack computes the IP header already, the only time we
+ * need the hardware to recompute it is in the case of TSO.
+ */
+ if (tx_flags & I40E_TX_FLAGS_TSO) {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ this_ip_hdr->check = 0;
+ } else {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
+ }
+ /* Now set the td_offset for IP header length */
+ *td_offset = (network_hdr_len >> 2) <<
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ l4_hdr = this_ipv6_hdr->nexthdr;
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+ /* Now set the td_offset for IP header length */
+ *td_offset = (network_hdr_len >> 2) <<
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ }
+ /* words in MACLEN + dwords in IPLEN + dwords in L4Len */
+ *td_offset |= (skb_network_offset(skb) >> 1) <<
+ I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+ /* Enable L4 checksum offloads */
+ switch (l4_hdr) {
+ case IPPROTO_TCP:
+ /* enable checksum offloads */
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+ *td_offset |= (this_tcp_hdrlen >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case IPPROTO_SCTP:
+ /* enable SCTP checksum offload */
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+ *td_offset |= (sizeof(struct sctphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case IPPROTO_UDP:
+ /* enable UDP checksum offload */
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+ *td_offset |= (sizeof(struct udphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_create_tx_ctx - Build the Tx context descriptor
+ * @tx_ring: ring to create the descriptor on
+ * @cd_type_cmd_tso_mss: Quad Word 1
+ * @cd_tunneling: Quad Word 0 - bits 0-31
+ * @cd_l2tag2: Quad Word 0 - bits 32-63
+ **/
+static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
+ const u64 cd_type_cmd_tso_mss,
+ const u32 cd_tunneling, const u32 cd_l2tag2)
+{
+ struct i40e_tx_context_desc *context_desc;
+ int i = tx_ring->next_to_use;
+
+ if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
+ return;
+
+ /* grab the next descriptor */
+ context_desc = I40E_TX_CTXTDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ /* cpu_to_le32 and assign to struct fields */
+ context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
+ context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
+ context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
+}
+
+/**
+ * i40e_tx_map - Build the Tx descriptor
+ * @tx_ring: ring to send buffer on
+ * @skb: send buffer
+ * @first: first tx buffer info struct to use
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ * @td_cmd: the command field in the descriptor
+ * @td_offset: offset for checksum or crc
+ **/
+static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ struct i40e_tx_buffer *first, u32 tx_flags,
+ const u8 hdr_len, u32 td_cmd, u32 td_offset)
+{
+ unsigned int data_len = skb->data_len;
+ unsigned int size = skb_headlen(skb);
+ struct skb_frag_struct *frag;
+ struct i40e_tx_buffer *tx_bi;
+ struct i40e_tx_desc *tx_desc;
+ u16 i = tx_ring->next_to_use;
+ u32 td_tag = 0;
+ dma_addr_t dma;
+ u16 gso_segs;
+
+ if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
+ td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
+ td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
+ I40E_TX_FLAGS_VLAN_SHIFT;
+ }
+
+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
+ gso_segs = skb_shinfo(skb)->gso_segs;
+ else
+ gso_segs = 1;
+
+ /* multiply data chunks by size of headers */
+ first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
+ first->gso_segs = gso_segs;
+ first->skb = skb;
+ first->tx_flags = tx_flags;
+
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+ tx_bi = first;
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_bi, len, size);
+ dma_unmap_addr_set(tx_bi, dma, dma);
+
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+
+ while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset,
+ I40E_MAX_DATA_PER_TXD, td_tag);
+
+ tx_desc++;
+ i++;
+ if (i == tx_ring->count) {
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+
+ dma += I40E_MAX_DATA_PER_TXD;
+ size -= I40E_MAX_DATA_PER_TXD;
+
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ }
+
+ if (likely(!data_len))
+ break;
+
+ tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
+ size, td_tag);
+
+ tx_desc++;
+ i++;
+ if (i == tx_ring->count) {
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+ DMA_TO_DEVICE);
+
+ tx_bi = &tx_ring->tx_bi[i];
+ }
+
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag) |
+ cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
+
+ netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index),
+ first->bytecount);
+
+ /* set the timestamp */
+ first->time_stamp = jiffies;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ /* notify HW of packet */
+ writel(i, tx_ring->tail);
+
+ return;
+
+dma_error:
+ dev_info(tx_ring->dev, "TX DMA map failed\n");
+
+ /* clear dma mappings for failed tx_bi map */
+ for (;;) {
+ tx_bi = &tx_ring->tx_bi[i];
+ i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
+ if (tx_bi == first)
+ break;
+ if (i == 0)
+ i = tx_ring->count;
+ i--;
+ }
+
+ tx_ring->next_to_use = i;
+}
+
+/**
+ * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the number of descriptors we want to assure are available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ /* Memory barrier before checking head and tail */
+ smp_mb();
+
+ /* Check again in a case another CPU has just made room available. */
+ if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ return 0;
+}
+
+/**
+ * i40e_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the number of descriptors we want to assure are available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __i40e_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns the number of data descriptors needed for this skb. Returns 0 to
+ * indicate there are not enough descriptors available in this ring, since we
+ * need at least one descriptor.
+ **/
+static int i40e_xmit_descriptor_count(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
+{
+#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
+ unsigned int f;
+#endif
+ int count = 0;
+
+ /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+ count += skb_shinfo(skb)->nr_frags;
+#endif
+ count += TXD_USE_COUNT(skb_headlen(skb));
+ if (i40e_maybe_stop_tx(tx_ring, count + 3)) {
+ tx_ring->tx_stats.tx_busy++;
+ return 0;
+ }
+ return count;
+}
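+
+/* Worked example (hypothetical skb): with 4K pages, a 1514-byte linear
+ * head plus 3 page fragments needs
+ * count = TXD_USE_COUNT(1514) + 3 = 1 + 3 = 4
+ * data descriptors, and i40e_maybe_stop_tx() is asked for count + 3 = 7
+ * free slots to cover the context descriptor and the tail/head gap.
+ */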
+
+/**
+ * i40e_xmit_frame_ring - Sends buffer on Tx ring
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
+{
+ u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
+ u32 cd_tunneling = 0, cd_l2tag2 = 0;
+ struct i40e_tx_buffer *first;
+ u32 td_offset = 0;
+ u32 tx_flags = 0;
+ __be16 protocol;
+ u32 td_cmd = 0;
+ u8 hdr_len = 0;
+ int tso;
+
+ if (!i40e_xmit_descriptor_count(skb, tx_ring))
+ return NETDEV_TX_BUSY;
+
+ /* prepare the xmit flags */
+ if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+ goto out_drop;
+
+ /* obtain protocol of skb */
+ protocol = skb->protocol;
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_bi[tx_ring->next_to_use];
+
+ /* setup IPv4/IPv6 offloads */
+ if (protocol == htons(ETH_P_IP))
+ tx_flags |= I40E_TX_FLAGS_IPV4;
+ else if (protocol == htons(ETH_P_IPV6))
+ tx_flags |= I40E_TX_FLAGS_IPV6;
+
+ tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+ &cd_type_cmd_tso_mss, &cd_tunneling);
+
+ if (tso < 0)
+ goto out_drop;
+ else if (tso)
+ tx_flags |= I40E_TX_FLAGS_TSO;
+
+ skb_tx_timestamp(skb);
+
+ /* always enable CRC insertion offload */
+ td_cmd |= I40E_TX_DESC_CMD_ICRC;
+
+ /* Always offload the checksum, since it's in the data descriptor */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ tx_flags |= I40E_TX_FLAGS_CSUM;
+
+ i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+ tx_ring, &cd_tunneling);
+ }
+
+ i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
+ cd_tunneling, cd_l2tag2);
+
+ i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+ td_cmd, td_offset);
+
+ i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ return NETDEV_TX_OK;
+
+out_drop:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * i40evf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
+ * @skb: send buffer
+ * @netdev: network interface device structure
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_ring *tx_ring = adapter->tx_rings[skb->queue_mapping];
+
+ /* The hardware can't handle really short frames; hardware padding
+ * works beyond this point.
+ */
+ if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
+ if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
+ return NETDEV_TX_OK;
+ skb->len = I40E_MIN_TX_LEN;
+ skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
+ }
+
+ return i40e_xmit_frame_ring(skb, tx_ring);
+}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
new file mode 100644
index 000000000000..10bf49e18d7f
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -0,0 +1,296 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_TXRX_H_
+#define _I40E_TXRX_H_
+
+/* Interrupt Throttling and Rate Limiting (storm control) Goodies */
+
+#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
+#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
+#define I40E_MAX_IRATE 0x03F
+#define I40E_MIN_IRATE 0x001
+#define I40E_IRATE_USEC_RESOLUTION 4
+#define I40E_ITR_100K 0x0005
+#define I40E_ITR_20K 0x0019
+#define I40E_ITR_8K 0x003E
+#define I40E_ITR_4K 0x007A
+#define I40E_ITR_RX_DEF I40E_ITR_8K
+#define I40E_ITR_TX_DEF I40E_ITR_4K
+#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
+#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
+#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
+#define I40E_DEFAULT_IRQ_WORK 256
+#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
+#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
+#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
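+/* Usage sketch (assumed setting): for
+ * setting = I40E_ITR_RX_DEF | I40E_ITR_DYNAMIC,
+ * ITR_IS_DYNAMIC(setting) is true, and ITR_TO_REG(setting) strips the
+ * flag bit and halves the value into the register's 2-usec units;
+ * ITR_REG_TO_USEC() is the inverse conversion.
+ */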
+
+#define I40E_QUEUE_END_OF_LIST 0x7FF
+
+/* This enum matches the hardware bits and is meant to be used by DYN_CTLN
+ * registers and QINT registers, or more generally anywhere in the manual
+ * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
+ * register but instead is a special value meaning "don't update" ITR0/1/2.
+ */
+enum i40e_dyn_idx_t {
+ I40E_IDX_ITR0 = 0,
+ I40E_IDX_ITR1 = 1,
+ I40E_IDX_ITR2 = 2,
+ I40E_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
+};
+
+/* these are indexes into ITRN registers */
+#define I40E_RX_ITR I40E_IDX_ITR0
+#define I40E_TX_ITR I40E_IDX_ITR1
+#define I40E_PE_ITR I40E_IDX_ITR2
+
+/* Supported RSS offloads */
+#define I40E_DEFAULT_RSS_HENA ( \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
+/* Supported Rx Buffer Sizes */
+#define I40E_RXBUFFER_512 512 /* Used for packet split */
+#define I40E_RXBUFFER_2048 2048
+#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
+#define I40E_RXBUFFER_4096 4096
+#define I40E_RXBUFFER_8192 8192
+#define I40E_MAX_RXBUFFER 9728 /* largest size for a single descriptor */
+
+/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
+ * this adds up to 512 bytes of extra data, meaning the smallest allocation
+ * we could have is 1K.
+ * i.e. RXBUFFER_512 --> size-1024 slab
+ */
+#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512
+
+/* How many Rx buffers do we bundle into one write to the hardware? */
+#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define I40E_RX_NEXT_DESC(r, i, n) \
+ do { \
+ (i)++; \
+ if ((i) == (r)->count) \
+ i = 0; \
+ (n) = I40E_RX_DESC((r), (i)); \
+ } while (0)
+
+#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n) \
+ do { \
+ I40E_RX_NEXT_DESC((r), (i), (n)); \
+ prefetch((n)); \
+ } while (0)
+
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+#define I40E_MIN_TX_LEN 17
+#define I40E_MAX_DATA_PER_TXD 16383 /* aka 16kB - 1 */
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
+#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
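+/* e.g. with 4K pages (TXD_USE_COUNT(PAGE_SIZE) == 1) and a typical
+ * MAX_SKB_FRAGS of 17, DESC_NEEDED == 17 * 1 + 4 == 21: enough for a
+ * maximally fragmented skb plus head, context descriptor and the
+ * two-descriptor tail/head gap.
+ */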
+
+#define I40E_TX_FLAGS_CSUM (u32)(1)
+#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
+#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2)
+#define I40E_TX_FLAGS_TSO (u32)(1 << 3)
+#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4)
+#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
+#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
+#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
+#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
+#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
+#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
+#define I40E_TX_FLAGS_VLAN_SHIFT 16
+
+struct i40e_tx_buffer {
+ struct i40e_tx_desc *next_to_watch;
+ unsigned long time_stamp;
+ struct sk_buff *skb;
+ unsigned int bytecount;
+ unsigned short gso_segs;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ u32 tx_flags;
+};
+
+struct i40e_rx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct page *page;
+ dma_addr_t page_dma;
+ unsigned int page_offset;
+};
+
+struct i40e_queue_stats {
+ u64 packets;
+ u64 bytes;
+};
+
+struct i40e_tx_queue_stats {
+ u64 restart_queue;
+ u64 tx_busy;
+ u64 tx_done_old;
+};
+
+struct i40e_rx_queue_stats {
+ u64 non_eop_descs;
+ u64 alloc_page_failed;
+ u64 alloc_buff_failed;
+};
+
+enum i40e_ring_state_t {
+ __I40E_TX_FDIR_INIT_DONE,
+ __I40E_TX_XPS_INIT_DONE,
+ __I40E_TX_DETECT_HANG,
+ __I40E_HANG_CHECK_ARMED,
+ __I40E_RX_PS_ENABLED,
+ __I40E_RX_LRO_ENABLED,
+ __I40E_RX_16BYTE_DESC_ENABLED,
+};
+
+#define ring_is_ps_enabled(ring) \
+ test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define set_ring_ps_enabled(ring) \
+ set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define clear_ring_ps_enabled(ring) \
+ clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define check_for_tx_hang(ring) \
+ test_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+ set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+ clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define ring_is_lro_enabled(ring) \
+ test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define set_ring_lro_enabled(ring) \
+ set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define clear_ring_lro_enabled(ring) \
+ clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define ring_is_16byte_desc_enabled(ring) \
+ test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+#define set_ring_16byte_desc_enabled(ring) \
+ set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+#define clear_ring_16byte_desc_enabled(ring) \
+ clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+
+/* struct that defines a descriptor ring, associated with a VSI */
+struct i40e_ring {
+ struct i40e_ring *next; /* pointer to next ring in q_vector */
+ void *desc; /* Descriptor ring memory */
+ struct device *dev; /* Used for DMA mapping */
+ struct net_device *netdev; /* netdev ring maps to */
+ union {
+ struct i40e_tx_buffer *tx_bi;
+ struct i40e_rx_buffer *rx_bi;
+ };
+ unsigned long state;
+ u16 queue_index; /* Queue number of ring */
+ u8 dcb_tc; /* Traffic class of ring */
+ u8 __iomem *tail;
+
+ u16 count; /* Number of descriptors */
+ u16 reg_idx; /* HW register index of the ring */
+ u16 rx_hdr_len;
+ u16 rx_buf_len;
+ u8 dtype;
+#define I40E_RX_DTYPE_NO_SPLIT 0
+#define I40E_RX_DTYPE_SPLIT_ALWAYS 1
+#define I40E_RX_DTYPE_HEADER_SPLIT 2
+ u8 hsplit;
+#define I40E_RX_SPLIT_L2 0x1
+#define I40E_RX_SPLIT_IP 0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP 0x8
+
+ /* used in interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ u8 atr_sample_rate;
+ u8 atr_count;
+
+ bool ring_active; /* is ring online or not */
+
+ /* stats structs */
+ struct i40e_queue_stats stats;
+ struct u64_stats_sync syncp;
+ union {
+ struct i40e_tx_queue_stats tx_stats;
+ struct i40e_rx_queue_stats rx_stats;
+ };
+
+ unsigned int size; /* length of descriptor ring in bytes */
+ dma_addr_t dma; /* physical address of ring */
+
+ struct i40e_vsi *vsi; /* Backreference to associated VSI */
+ struct i40e_q_vector *q_vector; /* Backreference to associated vector */
+
+ struct rcu_head rcu; /* to avoid race on free */
+} ____cacheline_internodealigned_in_smp;
+
+enum i40e_latency_range {
+ I40E_LOWEST_LATENCY = 0,
+ I40E_LOW_LATENCY = 1,
+ I40E_BULK_LATENCY = 2,
+};
+
+struct i40e_ring_container {
+ /* array of pointers to rings */
+ struct i40e_ring *ring;
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u16 count;
+ enum i40e_latency_range latency_range;
+ u16 itr;
+};
+
+/* iterator for handling rings in ring container */
+#define i40e_for_each_ring(pos, head) \
+ for (pos = (head).ring; pos != NULL; pos = pos->next)
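+/* Usage sketch: to walk every Tx ring owned by a vector, e.g.
+ *
+ * struct i40e_ring *ring;
+ *
+ * i40e_for_each_ring(ring, q_vector->tx)
+ * total_packets += ring->stats.packets;
+ *
+ * as i40evf_napi_poll() does when cleaning its Tx and Rx rings.
+ */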
+
+void i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
+netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
+void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
+int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring);
+int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring);
+void i40evf_free_tx_resources(struct i40e_ring *tx_ring);
+void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
+int i40evf_napi_poll(struct napi_struct *napi, int budget);
+#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
new file mode 100644
index 000000000000..beb3fa7014c5
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -0,0 +1,1152 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_TYPE_H_
+#define _I40E_TYPE_H_
+
+#include "i40e_status.h"
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+
+/* Device IDs */
+#define I40E_SFP_XL710_DEVICE_ID 0x1572
+#define I40E_SFP_X710_DEVICE_ID 0x1573
+#define I40E_QEMU_DEVICE_ID 0x1574
+#define I40E_KX_A_DEVICE_ID 0x157F
+#define I40E_KX_B_DEVICE_ID 0x1580
+#define I40E_KX_C_DEVICE_ID 0x1581
+#define I40E_KX_D_DEVICE_ID 0x1582
+#define I40E_QSFP_A_DEVICE_ID 0x1583
+#define I40E_QSFP_B_DEVICE_ID 0x1584
+#define I40E_QSFP_C_DEVICE_ID 0x1585
+#define I40E_VF_DEVICE_ID 0x154C
+#define I40E_VF_HV_DEVICE_ID 0x1571
+
+#define i40e_is_40G_device(d) ((d) == I40E_QSFP_A_DEVICE_ID || \
+ (d) == I40E_QSFP_B_DEVICE_ID || \
+ (d) == I40E_QSFP_C_DEVICE_ID)
+
+#define I40E_MAX_VSI_QP 16
+#define I40E_MAX_VF_VSI 3
+#define I40E_MAX_CHAINED_RX_BUFFERS 5
+#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
+
+/* Max default timeout in ms */
+#define I40E_MAX_NVM_TIMEOUT 18000
+
+/* Switch from ms to the 2usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2)
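+/* e.g. the 18000 ms NVM timeout above is (18000 * 1000) / 2 = 9000000
+ * ticks of the 2-usec global clock.
+ */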
+
+/* forward declaration */
+struct i40e_hw;
+typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+
+#define ETH_ALEN 6
+
+/* Data type manipulation macros. */
+
+#define I40E_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
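+/* Worked example (hypothetical ring state): with count = 512,
+ * next_to_clean = 5 and next_to_use = 10, this yields
+ * 512 + 5 - 10 - 1 = 506 free descriptors; with the indexes swapped it
+ * yields 0 + 10 - 5 - 1 = 4. The trailing "- 1" keeps next_to_use from
+ * ever catching up to next_to_clean.
+ */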
+
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define I40E_QTX_CTL_VF_QUEUE 0x0
+#define I40E_QTX_CTL_VM_QUEUE 0x1
+#define I40E_QTX_CTL_PF_QUEUE 0x2
+
+/* debug masks - set these bits in hw->debug_mask to control output */
+enum i40e_debug_mask {
+ I40E_DEBUG_INIT = 0x00000001,
+ I40E_DEBUG_RELEASE = 0x00000002,
+
+ I40E_DEBUG_LINK = 0x00000010,
+ I40E_DEBUG_PHY = 0x00000020,
+ I40E_DEBUG_HMC = 0x00000040,
+ I40E_DEBUG_NVM = 0x00000080,
+ I40E_DEBUG_LAN = 0x00000100,
+ I40E_DEBUG_FLOW = 0x00000200,
+ I40E_DEBUG_DCB = 0x00000400,
+ I40E_DEBUG_DIAG = 0x00000800,
+
+ I40E_DEBUG_AQ_MESSAGE = 0x01000000,
+ I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
+ I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
+ I40E_DEBUG_AQ_COMMAND = 0x06000000,
+ I40E_DEBUG_AQ = 0x0F000000,
+
+ I40E_DEBUG_USER = 0xF0000000,
+
+ I40E_DEBUG_ALL = 0xFFFFFFFF
+};
+
+/* PCI Bus Info */
+#define I40E_PCI_LINK_WIDTH_1 0x10
+#define I40E_PCI_LINK_WIDTH_2 0x20
+#define I40E_PCI_LINK_WIDTH_4 0x40
+#define I40E_PCI_LINK_WIDTH_8 0x80
+#define I40E_PCI_LINK_SPEED_2500 0x1
+#define I40E_PCI_LINK_SPEED_5000 0x2
+#define I40E_PCI_LINK_SPEED_8000 0x3
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with. This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed. For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum i40e_mac_type {
+ I40E_MAC_UNKNOWN = 0,
+ I40E_MAC_X710,
+ I40E_MAC_XL710,
+ I40E_MAC_VF,
+ I40E_MAC_GENERIC,
+};
+
+enum i40e_media_type {
+ I40E_MEDIA_TYPE_UNKNOWN = 0,
+ I40E_MEDIA_TYPE_FIBER,
+ I40E_MEDIA_TYPE_BASET,
+ I40E_MEDIA_TYPE_BACKPLANE,
+ I40E_MEDIA_TYPE_CX4,
+ I40E_MEDIA_TYPE_DA,
+ I40E_MEDIA_TYPE_VIRTUAL
+};
+
+enum i40e_fc_mode {
+ I40E_FC_NONE = 0,
+ I40E_FC_RX_PAUSE,
+ I40E_FC_TX_PAUSE,
+ I40E_FC_FULL,
+ I40E_FC_PFC,
+ I40E_FC_DEFAULT
+};
+
+enum i40e_vsi_type {
+ I40E_VSI_MAIN = 0,
+ I40E_VSI_VMDQ1,
+ I40E_VSI_VMDQ2,
+ I40E_VSI_CTRL,
+ I40E_VSI_FCOE,
+ I40E_VSI_MIRROR,
+ I40E_VSI_SRIOV,
+ I40E_VSI_FDIR,
+ I40E_VSI_TYPE_UNKNOWN
+};
+
+enum i40e_queue_type {
+ I40E_QUEUE_TYPE_RX = 0,
+ I40E_QUEUE_TYPE_TX,
+ I40E_QUEUE_TYPE_PE_CEQ,
+ I40E_QUEUE_TYPE_UNKNOWN
+};
+
+struct i40e_link_status {
+ enum i40e_aq_phy_type phy_type;
+ enum i40e_aq_link_speed link_speed;
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ u8 loopback;
+ /* is Link Status Event notification to SW enabled */
+ bool lse_enable;
+};
+
+struct i40e_phy_info {
+ struct i40e_link_status link_info;
+ struct i40e_link_status link_info_old;
+ u32 autoneg_advertised;
+ u32 phy_id;
+ u32 module_type;
+ bool get_link_info;
+ enum i40e_media_type media_type;
+};
+
+#define I40E_HW_CAP_MAX_GPIO 30
+/* Capabilities of a PF or a VF or the whole device */
+struct i40e_hw_capabilities {
+ u32 switch_mode;
+#define I40E_NVM_IMAGE_TYPE_EVB 0x0
+#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2
+#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
+
+ u32 management_mode;
+ u32 npar_enable;
+ u32 os2bmc;
+ u32 valid_functions;
+ bool sr_iov_1_1;
+ bool vmdq;
+ bool evb_802_1_qbg; /* Edge Virtual Bridging */
+ bool evb_802_1_qbh; /* Bridge Port Extension */
+ bool dcb;
+ bool fcoe;
+ bool mfp_mode_1;
+ bool mgmt_cem;
+ bool ieee_1588;
+ bool iwarp;
+ bool fd;
+ u32 fd_filters_guaranteed;
+ u32 fd_filters_best_effort;
+ bool rss;
+ u32 rss_table_size;
+ u32 rss_table_entry_width;
+ bool led[I40E_HW_CAP_MAX_GPIO];
+ bool sdp[I40E_HW_CAP_MAX_GPIO];
+ u32 nvm_image_type;
+ u32 num_flow_director_filters;
+ u32 num_vfs;
+ u32 vf_base_id;
+ u32 num_vsis;
+ u32 num_rx_qp;
+ u32 num_tx_qp;
+ u32 base_queue;
+ u32 num_msix_vectors;
+ u32 num_msix_vectors_vf;
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+ u32 mdio_port_num;
+ u32 mdio_port_mode;
+ u8 rx_buf_chain_len;
+ u32 enabled_tcmap;
+ u32 maxtc;
+};
+
+struct i40e_mac_info {
+ enum i40e_mac_type type;
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
+ u16 max_fcoeq;
+};
+
+enum i40e_aq_resources_ids {
+ I40E_NVM_RESOURCE_ID = 1
+};
+
+enum i40e_aq_resource_access_type {
+ I40E_RESOURCE_READ = 1,
+ I40E_RESOURCE_WRITE
+};
+
+struct i40e_nvm_info {
+ u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */
+ u64 hw_semaphore_wait; /* - || - */
+ u32 timeout; /* [ms] */
+ u16 sr_size; /* Shadow RAM size in words */
+ bool blank_nvm_mode; /* is NVM empty (no FW present)*/
+ u16 version; /* NVM package version */
+ u32 eetrack; /* NVM data version */
+};
+
+/* PCI bus types */
+enum i40e_bus_type {
+ i40e_bus_type_unknown = 0,
+ i40e_bus_type_pci,
+ i40e_bus_type_pcix,
+ i40e_bus_type_pci_express,
+ i40e_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum i40e_bus_speed {
+ i40e_bus_speed_unknown = 0,
+ i40e_bus_speed_33 = 33,
+ i40e_bus_speed_66 = 66,
+ i40e_bus_speed_100 = 100,
+ i40e_bus_speed_120 = 120,
+ i40e_bus_speed_133 = 133,
+ i40e_bus_speed_2500 = 2500,
+ i40e_bus_speed_5000 = 5000,
+ i40e_bus_speed_8000 = 8000,
+ i40e_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum i40e_bus_width {
+ i40e_bus_width_unknown = 0,
+ i40e_bus_width_pcie_x1 = 1,
+ i40e_bus_width_pcie_x2 = 2,
+ i40e_bus_width_pcie_x4 = 4,
+ i40e_bus_width_pcie_x8 = 8,
+ i40e_bus_width_32 = 32,
+ i40e_bus_width_64 = 64,
+ i40e_bus_width_reserved
+};
+
+/* Bus parameters */
+struct i40e_bus_info {
+ enum i40e_bus_speed speed;
+ enum i40e_bus_width width;
+ enum i40e_bus_type type;
+
+ u16 func;
+ u16 device;
+ u16 lan_id;
+};
+
+/* Flow control (FC) parameters */
+struct i40e_fc_info {
+ enum i40e_fc_mode current_mode; /* FC mode in effect */
+ enum i40e_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+#define I40E_MAX_TRAFFIC_CLASS 8
+#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DCBX_MAX_APPS 32
+#define I40E_LLDPDU_SIZE 1500
+
+/* IEEE 802.1Qaz ETS Configuration data */
+struct i40e_ieee_ets_config {
+ u8 willing;
+ u8 cbs;
+ u8 maxtcs;
+ u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* IEEE 802.1Qaz ETS Recommendation data */
+struct i40e_ieee_ets_recommend {
+ u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* IEEE 802.1Qaz PFC Configuration data */
+struct i40e_ieee_pfc_config {
+ u8 willing;
+ u8 mbc;
+ u8 pfccap;
+ u8 pfcenable;
+};
+
+/* IEEE 802.1Qaz Application Priority data */
+struct i40e_ieee_app_priority_table {
+ u8 priority;
+ u8 selector;
+ u16 protocolid;
+};
+
+struct i40e_dcbx_config {
+ u32 numapps;
+ struct i40e_ieee_ets_config etscfg;
+ struct i40e_ieee_ets_recommend etsrec;
+ struct i40e_ieee_pfc_config pfc;
+ struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS];
+};
+
+/* Port hardware description */
+struct i40e_hw {
+ u8 __iomem *hw_addr;
+ void *back;
+
+ /* function pointer structs */
+ struct i40e_phy_info phy;
+ struct i40e_mac_info mac;
+ struct i40e_bus_info bus;
+ struct i40e_nvm_info nvm;
+ struct i40e_fc_info fc;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u8 port;
+ bool adapter_stopped;
+
+ /* capabilities for entire device and PCI func */
+ struct i40e_hw_capabilities dev_caps;
+ struct i40e_hw_capabilities func_caps;
+
+ /* Flow Director shared filter space */
+ u16 fdir_shared_filter_count;
+
+ /* device profile info */
+ u8 pf_id;
+ u16 main_vsi_seid;
+
+ /* Closest numa node to the device */
+ u16 numa_node;
+
+ /* Admin Queue info */
+ struct i40e_adminq_info aq;
+
+ /* HMC info */
+ struct i40e_hmc_info hmc; /* HMC info struct */
+
+ /* LLDP/DCBX Status */
+ u16 dcbx_status;
+
+ /* DCBX info */
+ struct i40e_dcbx_config local_dcbx_config;
+ struct i40e_dcbx_config remote_dcbx_config;
+
+ /* debug mask */
+ u32 debug_mask;
+};
+
+struct i40e_driver_version {
+ u8 major_version;
+ u8 minor_version;
+ u8 build_version;
+ u8 subbuild_version;
+};
+
+/* RX Descriptors */
+union i40e_16byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fd_id; /* Flow director filter id */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* ext status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ } wb; /* writeback */
+};
+
+union i40e_32byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_buffer_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ struct {
+ __le16 ext_status; /* extended status */
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ union {
+ __le32 flex_bytes_lo;
+ __le32 pe_status;
+ } lo_dword;
+ union {
+ __le32 flex_bytes_hi;
+ __le32 fd_id;
+ } hi_dword;
+ } qword3;
+ } wb; /* writeback */
+};
+
+#define I40E_RXD_QW1_STATUS_SHIFT 0
+#define I40E_RXD_QW1_STATUS_MASK (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
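+/* Typical decode sketch for the writeback path:
+ *
+ * u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ * u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ * I40E_RXD_QW1_STATUS_SHIFT;
+ *
+ * after which individual bits such as I40E_RX_DESC_STATUS_DD_SHIFT
+ * below can be tested.
+ */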
+
+enum i40e_rx_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_STATUS_DD_SHIFT = 0,
+ I40E_RX_DESC_STATUS_EOF_SHIFT = 1,
+ I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
+ I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3,
+ I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
+ I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
+ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
+ I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
+ I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
+ I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
+ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
+ I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
+ I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
+ I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18
+};
+
+#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \
+ I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
+enum i40e_rx_desc_fltstat_values {
+ I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
+ I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
+ I40E_RX_DESC_FLTSTAT_RSV = 2,
+ I40E_RX_DESC_FLTSTAT_RSS_HASH = 3,
+};
+
+#define I40E_RXD_QW1_ERROR_SHIFT 19
+#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
+
+enum i40e_rx_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_ERROR_RXE_SHIFT = 0,
+ I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1,
+ I40E_RX_DESC_ERROR_HBO_SHIFT = 2,
+ I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
+ I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
+ I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
+ I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
+ I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6
+};
+
+enum i40e_rx_desc_error_l3l4e_fcoe_masks {
+ I40E_RX_DESC_ERROR_L3L4E_NONE = 0,
+ I40E_RX_DESC_ERROR_L3L4E_PROT = 1,
+ I40E_RX_DESC_ERROR_L3L4E_FC = 2,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
+};
+
+#define I40E_RXD_QW1_PTYPE_SHIFT 30
+#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-ip values */
+enum i40e_rx_l2_ptype {
+ I40E_RX_PTYPE_L2_RESERVED = 0,
+ I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
+ I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
+ I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
+ I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
+ I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
+ I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
+ I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ I40E_RX_PTYPE_L2_ARP = 11,
+ I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
+ I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
+ I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
+ I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
+ I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
+};
+
+struct i40e_rx_ptype_decoded {
+ u32 ptype:8;
+ u32 known:1;
+ u32 outer_ip:1;
+ u32 outer_ip_ver:1;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:4;
+ u32 payload_layer:3;
+};
+
+enum i40e_rx_ptype_outer_ip {
+ I40E_RX_PTYPE_OUTER_L2 = 0,
+ I40E_RX_PTYPE_OUTER_IP = 1
+};
+
+enum i40e_rx_ptype_outer_ip_ver {
+ I40E_RX_PTYPE_OUTER_NONE = 0,
+ I40E_RX_PTYPE_OUTER_IPV4 = 0,
+ I40E_RX_PTYPE_OUTER_IPV6 = 1
+};
+
+enum i40e_rx_ptype_outer_fragmented {
+ I40E_RX_PTYPE_NOT_FRAG = 0,
+ I40E_RX_PTYPE_FRAG = 1
+};
+
+enum i40e_rx_ptype_tunnel_type {
+ I40E_RX_PTYPE_TUNNEL_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_IP_IP = 1,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum i40e_rx_ptype_tunnel_end_prot {
+ I40E_RX_PTYPE_TUNNEL_END_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1,
+ I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2,
+};
+
+enum i40e_rx_ptype_inner_prot {
+ I40E_RX_PTYPE_INNER_PROT_NONE = 0,
+ I40E_RX_PTYPE_INNER_PROT_UDP = 1,
+ I40E_RX_PTYPE_INNER_PROT_TCP = 2,
+ I40E_RX_PTYPE_INNER_PROT_SCTP = 3,
+ I40E_RX_PTYPE_INNER_PROT_ICMP = 4,
+ I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5
+};
+
+enum i40e_rx_ptype_payload_layer {
+ I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
+};
+
+#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
+#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52
+#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
+ I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
+#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \
+ I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+
+enum i40e_rx_desc_ext_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
+ I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
+ I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */
+ I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
+ I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
+ I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
+};
+
+enum i40e_rx_desc_pe_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
+ I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
+ I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */
+ I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24,
+ I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25,
+ I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26,
+ I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27,
+ I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28,
+ I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
+};
+
+#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
+#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum i40e_rx_prog_status_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
+};
+
+enum i40e_rx_prog_status_desc_prog_id_masks {
+ I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
+};
+
+enum i40e_rx_prog_status_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
+};
+
+/* TX Descriptor */
+struct i40e_tx_desc {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le64 cmd_type_offset_bsz;
+};
+
+#define I40E_TXD_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
+
+enum i40e_tx_desc_dtype_value {
+ I40E_TX_DESC_DTYPE_DATA = 0x0,
+ I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
+ I40E_TX_DESC_DTYPE_CONTEXT = 0x1,
+ I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2,
+ I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8,
+ I40E_TX_DESC_DTYPE_DDP_CTX = 0x9,
+ I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD,
+ I40E_TX_DESC_DTYPE_DESC_DONE = 0xF
+};
+
+#define I40E_TXD_QW1_CMD_SHIFT 4
+#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
+
+enum i40e_tx_desc_cmd_bits {
+ I40E_TX_DESC_CMD_EOP = 0x0001,
+ I40E_TX_DESC_CMD_RS = 0x0002,
+ I40E_TX_DESC_CMD_ICRC = 0x0004,
+ I40E_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ I40E_TX_DESC_CMD_DUMMY = 0x0010,
+ I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
+ I40E_TX_DESC_CMD_FCOET = 0x0080,
+ I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
+};
+
+#define I40E_TXD_QW1_OFFSET_SHIFT 16
+#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
+ I40E_TXD_QW1_OFFSET_SHIFT)
+
+enum i40e_tx_desc_length_fields {
+ /* Note: These are predefined bit offsets */
+ I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
+};
+
+#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34
+#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
+ I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define I40E_TXD_QW1_L2TAG1_SHIFT 48
+#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
+
+/* Context descriptors */
+struct i40e_tx_context_desc {
+ __le32 tunneling_params;
+ __le16 l2tag2;
+ __le16 rsvd;
+ __le64 type_cmd_tso_mss;
+};
+
+#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_CTX_QW1_CMD_SHIFT 4
+#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
+
+enum i40e_tx_ctx_desc_cmd_bits {
+ I40E_TX_CTX_DESC_TSO = 0x01,
+ I40E_TX_CTX_DESC_TSYN = 0x02,
+ I40E_TX_CTX_DESC_IL2TAG2 = 0x04,
+ I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
+ I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
+ I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
+ I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
+ I40E_TX_CTX_DESC_SWTCH_VSI = 0x30,
+ I40E_TX_CTX_DESC_SWPE = 0x40
+};
+
+#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30
+#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define I40E_TXD_CTX_QW1_MSS_SHIFT 50
+#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
+ I40E_TXD_CTX_QW1_MSS_SHIFT)
+
+#define I40E_TXD_CTX_QW1_VSI_SHIFT 50
+#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0
+#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
+ I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum i40e_tx_ctx_desc_eipt_offload {
+ I40E_TX_CTX_EXT_IP_NONE = 0x0,
+ I40E_TX_CTX_EXT_IP_IPV6 = 0x1,
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
+ I40E_TX_CTX_EXT_IP_IPV4 = 0x3
+};
+
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
+#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \
+ I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12
+#define I40E_TXD_CTX_QW0_NATLEN_MASK (0x7FULL << \
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19
+#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
+ I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+
+struct i40e_filter_program_desc {
+ __le32 qindex_flex_ptype_vsi;
+ __le32 rsvd;
+ __le32 dtype_cmd_cntindex;
+ __le32 fd_id;
+};
+#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0
+#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \
+ I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
+#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11
+#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \
+ I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17
+#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
+
+/* Packet Classifier Types for filters */
+enum i40e_filter_pctype {
+ /* Note: Values 0-28 are reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN = 32,
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
+ I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
+ I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
+ I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
+ /* Note: Values 37-38 are reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
+ I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
+ I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
+ I40E_FILTER_PCTYPE_FRAG_IPV6 = 46,
+ /* Note: Value 47 is reserved for future use */
+ I40E_FILTER_PCTYPE_FCOE_OX = 48,
+ I40E_FILTER_PCTYPE_FCOE_RX = 49,
+ I40E_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
+ I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
+};
+
+enum i40e_filter_program_desc_dest {
+ I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2,
+};
+
+enum i40e_filter_program_desc_fd_status {
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3,
+};
+
+#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
+#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT)
+
+enum i40e_filter_program_desc_pcmd {
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1,
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2,
+};
+
+#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
+ I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
+ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
+#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
+
+enum i40e_filter_type {
+ I40E_FLOW_DIRECTOR_FLTR = 0,
+ I40E_PE_QUAD_HASH_FLTR = 1,
+ I40E_ETHERTYPE_FLTR,
+ I40E_FCOE_CTX_FLTR,
+ I40E_MAC_VLAN_FLTR,
+ I40E_HASH_FLTR
+};
+
+struct i40e_vsi_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 vsi_number;
+ u16 vsis_allocated;
+ u16 vsis_unallocated;
+ u16 flags;
+ u8 pf_num;
+ u8 vf_num;
+ u8 connection_type;
+ struct i40e_aqc_vsi_properties_data info;
+};
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct i40e_eth_stats {
+ u64 rx_bytes; /* gorc */
+ u64 rx_unicast; /* uprc */
+ u64 rx_multicast; /* mprc */
+ u64 rx_broadcast; /* bprc */
+ u64 rx_discards; /* rdpc */
+ u64 rx_errors; /* repc */
+ u64 rx_missed; /* rmpc */
+ u64 rx_unknown_protocol; /* rupp */
+ u64 tx_bytes; /* gotc */
+ u64 tx_unicast; /* uptc */
+ u64 tx_multicast; /* mptc */
+ u64 tx_broadcast; /* bptc */
+ u64 tx_discards; /* tdpc */
+ u64 tx_errors; /* tepc */
+};
+
+/* Statistics collected by the MAC */
+struct i40e_hw_port_stats {
+ /* eth stats collected by the port */
+ struct i40e_eth_stats eth;
+
+ /* additional port specific stats */
+ u64 tx_dropped_link_down; /* tdold */
+ u64 crc_errors; /* crcerrs */
+ u64 illegal_bytes; /* illerrc */
+ u64 error_bytes; /* errbc */
+ u64 mac_local_faults; /* mlfc */
+ u64 mac_remote_faults; /* mrfc */
+ u64 rx_length_errors; /* rlec */
+ u64 link_xon_rx; /* lxonrxc */
+ u64 link_xoff_rx; /* lxoffrxc */
+ u64 priority_xon_rx[8]; /* pxonrxc[8] */
+ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
+ u64 link_xon_tx; /* lxontxc */
+ u64 link_xoff_tx; /* lxofftxc */
+ u64 priority_xon_tx[8]; /* pxontxc[8] */
+ u64 priority_xoff_tx[8]; /* pxofftxc[8] */
+ u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
+ u64 rx_size_64; /* prc64 */
+ u64 rx_size_127; /* prc127 */
+ u64 rx_size_255; /* prc255 */
+ u64 rx_size_511; /* prc511 */
+ u64 rx_size_1023; /* prc1023 */
+ u64 rx_size_1522; /* prc1522 */
+ u64 rx_size_big; /* prc9522 */
+ u64 rx_undersize; /* ruc */
+ u64 rx_fragments; /* rfc */
+ u64 rx_oversize; /* roc */
+ u64 rx_jabber; /* rjc */
+ u64 tx_size_64; /* ptc64 */
+ u64 tx_size_127; /* ptc127 */
+ u64 tx_size_255; /* ptc255 */
+ u64 tx_size_511; /* ptc511 */
+ u64 tx_size_1023; /* ptc1023 */
+ u64 tx_size_1522; /* ptc1522 */
+ u64 tx_size_big; /* ptc9522 */
+ u64 mac_short_packet_dropped; /* mspdc */
+ u64 checksum_error; /* xec */
+};
+
+/* Checksum and Shadow RAM pointers */
+#define I40E_SR_NVM_CONTROL_WORD 0x00
+#define I40E_SR_EMP_MODULE_PTR 0x0F
+#define I40E_SR_NVM_IMAGE_VERSION 0x18
+#define I40E_SR_NVM_WAKE_ON_LAN 0x19
+#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
+#define I40E_SR_NVM_EETRACK_LO 0x2D
+#define I40E_SR_NVM_EETRACK_HI 0x2E
+#define I40E_SR_VPD_PTR 0x2F
+#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
+#define I40E_SR_SW_CHECKSUM_WORD 0x3F
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
+#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
+#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
+#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+
+/* Shadow RAM related */
+#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
+#define I40E_SR_WORDS_IN_1KB 512
+/* Checksum should be calculated such that after adding all the words,
+ * including the checksum word itself, the sum should be 0xBABA.
+ */
+#define I40E_SR_SW_CHECKSUM_BASE 0xBABA
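+/* Verification sketch: sum every Shadow RAM word, including the checksum
+ * word itself, as 16-bit values with wraparound; the image is considered
+ * valid when the running sum equals I40E_SR_SW_CHECKSUM_BASE.
+ */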
+
+#define I40E_SRRD_SRCTL_ATTEMPTS 100000
+
+enum i40e_switch_element_types {
+ I40E_SWITCH_ELEMENT_TYPE_MAC = 1,
+ I40E_SWITCH_ELEMENT_TYPE_PF = 2,
+ I40E_SWITCH_ELEMENT_TYPE_VF = 3,
+ I40E_SWITCH_ELEMENT_TYPE_EMP = 4,
+ I40E_SWITCH_ELEMENT_TYPE_BMC = 6,
+ I40E_SWITCH_ELEMENT_TYPE_PE = 16,
+ I40E_SWITCH_ELEMENT_TYPE_VEB = 17,
+ I40E_SWITCH_ELEMENT_TYPE_PA = 18,
+ I40E_SWITCH_ELEMENT_TYPE_VSI = 19,
+};
+
+/* Supported EtherType filters */
+enum i40e_ether_type_index {
+ I40E_ETHER_TYPE_1588 = 0,
+ I40E_ETHER_TYPE_FIP = 1,
+ I40E_ETHER_TYPE_OUI_EXTENDED = 2,
+ I40E_ETHER_TYPE_MAC_CONTROL = 3,
+ I40E_ETHER_TYPE_LLDP = 4,
+ I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5,
+ I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6,
+ I40E_ETHER_TYPE_QCN_CNM = 7,
+ I40E_ETHER_TYPE_8021X = 8,
+ I40E_ETHER_TYPE_ARP = 9,
+ I40E_ETHER_TYPE_RSV1 = 10,
+ I40E_ETHER_TYPE_RSV2 = 11,
+};
+
+/* Filter context base size is 1K */
+#define I40E_HASH_FILTER_BASE_SIZE 1024
+/* Supported Hash filter values */
+enum i40e_hash_filter_size {
+ I40E_HASH_FILTER_SIZE_1K = 0,
+ I40E_HASH_FILTER_SIZE_2K = 1,
+ I40E_HASH_FILTER_SIZE_4K = 2,
+ I40E_HASH_FILTER_SIZE_8K = 3,
+ I40E_HASH_FILTER_SIZE_16K = 4,
+ I40E_HASH_FILTER_SIZE_32K = 5,
+ I40E_HASH_FILTER_SIZE_64K = 6,
+ I40E_HASH_FILTER_SIZE_128K = 7,
+ I40E_HASH_FILTER_SIZE_256K = 8,
+ I40E_HASH_FILTER_SIZE_512K = 9,
+ I40E_HASH_FILTER_SIZE_1M = 10,
+};
+
+/* DMA context base size is 0.5K */
+#define I40E_DMA_CNTX_BASE_SIZE 512
+/* Supported DMA context values */
+enum i40e_dma_cntx_size {
+ I40E_DMA_CNTX_SIZE_512 = 0,
+ I40E_DMA_CNTX_SIZE_1K = 1,
+ I40E_DMA_CNTX_SIZE_2K = 2,
+ I40E_DMA_CNTX_SIZE_4K = 3,
+ I40E_DMA_CNTX_SIZE_8K = 4,
+ I40E_DMA_CNTX_SIZE_16K = 5,
+ I40E_DMA_CNTX_SIZE_32K = 6,
+ I40E_DMA_CNTX_SIZE_64K = 7,
+ I40E_DMA_CNTX_SIZE_128K = 8,
+ I40E_DMA_CNTX_SIZE_256K = 9,
+};
+
+/* Supported Hash look up table (LUT) sizes */
+enum i40e_hash_lut_size {
+ I40E_HASH_LUT_SIZE_128 = 0,
+ I40E_HASH_LUT_SIZE_512 = 1,
+};
+
+/* Structure to hold a per PF filter control settings */
+struct i40e_filter_control_settings {
+ /* number of PE Quad Hash filter buckets */
+ enum i40e_hash_filter_size pe_filt_num;
+ /* number of PE Quad Hash contexts */
+ enum i40e_dma_cntx_size pe_cntx_num;
+ /* number of FCoE filter buckets */
+ enum i40e_hash_filter_size fcoe_filt_num;
+ /* number of FCoE DDP contexts */
+ enum i40e_dma_cntx_size fcoe_cntx_num;
+ /* size of the Hash LUT */
+ enum i40e_hash_lut_size hash_lut_size;
+ /* enable FDIR filters for PF and its VFs */
+ bool enable_fdir;
+ /* enable Ethertype filters for PF and its VFs */
+ bool enable_ethtype;
+ /* enable MAC/VLAN filters for PF and its VFs */
+ bool enable_macvlan;
+};
+
+/* Structure to hold device level control filter counts */
+struct i40e_control_filter_stats {
+ u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */
+ u16 etype_used; /* Used perfect EtherType filters */
+ u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */
+ u16 etype_free; /* Un-used perfect EtherType filters */
+};
+
+enum i40e_reset_type {
+ I40E_RESET_POR = 0,
+ I40E_RESET_CORER = 1,
+ I40E_RESET_GLOBR = 2,
+ I40E_RESET_EMPR = 3,
+};
+#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
new file mode 100644
index 000000000000..ccf45d04b7ef
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -0,0 +1,364 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_VIRTCHNL_H_
+#define _I40E_VIRTCHNL_H_
+
+#include "i40e_type.h"
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the various i40e drivers.
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * Firmware copies the cookie fields when sending messages between the PF and
+ * VF, but uses all other fields internally. Due to this limitation, we
+ * must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum of
+ * three VSIs. All the queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value is of
+ * i40e_status_code type, defined in i40e_type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of these
+ * opcodes. The VF driver must first validate the API version of the PF driver,
+ * then request a reset, then get resources, then configure queues and
+ * interrupts. After these operations are complete, the VF driver may start
+ * its queues, optionally add MAC and VLAN filters, and process traffic.
+ */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum i40e_virtchnl_ops {
+/* The VF sends requests to the PF using the following ops. */
+ I40E_VIRTCHNL_OP_UNKNOWN = 0,
+ I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ I40E_VIRTCHNL_OP_RESET_VF,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,
+ I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_VIRTCHNL_OP_FCOE,
+/* The PF sends status change events to VFs using the following op. */
+ I40E_VIRTCHNL_OP_EVENT,
+};
+
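To make the initialization ordering described in the header comment concrete, here is a sketch of the opcode sequence a VF driver would issue during bring-up (the array itself is illustrative only):

    static const enum i40e_virtchnl_ops vf_init_sequence[] = {
    	I40E_VIRTCHNL_OP_VERSION,           /* validate API version first */
    	I40E_VIRTCHNL_OP_RESET_VF,          /* then request a reset */
    	I40E_VIRTCHNL_OP_GET_VF_RESOURCES,  /* then get resources */
    	I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, /* then configure queues... */
    	I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,    /* ...and interrupts */
    	I40E_VIRTCHNL_OP_ENABLE_QUEUES,     /* finally start traffic */
    };
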
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct i40e_virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ i40e_status v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+/* Message descriptions and data structures. */
+
+/* I40E_VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define I40E_VIRTCHNL_VERSION_MAJOR 1
+#define I40E_VIRTCHNL_VERSION_MINOR 0
+struct i40e_virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
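A minimal sketch of the compatibility rule this implies (the helper name is made up for illustration):

    static int vf_check_api_version(const struct i40e_virtchnl_version_info *pf_ver)
    {
    	/* major mismatch: VF cannot operate at all */
    	if (pf_ver->major != I40E_VIRTCHNL_VERSION_MAJOR)
    		return -EIO;
    	/* minor mismatch: keep going, but leave a note in the log */
    	if (pf_ver->minor != I40E_VIRTCHNL_VERSION_MINOR)
    		pr_warn("i40evf: API minor version mismatch with PF\n");
    	return 0;
    }
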
+
+/* I40E_VIRTCHNL_OP_RESET_VF
+ * VF sends this request to the PF with no parameters.
+ * PF does NOT respond! VF driver must delay, then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
+ * VF sends this request to the PF with no parameters.
+ * PF responds with an indirect message containing
+ * i40e_virtchnl_vf_resource and one or more
+ * i40e_virtchnl_vsi_resource structures.
+ */
+
+struct i40e_virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum i40e_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[ETH_ALEN];
+};
+/* VF offload flags */
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+
+struct i40e_virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_offload_flags;
+ u32 max_fcoe_contexts;
+ u32 max_fcoe_filters;
+
+ struct i40e_virtchnl_vsi_resource vsi_res[1];
+};
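Since vsi_res[] is a one-element trailing array, a reply describing more than one VSI must be received into an oversized buffer; the usual sizing arithmetic, sketched with a hypothetical num_vsis taken from the PF's reply:

    size_t len = sizeof(struct i40e_virtchnl_vf_resource) +
    	     (num_vsis - 1) * sizeof(struct i40e_virtchnl_vsi_resource);

The same trailing-array pattern recurs in the qpair[1], vecmap[1], list[1] and vlan_id[1] members of the messages below.
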
+
+/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of i40e_virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct i40e_virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled;
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of i40e_virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct i40e_virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled;
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u64 dma_ring_addr;
+ enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct i40e_virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct i40e_virtchnl_txq_info txq;
+ struct i40e_virtchnl_rxq_info rxq;
+};
+
+struct i40e_virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ struct i40e_virtchnl_queue_pair_info qpair[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct i40e_virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+struct i40e_virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct i40e_virtchnl_vector_map vecmap[1];
+};
+
+/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
+ * I40E_VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct i40e_virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
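For instance, enabling the first four queue pairs of a VSI would look roughly like this (the VSI id is invented for the example):

    struct i40e_virtchnl_queue_select vqs = {
    	.vsi_id    = 3,    /* hypothetical VF-relative VSI id */
    	.rx_queues = 0xf,  /* bitmap: queue pairs 0-3 */
    	.tx_queues = 0xf,
    };
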
+
+/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct i40e_virtchnl_ether_addr {
+ u8 addr[ETH_ALEN];
+ u8 pad[2];
+};
+
+struct i40e_virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct i40e_virtchnl_ether_addr list[1];
+};
+
+/* I40E_VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct i40e_virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct i40e_virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
+#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
+
+/* I40E_VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct i40e_eth_stats in an external buffer.
+ */
+
+/* I40E_VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum i40e_virtchnl_event_codes {
+ I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
+ I40E_VIRTCHNL_EVENT_LINK_CHANGE,
+ I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
+ I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+#define I40E_PF_EVENT_SEVERITY_INFO 0
+#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct i40e_virtchnl_pf_event {
+ enum i40e_virtchnl_event_codes event;
+ union {
+ struct {
+ enum i40e_aq_link_speed link_speed;
+ bool link_status;
+ } link_event;
+ } event_data;
+
+ int severity;
+};
+
+/* The following are TBD, not necessary for LAN functionality.
+ * I40E_VIRTCHNL_OP_FCOE
+ */
+
+/* VF reset states - these are written into the RSTAT register:
+ * I40E_VFGEN_RSTAT1 on the PF
+ * I40E_VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return DEADBEEF, which, when masked,
+ * will result in 3.
+ */
+enum i40e_vfr_states {
+ I40E_VFR_INPROGRESS = 0,
+ I40E_VFR_COMPLETED,
+ I40E_VFR_VFACTIVE,
+ I40E_VFR_UNKNOWN,
+};
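The masking remark works out because only the low two bits carry the state: 0xDEADBEEF & 0x3 == 0x3, which decodes to I40E_VFR_UNKNOWN. A sketch of the poll loop the comment above implies (the delay values and the bare 0x3 mask are illustrative, not taken from the hardware spec):

    u32 rstat;

    do {
    	usleep_range(10, 20);
    	rstat = rd32(hw, I40E_VFGEN_RSTAT) & 0x3; /* low two bits only */
    } while (rstat != I40E_VFR_COMPLETED && rstat != I40E_VFR_VFACTIVE);
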
+
+#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
new file mode 100644
index 000000000000..ff6529b288a1
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -0,0 +1,321 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40EVF_H_
+#define _I40EVF_H_
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
+#include <net/udp.h>
+
+
+#include "i40e_type.h"
+#include "i40e_virtchnl.h"
+#include "i40e_txrx.h"
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+#define PFX "i40evf: "
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+ ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+ printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
+ __func__ , ## args)))
+
+/* dummy struct to make common code less painful */
+struct i40e_vsi {
+ struct i40evf_adapter *back;
+ struct net_device *netdev;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u16 seid;
+ u16 id;
+ unsigned long state;
+ int base_vector;
+ u16 work_limit;
+ /* high bit set means dynamic, use accessor routines to read/write.
+ * hardware only supports 2us resolution for the ITR registers.
+ * these values always store the USER setting, and must be converted
+ * before programming to a register.
+ */
+ u16 rx_itr_setting;
+ u16 tx_itr_setting;
+};
+
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define I40EVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define I40EVF_DEFAULT_TXD 512
+#define I40EVF_DEFAULT_RXD 512
+#define I40EVF_MAX_TXD 4096
+#define I40EVF_MIN_TXD 64
+#define I40EVF_MAX_RXD 4096
+#define I40EVF_MIN_RXD 64
+#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 8
+
+/* Supported Rx Buffer Sizes */
+#define I40EVF_RXBUFFER_64 64 /* Used for packet split */
+#define I40EVF_RXBUFFER_128 128 /* Used for packet split */
+#define I40EVF_RXBUFFER_256 256 /* Used for packet split */
+#define I40EVF_RXBUFFER_2048 2048
+#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
+#define I40EVF_MAX_AQ_BUF_SIZE 4096
+#define I40EVF_AQ_LEN 32
+#define I40EVF_AQ_MAX_ERR 10 /* times to try before resetting AQ */
+
+#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+
+#define I40E_RX_DESC(R, i) (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
+#define I40E_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i]))
+#define I40E_TX_CTXTDESC(R, i) \
+ (&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
+#define MAX_RX_QUEUES 8
+#define MAX_TX_QUEUES MAX_RX_QUEUES
+
+/* MAX_MSIX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct i40e_q_vector {
+ struct i40evf_adapter *adapter;
+ struct i40e_vsi *vsi;
+ struct napi_struct napi;
+ unsigned long reg_idx;
+ struct i40e_ring_container rx;
+ struct i40e_ring_container tx;
+ u32 ring_mask;
+ u8 num_ringpairs; /* total number of ring pairs in vector */
+ int v_idx; /* vector index in list */
+ char name[IFNAMSIZ + 9];
+ cpumask_var_t affinity_mask;
+};
+
+/* Helper macros to switch between ints/sec and what the register uses.
+ * And yes, it's the same math going both ways. The lowest value
+ * supported by all of the i40e hardware is 8.
+ */
+#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
+ ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
+#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
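A worked example of the conversion: 10000 interrupts/sec gives 1000000000 / (10000 * 256) = 390 in register units, and running 390 back through the same macro gives 1000000000 / (390 * 256) = 10016 interrupts/sec, so one macro really does serve both directions to within integer rounding; a zero argument falls back to the hardware minimum of 8.
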
+
+#define I40EVF_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
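To see the wraparound handling: with count = 512, next_to_clean = 10 and next_to_use = 500, the macro evaluates to 512 + 10 - 500 - 1 = 21 unused descriptors; once the clean index passes the use index (next_to_clean = 500, next_to_use = 10) it evaluates to 0 + 500 - 10 - 1 = 489.
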
+
+#define I40EVF_RX_DESC_ADV(R, i) \
+ (&(((union i40e_adv_rx_desc *)((R).desc))[i]))
+#define I40EVF_TX_DESC_ADV(R, i) \
+ (&(((union i40e_adv_tx_desc *)((R).desc))[i]))
+#define I40EVF_TX_CTXTDESC_ADV(R, i) \
+ (&(((struct i40e_adv_tx_context_desc *)((R).desc))[i]))
+
+#define OTHER_VECTOR 1
+#define NONQ_VECS (OTHER_VECTOR)
+
+#define MAX_MSIX_Q_VECTORS 4
+#define MAX_MSIX_COUNT 5
+
+#define MIN_MSIX_Q_VECTORS 1
+#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NONQ_VECS)
+
+#define I40EVF_QUEUE_END_OF_LIST 0x7FF
+#define I40EVF_FREE_VECTOR 0x7FFF
+struct i40evf_mac_filter {
+ struct list_head list;
+ u8 macaddr[ETH_ALEN];
+ bool remove; /* filter needs to be removed */
+ bool add; /* filter needs to be added */
+};
+
+struct i40evf_vlan_filter {
+ struct list_head list;
+ u16 vlan;
+ bool remove; /* filter needs to be removed */
+ bool add; /* filter needs to be added */
+};
+
+/* Driver state. The order of these is important! */
+enum i40evf_state_t {
+ __I40EVF_STARTUP, /* driver loaded, probe complete */
+ __I40EVF_FAILED, /* PF communication failed. Fatal. */
+ __I40EVF_REMOVE, /* driver is being unloaded */
+ __I40EVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
+ __I40EVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
+ __I40EVF_INIT_SW, /* got resources, setting up structs */
+ /* Below here, watchdog is running */
+ __I40EVF_DOWN, /* ready, can be opened */
+ __I40EVF_TESTING, /* in ethtool self-test */
+ __I40EVF_RESETTING, /* in reset */
+ __I40EVF_RUNNING, /* opened, working */
+};
+
+enum i40evf_critical_section_t {
+ __I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */
+};
+/* make common code happy */
+#define __I40E_DOWN __I40EVF_DOWN
+
+/* board specific private data structure */
+struct i40evf_adapter {
+ struct timer_list watchdog_timer;
+ struct vlan_group *vlgrp;
+ struct work_struct reset_task;
+ struct work_struct adminq_task;
+ struct delayed_work init_task;
+ struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+ struct list_head vlan_filter_list;
+ char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
+
+ /* Interrupt Throttle Rate */
+ u32 itr_setting;
+ u16 eitr_low;
+ u16 eitr_high;
+
+ /* TX */
+ struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
+ u64 restart_queue;
+ u64 hw_csum_tx_good;
+ u64 lsc_int;
+ u64 hw_tso_ctxt;
+ u64 hw_tso6_ctxt;
+ u32 tx_timeout_count;
+ struct list_head mac_filter_list;
+#ifdef DEBUG
+ bool detect_tx_hung;
+#endif /* DEBUG */
+
+ /* RX */
+ struct i40e_ring *rx_rings[I40E_MAX_VSI_QP];
+ int txd_count;
+ int rxd_count;
+ u64 hw_csum_rx_error;
+ u64 hw_rx_no_dma_resources;
+ u64 hw_csum_rx_good;
+ u64 non_eop_descs;
+ int num_msix_vectors;
+ struct msix_entry *msix_entries;
+
+ u64 rx_hdr_split;
+
+ u32 init_state;
+ volatile unsigned long flags;
+#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1)
+#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
+#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
+#define I40EVF_FLAG_RX_PS_ENABLED (u32)(1 << 3)
+#define I40EVF_FLAG_IN_NETPOLL (u32)(1 << 4)
+#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5)
+#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6)
+#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
+/* duplicates for common code */
+#define I40E_FLAG_FDIR_ATR_ENABLED 0
+#define I40E_FLAG_DCB_ENABLED 0
+#define I40E_FLAG_IN_NETPOLL I40EVF_FLAG_IN_NETPOLL
+#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
+ /* flags for admin queue service task */
+ u32 aq_required;
+ u32 aq_pending;
+#define I40EVF_FLAG_AQ_ENABLE_QUEUES (u32)(1)
+#define I40EVF_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1)
+#define I40EVF_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2)
+#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3)
+#define I40EVF_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4)
+#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5)
+#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6)
+#define I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7)
+#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct net_device_stats net_stats;
+
+ /* structs defined in i40e_vf.h */
+ struct i40e_hw hw;
+
+ enum i40evf_state_t state;
+ volatile unsigned long crit_section;
+ u64 tx_busy;
+
+ struct work_struct watchdog_task;
+ bool netdev_registered;
+ bool dev_closed;
+ bool link_up;
+ enum i40e_virtchnl_ops current_op;
+ struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
+ struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
+ u16 msg_enable;
+ struct i40e_eth_stats current_stats;
+ struct i40e_vsi vsi;
+ u32 aq_wait_count;
+};
+
+struct i40evf_info {
+ enum i40e_mac_type mac;
+ unsigned int flags;
+};
+
+
+/* needed by i40evf_ethtool.c */
+extern char i40evf_driver_name[];
+extern const char i40evf_driver_version[];
+
+int i40evf_up(struct i40evf_adapter *adapter);
+void i40evf_down(struct i40evf_adapter *adapter);
+void i40evf_reinit_locked(struct i40evf_adapter *adapter);
+void i40evf_reset(struct i40evf_adapter *adapter);
+void i40evf_set_ethtool_ops(struct net_device *netdev);
+void i40evf_update_stats(struct i40evf_adapter *adapter);
+void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter);
+int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter);
+void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask);
+
+void i40e_napi_add_all(struct i40evf_adapter *adapter);
+void i40e_napi_del_all(struct i40evf_adapter *adapter);
+
+int i40evf_send_api_ver(struct i40evf_adapter *adapter);
+int i40evf_verify_api_ver(struct i40evf_adapter *adapter);
+int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter);
+int i40evf_get_vf_config(struct i40evf_adapter *adapter);
+void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush);
+void i40evf_configure_queues(struct i40evf_adapter *adapter);
+void i40evf_deconfigure_queues(struct i40evf_adapter *adapter);
+void i40evf_enable_queues(struct i40evf_adapter *adapter);
+void i40evf_disable_queues(struct i40evf_adapter *adapter);
+void i40evf_map_queues(struct i40evf_adapter *adapter);
+void i40evf_add_ether_addrs(struct i40evf_adapter *adapter);
+void i40evf_del_ether_addrs(struct i40evf_adapter *adapter);
+void i40evf_add_vlans(struct i40evf_adapter *adapter);
+void i40evf_del_vlans(struct i40evf_adapter *adapter);
+void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
+void i40evf_request_stats(struct i40evf_adapter *adapter);
+void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval, u8 *msg, u16 msglen);
+#endif /* _I40EVF_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
new file mode 100644
index 000000000000..b0b1f4bf5ac0
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -0,0 +1,390 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* ethtool support for i40evf */
+#include "i40evf.h"
+
+#include <linux/uaccess.h>
+
+
+struct i40evf_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_offset;
+};
+
+#define I40EVF_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .stat_offset = offsetof(struct i40evf_adapter, _stat) \
+}
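This offset-table layout lets the reporting code fetch any statistic generically from the adapter struct, roughly:

    u64 val = *(u64 *)((char *)adapter +
    		   i40evf_gstrings_stats[i].stat_offset);

which is exactly the walk i40evf_get_ethtool_stats() performs below.
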
+
+/* All stats are u64, so we don't need to track the size of the field. */
+static const struct i40evf_stats i40evf_gstrings_stats[] = {
+ I40EVF_STAT("rx_bytes", current_stats.rx_bytes),
+ I40EVF_STAT("rx_unicast", current_stats.rx_unicast),
+ I40EVF_STAT("rx_multicast", current_stats.rx_multicast),
+ I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast),
+ I40EVF_STAT("rx_discards", current_stats.rx_discards),
+ I40EVF_STAT("rx_errors", current_stats.rx_errors),
+ I40EVF_STAT("rx_missed", current_stats.rx_missed),
+ I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
+ I40EVF_STAT("tx_bytes", current_stats.tx_bytes),
+ I40EVF_STAT("tx_unicast", current_stats.tx_unicast),
+ I40EVF_STAT("tx_multicast", current_stats.tx_multicast),
+ I40EVF_STAT("tx_broadcast", current_stats.tx_broadcast),
+ I40EVF_STAT("tx_discards", current_stats.tx_discards),
+ I40EVF_STAT("tx_errors", current_stats.tx_errors),
+};
+
+#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats)
+#define I40EVF_QUEUE_STATS_LEN \
+ (((struct i40evf_adapter *) \
+ netdev_priv(netdev))->vsi_res->num_queue_pairs * 4)
+#define I40EVF_STATS_LEN (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN)
+
+/**
+ * i40evf_get_settings - Get Link Speed and Duplex settings
+ * @netdev: network interface device structure
+ * @ecmd: ethtool command
+ *
+ * Reports speed/duplex settings. Because this is a VF, we don't know what
+ * kind of link we really have, so we fake it.
+ **/
+static int i40evf_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ /* In the future the VF will be able to query the PF for
+ * some information - for now use a dummy value
+ */
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->transceiver = XCVR_DUMMY1;
+ ecmd->port = PORT_NONE;
+
+ return 0;
+}
+
+/**
+ * i40evf_get_sset_count - Get length of string set
+ * @netdev: network interface device structure
+ * @sset: id of string set
+ *
+ * Reports size of string table. This driver only supports
+ * strings for statistics.
+ **/
+static int i40evf_get_sset_count(struct net_device *netdev, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return I40EVF_STATS_LEN;
+ else
+ return -ENOTSUPP;
+}
+
+/**
+ * i40evf_get_ethtool_stats - report device statistics
+ * @netdev: network interface device structure
+ * @stats: ethtool statistics structure
+ * @data: pointer to data buffer
+ *
+ * All statistics are added to the data buffer as an array of u64.
+ **/
+static void i40evf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int i, j;
+ char *p;
+
+ for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) {
+ p = (char *)adapter + i40evf_gstrings_stats[i].stat_offset;
+ data[i] = *(u64 *)p;
+ }
+ for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) {
+ data[i++] = adapter->tx_rings[j]->stats.packets;
+ data[i++] = adapter->tx_rings[j]->stats.bytes;
+ }
+ for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) {
+ data[i++] = adapter->rx_rings[j]->stats.packets;
+ data[i++] = adapter->rx_rings[j]->stats.bytes;
+ }
+}
+
+/**
+ * i40evf_get_strings - Get string set
+ * @netdev: network interface device structure
+ * @sset: id of string set
+ * @data: buffer for string data
+ *
+ * Builds stats string table.
+ **/
+static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ u8 *p = data;
+ int i;
+
+ if (sset == ETH_SS_STATS) {
+ for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, i40evf_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "tx-%u.packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "tx-%u.bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "rx-%u.packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+}
+
+/**
+ * i40evf_get_msglevel - Get debug message level
+ * @netdev: network interface device structure
+ *
+ * Returns current debug message level.
+ **/
+static u32 i40evf_get_msglevel(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+/**
+ * i40evf_set_msglevel - Set debug message level
+ * @netdev: network interface device structure
+ * @data: message level
+ *
+ * Set current debug message level. Higher values cause the driver to
+ * be noisier.
+ **/
+static void i40evf_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+/**
+ * i40evf_get_drvinfo - Get driver info
+ * @netdev: network interface device structure
+ * @drvinfo: ethtool driver info structure
+ *
+ * Returns information about the driver and device for display to the user.
+ **/
+static void i40evf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, i40evf_driver_name, 32);
+ strlcpy(drvinfo->version, i40evf_driver_version, 32);
+
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+}
+
+/**
+ * i40evf_get_ringparam - Get ring parameters
+ * @netdev: network interface device structure
+ * @ring: ethtool ringparam structure
+ *
+ * Returns current ring parameters. TX and RX rings are reported separately,
+ * but the number of rings is not reported.
+ **/
+static void i40evf_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_ring *tx_ring = adapter->tx_rings[0];
+ struct i40e_ring *rx_ring = adapter->rx_rings[0];
+
+ ring->rx_max_pending = I40EVF_MAX_RXD;
+ ring->tx_max_pending = I40EVF_MAX_TXD;
+ ring->rx_pending = rx_ring->count;
+ ring->tx_pending = tx_ring->count;
+}
+
+/**
+ * i40evf_set_ringparam - Set ring parameters
+ * @netdev: network interface device structure
+ * @ring: ethtool ringparam structure
+ *
+ * Sets ring parameters. TX and RX rings are controlled separately, but the
+ * number of rings is not specified, so all rings get the same settings.
+ **/
+static int i40evf_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ u32 new_rx_count, new_tx_count;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ new_tx_count = clamp_t(u32, ring->tx_pending,
+ I40EVF_MIN_TXD,
+ I40EVF_MAX_TXD);
+ new_tx_count = ALIGN(new_tx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
+
+ new_rx_count = clamp_t(u32, ring->rx_pending,
+ I40EVF_MIN_RXD,
+ I40EVF_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
+
+ /* if nothing to do, return success */
+ if ((new_tx_count == adapter->txd_count) &&
+ (new_rx_count == adapter->rxd_count))
+ return 0;
+
+ adapter->txd_count = new_tx_count;
+ adapter->rxd_count = new_rx_count;
+
+ if (netif_running(netdev))
+ i40evf_reinit_locked(adapter);
+ return 0;
+}
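For example, a request for 1000 Tx descriptors is inside [64, 4096] and already a multiple of 8, so it is used as-is; a request for 70 is rounded up by ALIGN() to 72; and a request for 9000 is first clamped to I40EVF_MAX_TXD (4096).
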
+
+/**
+ * i40evf_get_coalesce - Get interrupt coalescing settings
+ * @netdev: network interface device structure
+ * @ec: ethtool coalesce structure
+ *
+ * Returns current coalescing settings. This is referred to elsewhere in the
+ * driver as Interrupt Throttle Rate, as this is how the hardware describes
+ * this functionality.
+ **/
+static int i40evf_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_vsi *vsi = &adapter->vsi;
+
+ ec->tx_max_coalesced_frames = vsi->work_limit;
+ ec->rx_max_coalesced_frames = vsi->work_limit;
+
+ if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
+ ec->rx_coalesce_usecs = 1;
+ else
+ ec->rx_coalesce_usecs = vsi->rx_itr_setting;
+
+ if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+ ec->tx_coalesce_usecs = 1;
+ else
+ ec->tx_coalesce_usecs = vsi->tx_itr_setting;
+
+ return 0;
+}
+
+/**
+ * i40evf_set_coalesce - Set interrupt coalescing settings
+ * @netdev: network interface device structure
+ * @ec: ethtool coalesce structure
+ *
+ * Changes current coalescing settings.
+ **/
+static int i40evf_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40e_vsi *vsi = &adapter->vsi;
+ struct i40e_q_vector *q_vector;
+ int i;
+
+ if (ec->tx_max_coalesced_frames || ec->rx_max_coalesced_frames)
+ vsi->work_limit = ec->tx_max_coalesced_frames;
+
+ switch (ec->rx_coalesce_usecs) {
+ case 0:
+ vsi->rx_itr_setting = 0;
+ break;
+ case 1:
+ vsi->rx_itr_setting = (I40E_ITR_DYNAMIC
+ | ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+ break;
+ default:
+ if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
+ return -EINVAL;
+ vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+ break;
+ }
+
+ switch (ec->tx_coalesce_usecs) {
+ case 0:
+ vsi->tx_itr_setting = 0;
+ break;
+ case 1:
+ vsi->tx_itr_setting = (I40E_ITR_DYNAMIC
+ | ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+ break;
+ default:
+ if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
+ return -EINVAL;
+ vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+ break;
+ }
+
+ for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
+ q_vector = adapter->q_vector[i];
+ q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+ wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
+ q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+ wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
+ i40e_flush(hw);
+ }
+
+ return 0;
+}
+
+static struct ethtool_ops i40evf_ethtool_ops = {
+ .get_settings = i40evf_get_settings,
+ .get_drvinfo = i40evf_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = i40evf_get_ringparam,
+ .set_ringparam = i40evf_set_ringparam,
+ .get_strings = i40evf_get_strings,
+ .get_ethtool_stats = i40evf_get_ethtool_stats,
+ .get_sset_count = i40evf_get_sset_count,
+ .get_msglevel = i40evf_get_msglevel,
+ .set_msglevel = i40evf_set_msglevel,
+ .get_coalesce = i40evf_get_coalesce,
+ .set_coalesce = i40evf_set_coalesce,
+};
+
+/**
+ * i40evf_set_ethtool_ops - Initialize ethtool ops struct
+ * @netdev: network interface device structure
+ *
+ * Sets ethtool ops struct in our netdev so that ethtool can call
+ * our functions.
+ **/
+void i40evf_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &i40evf_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
new file mode 100644
index 000000000000..06bf82530fe5
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -0,0 +1,2353 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40evf.h"
+#include "i40e_prototype.h"
+static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
+static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
+static int i40evf_close(struct net_device *netdev);
+
+char i40evf_driver_name[] = "i40evf";
+static const char i40evf_driver_string[] =
+ "Intel(R) XL710 X710 Virtual Function Network Driver";
+
+#define DRV_VERSION "0.9.11"
+const char i40evf_driver_version[] = DRV_VERSION;
+static const char i40evf_copyright[] =
+ "Copyright (c) 2013 Intel Corporation.";
+
+/* i40evf_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static DEFINE_PCI_DEVICE_TABLE(i40evf_pci_tbl) = {
+ {PCI_VDEVICE(INTEL, I40E_VF_DEVICE_ID), 0},
+ /* required last entry */
+ {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl);
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+/**
+ * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to fill out
+ * @size: size of memory requested
+ * @alignment: what to align the allocation to
+ **/
+i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ u64 size, u32 alignment)
+{
+ struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
+
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ mem->size = ALIGN(size, alignment);
+ mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
+ (dma_addr_t *)&mem->pa, GFP_KERNEL);
+ if (mem->va)
+ return 0;
+ else
+ return I40E_ERR_NO_MEMORY;
+}
+
+/**
+ * i40evf_free_dma_mem_d - OS specific memory free for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ **/
+i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+{
+ struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
+
+ if (!mem || !mem->va)
+ return I40E_ERR_PARAM;
+ dma_free_coherent(&adapter->pdev->dev, mem->size,
+ mem->va, (dma_addr_t)mem->pa);
+ return 0;
+}
+
+/**
+ * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to fill out
+ * @size: size of memory requested
+ **/
+i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem, u32 size)
+{
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ mem->size = size;
+ mem->va = kzalloc(size, GFP_KERNEL);
+
+ if (mem->va)
+ return 0;
+ else
+ return I40E_ERR_NO_MEMORY;
+}
+
+/**
+ * i40evf_free_virt_mem_d - OS specific memory free for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ **/
+i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem)
+{
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ /* it's ok to kfree a NULL pointer */
+ kfree(mem->va);
+
+ return 0;
+}
+
+/**
+ * i40evf_debug_d - OS dependent version of debug printing
+ * @hw: pointer to the HW structure
+ * @mask: debug level mask
+ * @fmt_str: printf-type format description
+ **/
+void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
+{
+ char buf[512];
+ va_list argptr;
+
+ if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
+ return;
+
+ va_start(argptr, fmt_str);
+ vsnprintf(buf, sizeof(buf), fmt_str, argptr);
+ va_end(argptr);
+
+ /* the debug string is already formatted with a newline */
+ pr_info("%s", buf);
+}
+
+/**
+ * i40evf_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void i40evf_tx_timeout(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ adapter->tx_timeout_count++;
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+}
+
+/**
+ * i40evf_misc_irq_disable - Mask off the admin queue (misc) interrupt
+ * @adapter: board private structure
+ **/
+static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ wr32(hw, I40E_VFINT_DYN_CTL01, 0);
+
+ /* read flush */
+ rd32(hw, I40E_VFGEN_RSTAT);
+
+ synchronize_irq(adapter->msix_entries[0].vector);
+}
+
+/**
+ * i40evf_misc_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
+ I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
+ wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
+
+ /* read flush */
+ rd32(hw, I40E_VFGEN_RSTAT);
+}
+
+/**
+ * i40evf_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static void i40evf_irq_disable(struct i40evf_adapter *adapter)
+{
+ int i;
+ struct i40e_hw *hw = &adapter->hw;
+
+ for (i = 1; i < adapter->num_msix_vectors; i++) {
+ wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
+ synchronize_irq(adapter->msix_entries[i].vector);
+ }
+ /* read flush */
+ rd32(hw, I40E_VFGEN_RSTAT);
+
+}
+
+/**
+ * i40evf_irq_enable_queues - Enable interrupt for specified queues
+ * @adapter: board private structure
+ * @mask: bitmap of queues to enable
+ **/
+void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ int i;
+
+ for (i = 1; i < adapter->num_msix_vectors; i++) {
+ if (mask & (1 << (i - 1))) {
+ wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
+ I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+ }
+ }
+}
+
+/**
+ * i40evf_fire_sw_int - Generate SW interrupt for specified vectors
+ * @adapter: board private structure
+ * @mask: bitmap of vectors to trigger
+ **/
+static void i40evf_fire_sw_int(struct i40evf_adapter *adapter,
+ u32 mask)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ int i;
+ uint32_t dyn_ctl;
+
+ for (i = 1; i < adapter->num_msix_vectors; i++) {
+ if (mask & (1 << i)) {
+ dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
+ dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
+ wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
+ }
+ }
+}
+
+/**
+ * i40evf_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
+{
+ struct i40e_hw *hw = &adapter->hw;
+
+ i40evf_irq_enable_queues(adapter, ~0);
+
+ if (flush)
+ rd32(hw, I40E_VFGEN_RSTAT);
+}
+
+/**
+ * i40evf_msix_aq - Interrupt handler for vector 0
+ * @irq: interrupt number
+ * @data: pointer to netdev
+ **/
+static irqreturn_t i40evf_msix_aq(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ u32 val;
+ u32 ena_mask;
+
+ /* handle non-queue interrupts */
+ val = rd32(hw, I40E_VFINT_ICR01);
+ ena_mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
+
+
+ val = rd32(hw, I40E_VFINT_DYN_CTL01);
+ val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
+ wr32(hw, I40E_VFINT_DYN_CTL01, val);
+
+ /* re-enable interrupt causes */
+ wr32(hw, I40E_VFINT_ICR0_ENA1, ena_mask);
+ wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK);
+
+ /* schedule work on the private workqueue */
+ schedule_work(&adapter->adminq_task);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * i40evf_msix_clean_rings - MSIX mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40evf_msix_clean_rings(int irq, void *data)
+{
+ struct i40e_q_vector *q_vector = data;
+
+ if (!q_vector->tx.ring && !q_vector->rx.ring)
+ return IRQ_HANDLED;
+
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * i40evf_map_vector_to_rxq - associate irqs with rx queues
+ * @adapter: board private structure
+ * @v_idx: interrupt number
+ * @r_idx: queue number
+ **/
+static void
+i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
+{
+ struct i40e_q_vector *q_vector = adapter->q_vector[v_idx];
+ struct i40e_ring *rx_ring = adapter->rx_rings[r_idx];
+
+ rx_ring->q_vector = q_vector;
+ rx_ring->next = q_vector->rx.ring;
+ rx_ring->vsi = &adapter->vsi;
+ q_vector->rx.ring = rx_ring;
+ q_vector->rx.count++;
+ q_vector->rx.latency_range = I40E_LOW_LATENCY;
+}
+
+/**
+ * i40evf_map_vector_to_txq - associate irqs with tx queues
+ * @adapter: board private structure
+ * @v_idx: interrupt number
+ * @t_idx: queue number
+ **/
+static void
+i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
+{
+ struct i40e_q_vector *q_vector = adapter->q_vector[v_idx];
+ struct i40e_ring *tx_ring = adapter->tx_rings[t_idx];
+
+ tx_ring->q_vector = q_vector;
+ tx_ring->next = q_vector->tx.ring;
+ tx_ring->vsi = &adapter->vsi;
+ q_vector->tx.ring = tx_ring;
+ q_vector->tx.count++;
+ q_vector->tx.latency_range = I40E_LOW_LATENCY;
+ q_vector->num_ringpairs++;
+ q_vector->ring_mask |= (1 << t_idx);
+}
+
+/**
+ * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code. Ideally, we'd have
+ * one vector per ring/queue, but on a constrained vector budget, we
+ * group the rings as "efficiently" as possible. You would add new
+ * mapping configurations in here.
+ **/
+static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
+{
+ int q_vectors;
+ int v_start = 0;
+ int rxr_idx = 0, txr_idx = 0;
+ int rxr_remaining = adapter->vsi_res->num_queue_pairs;
+ int txr_remaining = adapter->vsi_res->num_queue_pairs;
+ int i, j;
+ int rqpv, tqpv;
+ int err = 0;
+
+ q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ /* The ideal configuration...
+ * We have enough vectors to map one per queue.
+ */
+ if (q_vectors == (rxr_remaining * 2)) {
+ for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
+ i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx);
+
+ for (; txr_idx < txr_remaining; v_start++, txr_idx++)
+ i40evf_map_vector_to_txq(adapter, v_start, txr_idx);
+ goto out;
+ }
+
+ /* If we don't have enough vectors for a 1-to-1
+ * mapping, we'll have to group them so there are
+ * multiple queues per vector.
+ * Re-adjusting *qpv takes care of the remainder.
+ */
+ for (i = v_start; i < q_vectors; i++) {
+ rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
+ for (j = 0; j < rqpv; j++) {
+ i40evf_map_vector_to_rxq(adapter, i, rxr_idx);
+ rxr_idx++;
+ rxr_remaining--;
+ }
+ }
+ for (i = v_start; i < q_vectors; i++) {
+ tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
+ for (j = 0; j < tqpv; j++) {
+ i40evf_map_vector_to_txq(adapter, i, txr_idx);
+ txr_idx++;
+ txr_remaining--;
+ }
+ }
+
+out:
+ adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
+
+ return err;
+}
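To make the grouping concrete: with 8 queue pairs and only 3 queue vectors, the DIV_ROUND_UP() loops hand ceil(8/3) = 3 Rx rings to vector 0, ceil(5/2) = 3 to vector 1 and ceil(2/1) = 2 to vector 2 (and likewise for Tx), spreading the remainder as evenly as possible instead of piling it onto the last vector.
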
+
+/**
+ * i40evf_request_traffic_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ * @basename: device name prefix used to build the per-vector irq names
+ *
+ * Allocates MSI-X vectors for tx and rx handling, and requests
+ * interrupts from the kernel.
+ **/
+static int
+i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
+{
+ int vector, err, q_vectors;
+ int rx_int_idx = 0, tx_int_idx = 0;
+
+ i40evf_irq_disable(adapter);
+ /* Decrement for Other and TCP Timer vectors */
+ q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (vector = 0; vector < q_vectors; vector++) {
+ struct i40e_q_vector *q_vector = adapter->q_vector[vector];
+
+ if (q_vector->tx.ring && q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "i40evf-%s-%s-%d", basename,
+ "TxRx", rx_int_idx++);
+ tx_int_idx++;
+ } else if (q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "i40evf-%s-%s-%d", basename,
+ "rx", rx_int_idx++);
+ } else if (q_vector->tx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "i40evf-%s-%s-%d", basename,
+ "tx", tx_int_idx++);
+ } else {
+ /* skip this unused q_vector */
+ continue;
+ }
+ err = request_irq(
+ adapter->msix_entries[vector + NONQ_VECS].vector,
+ i40evf_msix_clean_rings,
+ 0,
+ q_vector->name,
+ q_vector);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "%s: request_irq failed, error: %d\n",
+ __func__, err);
+ goto free_queue_irqs;
+ }
+ /* assign the mask for this irq */
+ irq_set_affinity_hint(
+ adapter->msix_entries[vector + NONQ_VECS].vector,
+ q_vector->affinity_mask);
+ }
+
+ return 0;
+
+free_queue_irqs:
+ while (vector) {
+ vector--;
+ irq_set_affinity_hint(
+ adapter->msix_entries[vector + NONQ_VECS].vector,
+ NULL);
+ free_irq(adapter->msix_entries[vector + NONQ_VECS].vector,
+ adapter->q_vector[vector]);
+ }
+ return err;
+}
+
+/**
+ * i40evf_request_misc_irq - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
+ * vector is only for the admin queue, and stays active even when the netdev
+ * is closed.
+ **/
+static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ sprintf(adapter->name[0], "i40evf:mbx");
+ err = request_irq(adapter->msix_entries[0].vector,
+ &i40evf_msix_aq, 0, adapter->name[0], netdev);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "request_irq for msix_aq failed: %d\n", err);
+ free_irq(adapter->msix_entries[0].vector, netdev);
+ }
+ return err;
+}
+
+/**
+ * i40evf_free_traffic_irqs - Free MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * Frees all MSI-X vectors other than 0.
+ **/
+static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
+{
+ int i;
+ int q_vectors;
+ q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (i = 0; i < q_vectors; i++) {
+ irq_set_affinity_hint(adapter->msix_entries[i+1].vector,
+ NULL);
+ free_irq(adapter->msix_entries[i+1].vector,
+ adapter->q_vector[i]);
+ }
+}
+
+/**
+ * i40evf_free_misc_irq - Free MSI-X miscellaneous vector
+ * @adapter: board private structure
+ *
+ * Frees MSI-X vector 0.
+ **/
+static void i40evf_free_misc_irq(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ free_irq(adapter->msix_entries[0].vector, netdev);
+}
+
+/**
+ * i40evf_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void i40evf_configure_tx(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ int i;
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ adapter->tx_rings[i]->tail = hw->hw_addr + I40E_QTX_TAIL1(i);
+}
+
+/**
+ * i40evf_configure_rx - Configure Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void i40evf_configure_rx(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ int i;
+ int rx_buf_len;
+
+
+ adapter->flags &= ~I40EVF_FLAG_RX_PS_CAPABLE;
+ adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE;
+
+ /* Decide whether to use packet split mode or not */
+ if (netdev->mtu > ETH_DATA_LEN) {
+ if (adapter->flags & I40EVF_FLAG_RX_PS_CAPABLE)
+ adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
+ else
+ adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
+ } else {
+ if (adapter->flags & I40EVF_FLAG_RX_1BUF_CAPABLE)
+ adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
+ else
+ adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
+ }
+
+ /* Set the RX buffer length according to the mode */
+ if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
+ rx_buf_len = I40E_RX_HDR_SIZE;
+ } else {
+ if (netdev->mtu <= ETH_DATA_LEN)
+ rx_buf_len = I40EVF_RXBUFFER_2048;
+ else
+ rx_buf_len = ALIGN(max_frame, 1024);
+ }
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ adapter->rx_rings[i]->tail = hw->hw_addr + I40E_QRX_TAIL1(i);
+ adapter->rx_rings[i]->rx_buf_len = rx_buf_len;
+ }
+}
+
+/**
+ * i40evf_find_vlan - Search filter list for specific vlan filter
+ * @adapter: board private structure
+ * @vlan: vlan tag
+ *
+ * Returns ptr to the filter object or NULL
+ **/
+static struct
+i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
+{
+ struct i40evf_vlan_filter *f;
+
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (vlan == f->vlan)
+ return f;
+ }
+ return NULL;
+}
+
+/**
+ * i40evf_add_vlan - Add a vlan filter to the list
+ * @adapter: board private structure
+ * @vlan: VLAN tag
+ *
+ * Returns ptr to the filter object or NULL when no memory available.
+ **/
+static struct
+i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
+{
+ struct i40evf_vlan_filter *f;
+
+ f = i40evf_find_vlan(adapter, vlan);
+ if (NULL == f) {
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (NULL == f) {
+ dev_info(&adapter->pdev->dev,
+ "%s: no memory for new VLAN filter\n",
+ __func__);
+ return NULL;
+ }
+ f->vlan = vlan;
+
+ INIT_LIST_HEAD(&f->list);
+ list_add(&f->list, &adapter->vlan_filter_list);
+ f->add = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ }
+
+ return f;
+}
+
+/**
+ * i40evf_del_vlan - Remove a vlan filter from the list
+ * @adapter: board private structure
+ * @vlan: VLAN tag
+ **/
+static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
+{
+ struct i40evf_vlan_filter *f;
+
+ f = i40evf_find_vlan(adapter, vlan);
+ if (f) {
+ f->remove = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ }
+ return;
+}
+
+/**
+ * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device
+ * @netdev: network device struct
+ * @vid: VLAN tag
+ **/
+static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ if (i40evf_add_vlan(adapter, vid) == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device
+ * @netdev: network device struct
+ * @vid: VLAN tag
+ **/
+static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ i40evf_del_vlan(adapter, vid);
+ return 0;
+}
+
+/**
+ * i40evf_find_filter - Search filter list for specific mac filter
+ * @adapter: board private structure
+ * @macaddr: the MAC address
+ *
+ * Returns ptr to the filter object or NULL
+ **/
+static struct i40evf_mac_filter *
+i40evf_find_filter(struct i40evf_adapter *adapter, u8 *macaddr)
+{
+ struct i40evf_mac_filter *f;
+
+ if (!macaddr)
+ return NULL;
+
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ if (ether_addr_equal(macaddr, f->macaddr))
+ return f;
+ }
+ return NULL;
+}
+
+/**
+ * i40evf_add_filter - Add a MAC filter to the filter list
+ * @adapter: board private structure
+ * @macaddr: the MAC address
+ *
+ * Returns ptr to the filter object or NULL when no memory available.
+ **/
+static struct i40evf_mac_filter *
+i40evf_add_filter(struct i40evf_adapter *adapter, u8 *macaddr)
+{
+ struct i40evf_mac_filter *f;
+
+ if (!macaddr)
+ return NULL;
+
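+ /* the crit_section bit acts as a lock; mdelay() busy-waits,
+ * presumably so this is safe from contexts that cannot sleep
+ */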
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ mdelay(1);
+
+ f = i40evf_find_filter(adapter, macaddr);
+ if (!f) {
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f) {
+ dev_info(&adapter->pdev->dev,
+ "%s: no memory for new filter\n", __func__);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section);
+ return NULL;
+ }
+
+ memcpy(f->macaddr, macaddr, ETH_ALEN);
+
+ list_add(&f->list, &adapter->mac_filter_list);
+ f->add = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ }
+
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ return f;
+}
+
+/**
+ * i40evf_set_mac - NDO callback to set port mac address
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40evf_set_mac(struct net_device *netdev, void *p)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40evf_mac_filter *f;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
+ return 0;
+
+ f = i40evf_add_filter(adapter, addr->sa_data);
+ if (f) {
+ memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr,
+ netdev->addr_len);
+ }
+
+ return (f == NULL) ? -ENOMEM : 0;
+}
+
+/**
+ * i40evf_set_rx_mode - NDO callback to set the netdev filters
+ * @netdev: network interface device structure
+ **/
+static void i40evf_set_rx_mode(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40evf_mac_filter *f, *ftmp;
+ struct netdev_hw_addr *uca;
+ struct netdev_hw_addr *mca;
+
+ /* add addr if not already in the filter list */
+ netdev_for_each_uc_addr(uca, netdev) {
+ i40evf_add_filter(adapter, uca->addr);
+ }
+ netdev_for_each_mc_addr(mca, netdev) {
+ i40evf_add_filter(adapter, mca->addr);
+ }
+
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ mdelay(1);
+ /* remove filter if not in netdev list */
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ bool found = false;
+
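+ /* the low bit of the first octet marks a multicast address */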
+ if (f->macaddr[0] & 0x01) {
+ netdev_for_each_mc_addr(mca, netdev) {
+ if (ether_addr_equal(mca->addr, f->macaddr)) {
+ found = true;
+ break;
+ }
+ }
+ } else {
+ netdev_for_each_uc_addr(uca, netdev) {
+ if (ether_addr_equal(uca->addr, f->macaddr)) {
+ found = true;
+ break;
+ }
+ }
+ }
+ if (found) {
+ f->remove = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ }
+ }
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+}
+
+/**
+ * i40evf_napi_enable_all - enable NAPI on all queue vectors
+ * @adapter: board private structure
+ **/
+static void i40evf_napi_enable_all(struct i40evf_adapter *adapter)
+{
+ int q_idx;
+ struct i40e_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ struct napi_struct *napi;
+
+ q_vector = adapter->q_vector[q_idx];
+ napi = &q_vector->napi;
+ napi_enable(napi);
+ }
+}
+
+/**
+ * i40evf_napi_disable_all - disable NAPI on all queue vectors
+ * @adapter: board private structure
+ **/
+static void i40evf_napi_disable_all(struct i40evf_adapter *adapter)
+{
+ int q_idx;
+ struct i40e_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ q_vector = adapter->q_vector[q_idx];
+ napi_disable(&q_vector->napi);
+ }
+}
+
+/**
+ * i40evf_configure - set up transmit and receive data structures
+ * @adapter: board private structure
+ **/
+static void i40evf_configure(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i;
+
+ i40evf_set_rx_mode(netdev);
+
+ i40evf_configure_tx(adapter);
+ i40evf_configure_rx(adapter);
+ adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ struct i40e_ring *ring = adapter->rx_rings[i];
+
+ i40evf_alloc_rx_buffers(ring, ring->count);
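+ /* tail points at the last initialized descriptor; one slot is
+ * left unused so the hardware can tell a full ring from empty
+ */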
+ ring->next_to_use = ring->count - 1;
+ writel(ring->next_to_use, ring->tail);
+ }
+}
+
+/**
+ * i40evf_up_complete - Finish the last steps of bringing up a connection
+ * @adapter: board private structure
+ **/
+static int i40evf_up_complete(struct i40evf_adapter *adapter)
+{
+ adapter->state = __I40EVF_RUNNING;
+ clear_bit(__I40E_DOWN, &adapter->vsi.state);
+
+ i40evf_napi_enable_all(adapter);
+
+ adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
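+ /* mod_timer_pending() only rearms an already-pending timer, so a
+ * watchdog that has been stopped is not restarted here
+ */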
+ mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+ return 0;
+}
+
+/**
+ * i40evf_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void i40evf_clean_all_rx_rings(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ i40evf_clean_rx_ring(adapter->rx_rings[i]);
+}
+
+/**
+ * i40evf_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void i40evf_clean_all_tx_rings(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ i40evf_clean_tx_ring(adapter->tx_rings[i]);
+}
+
+/**
+ * i40evf_down - Shutdown the connection processing
+ * @adapter: board private structure
+ **/
+void i40evf_down(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct i40evf_mac_filter *f;
+
+ /* remove all MAC filters from the VSI */
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ f->remove = true;
+ }
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ /* disable receives */
+ adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
+ mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
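+ /* short pause, presumably to give the watchdog task a chance to
+ * push the queue-disable request out to the PF
+ */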
+ msleep(20);
+
+ netif_tx_disable(netdev);
+
+ netif_tx_stop_all_queues(netdev);
+
+ i40evf_irq_disable(adapter);
+
+ i40evf_napi_disable_all(adapter);
+
+ netif_carrier_off(netdev);
+
+ i40evf_clean_all_tx_rings(adapter);
+ i40evf_clean_all_rx_rings(adapter);
+}
+
+/**
+ * i40evf_acquire_msix_vectors - Setup the MSIX capability
+ * @adapter: board private structure
+ * @vectors: number of vectors to request
+ *
+ * Work with the OS to set up the MSIX vectors needed.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int
+i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
+{
+ int err, vector_threshold;
+
+ /* We'll want at least 3 (vector_threshold):
+ * 0) Other (Admin Queue and link, mostly)
+ * 1) TxQ[0] Cleanup
+ * 2) RxQ[0] Cleanup
+ */
+ vector_threshold = MIN_MSIX_COUNT;
+
+ /* The more we get, the more we will assign to Tx/Rx Cleanup
+ * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+ * Right now, we simply care about how many we'll get; we'll
+ * set them up later while requesting IRQs.
+ */
+ while (vectors >= vector_threshold) {
+ err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+ vectors);
+ if (!err) /* Success in acquiring all requested vectors. */
+ break;
+ else if (err < 0)
+ vectors = 0; /* Nasty failure, quit now */
+ else /* err == number of vectors we should try again with */
+ vectors = err;
+ }
+
+ if (vectors < vector_threshold) {
+ dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts.\n");
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ err = -EIO;
+ } else {
+ /* Adjust for only the vectors we'll use, which is minimum
+ * of max_msix_q_vectors + NONQ_VECS, or the number of
+ * vectors we were allocated.
+ */
+ adapter->num_msix_vectors = vectors;
+ }
+ return err;
+}
+
+/**
+ * i40evf_free_queues - Free memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * Free all of the memory associated with queue pairs.
+ **/
+static void i40evf_free_queues(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ if (!adapter->vsi_res)
+ return;
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ if (adapter->tx_rings[i])
+ kfree_rcu(adapter->tx_rings[i], rcu);
+ adapter->tx_rings[i] = NULL;
+ adapter->rx_rings[i] = NULL;
+ }
+}
+
+/**
+ * i40evf_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time. The Tx and Rx rings for each queue
+ * pair are allocated together as a single block.
+ **/
+static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ struct i40e_ring *tx_ring;
+ struct i40e_ring *rx_ring;
+
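+ /* Tx and Rx rings for this queue pair are allocated as one block;
+ * rx_ring sits right behind tx_ring, which is why only tx_rings[i]
+ * is freed in i40evf_free_queues()
+ */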
+ tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
+ if (!tx_ring)
+ goto err_out;
+
+ tx_ring->queue_index = i;
+ tx_ring->netdev = adapter->netdev;
+ tx_ring->dev = &adapter->pdev->dev;
+ tx_ring->count = I40EVF_DEFAULT_TXD;
+ adapter->tx_rings[i] = tx_ring;
+
+ rx_ring = &tx_ring[1];
+ rx_ring->queue_index = i;
+ rx_ring->netdev = adapter->netdev;
+ rx_ring->dev = &adapter->pdev->dev;
+ rx_ring->count = I40EVF_DEFAULT_RXD;
+ adapter->rx_rings[i] = rx_ring;
+ }
+
+ return 0;
+
+err_out:
+ i40evf_free_queues(adapter);
+ return -ENOMEM;
+}
+
+/**
+ * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
+{
+ int vector, v_budget;
+ int pairs = 0;
+ int err = 0;
+
+ if (!adapter->vsi_res) {
+ err = -EIO;
+ goto out;
+ }
+ pairs = adapter->vsi_res->num_queue_pairs;
+
+ /* It's easy to be greedy for MSI-X vectors, but it really
+ * doesn't do us much good if we have a lot more vectors
+ * than CPUs. So let's be conservative and only ask for
+ * (roughly) twice the number of vectors as there are CPUs.
+ */
+ v_budget = min(pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
+ v_budget = min(v_budget, (int)adapter->vf_res->max_vectors + 1);
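+ /* e.g. 8 queue pairs on a 2-CPU VM, with NONQ_VECS assumed to be 1,
+ * gives v_budget = min(8, 4) + 1 = 5 before the max_vectors cap
+ */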
+
+ /* A failure in MSI-X entry allocation isn't fatal, but it does
+ * mean we disable MSI-X capabilities of the adapter.
+ */
+ adapter->msix_entries = kcalloc(v_budget,
+ sizeof(struct msix_entry), GFP_KERNEL);
+ if (!adapter->msix_entries) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (vector = 0; vector < v_budget; vector++)
+ adapter->msix_entries[vector].entry = vector;
+
+ err = i40evf_acquire_msix_vectors(adapter, v_budget);
+
+out:
+ adapter->netdev->real_num_tx_queues = pairs;
+ return err;
+}
+
+/**
+ * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ struct i40e_q_vector *q_vector;
+
+ num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
+ if (!q_vector)
+ goto err_out;
+ q_vector->adapter = adapter;
+ q_vector->vsi = &adapter->vsi;
+ q_vector->v_idx = q_idx;
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ i40evf_napi_poll, 64);
+ adapter->q_vector[q_idx] = q_vector;
+ }
+
+ return 0;
+
+err_out:
+ while (q_idx) {
+ q_idx--;
+ q_vector = adapter->q_vector[q_idx];
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ adapter->q_vector[q_idx] = NULL;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * i40evf_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ int napi_vectors;
+
+ num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+ napi_vectors = adapter->vsi_res->num_queue_pairs;
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ struct i40e_q_vector *q_vector = adapter->q_vector[q_idx];
+
+ adapter->q_vector[q_idx] = NULL;
+ if (q_idx < napi_vectors)
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ }
+}
+
+/**
+ * i40evf_reset_interrupt_capability - Reset MSIX setup
+ * @adapter: board private structure
+ *
+ **/
+void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
+{
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+}
+
+/**
+ * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init
+ * @adapter: board private structure to initialize
+ *
+ **/
+int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
+{
+ int err;
+
+ err = i40evf_set_interrupt_capability(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to setup interrupt capabilities\n");
+ goto err_set_interrupt;
+ }
+
+ err = i40evf_alloc_q_vectors(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to allocate memory for queue vectors\n");
+ goto err_alloc_q_vectors;
+ }
+
+ err = i40evf_alloc_queues(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to allocate memory for queues\n");
+ goto err_alloc_queues;
+ }
+
+ dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
+ (adapter->vsi_res->num_queue_pairs > 1) ? "Enabled" :
+ "Disabled", adapter->vsi_res->num_queue_pairs);
+
+ return 0;
+err_alloc_queues:
+ i40evf_free_q_vectors(adapter);
+err_alloc_q_vectors:
+ i40evf_reset_interrupt_capability(adapter);
+err_set_interrupt:
+ return err;
+}
+
+/**
+ * i40evf_watchdog_timer - Periodic call-back timer
+ * @data: pointer to adapter disguised as unsigned long
+ **/
+static void i40evf_watchdog_timer(unsigned long data)
+{
+ struct i40evf_adapter *adapter = (struct i40evf_adapter *)data;
+ schedule_work(&adapter->watchdog_task);
+ /* timer will be rescheduled in watchdog task */
+}
+
+/**
+ * i40evf_watchdog_task - Periodic call-back task
+ * @work: pointer to work_struct
+ **/
+static void i40evf_watchdog_task(struct work_struct *work)
+{
+ struct i40evf_adapter *adapter = container_of(work,
+ struct i40evf_adapter,
+ watchdog_task);
+ struct i40e_hw *hw = &adapter->hw;
+
+ if (adapter->state < __I40EVF_DOWN)
+ goto watchdog_done;
+
+ if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
+ goto watchdog_done;
+
+ /* check for unannounced reset */
+ if ((adapter->state != __I40EVF_RESETTING) &&
+ (rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) {
+ adapter->state = __I40EVF_RESETTING;
+ schedule_work(&adapter->reset_task);
+ dev_info(&adapter->pdev->dev, "%s: hardware reset detected\n",
+ __func__);
+ goto watchdog_done;
+ }
+
+ /* Process admin queue tasks. After init, everything gets done
+ * here so we don't race on the admin queue.
+ */
+ if (adapter->aq_pending)
+ goto watchdog_done;
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
+ i40evf_map_queues(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) {
+ i40evf_add_ether_addrs(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) {
+ i40evf_add_vlans(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) {
+ i40evf_del_ether_addrs(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) {
+ i40evf_del_vlans(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
+ i40evf_disable_queues(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) {
+ i40evf_configure_queues(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) {
+ i40evf_enable_queues(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->state == __I40EVF_RUNNING)
+ i40evf_request_stats(adapter);
+
+ i40evf_irq_enable(adapter, true);
+ i40evf_fire_sw_int(adapter, 0xFF);
+watchdog_done:
+ if (adapter->aq_required)
+ mod_timer(&adapter->watchdog_timer,
+ jiffies + msecs_to_jiffies(20));
+ else
+ mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ schedule_work(&adapter->adminq_task);
+}
+
+/**
+ * i40evf_configure_rss - Prepare for RSS if used
+ * @adapter: board private structure
+ **/
+static void i40evf_configure_rss(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ u32 lut = 0;
+ int i, j;
+ u64 hena;
+
+ /* Set of random keys generated using kernel random number generator */
+ static const u32 seed[I40E_VFQF_HKEY_MAX_INDEX + 1] = {
+ 0x794221b4, 0xbca0c5ab, 0x6cd5ebd9, 0x1ada6127,
+ 0x983b3aa1, 0x1c4e71eb, 0x7f6328b2, 0xfcdc0da0,
+ 0xc135cafa, 0x7a6f7e2d, 0xe7102d28, 0x163cd12e,
+ 0x4954b126 };
+
+ /* Hash type is configured by the PF - we just supply the key */
+
+ /* Fill out hash function seed */
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+ wr32(hw, I40E_VFQF_HKEY(i), seed[i]);
+
+ /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
+ hena = I40E_DEFAULT_RSS_HENA;
+ wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
+ wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+
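+ /* Example: with 4 queue pairs the LUT entries cycle 0,1,2,3 and each
+ * 32-bit HLUT register packs four consecutive 8-bit entries.
+ */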
+ /* Populate the LUT with max no. of queues in round robin fashion */
+ for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
+ if (j == adapter->vsi_res->num_queue_pairs)
+ j = 0;
+ /* lut = 4-byte sliding window of 4 lut entries */
+ lut = (lut << 8) | (j & 0xff);
+ /* On i = 3, we have 4 entries in lut; write to the register */
+ if ((i & 3) == 3)
+ wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
+ }
+ i40e_flush(hw);
+}
+
+/**
+ * i40evf_reset_task - Call-back task to handle hardware reset
+ * @work: pointer to work_struct
+ *
+ * During reset we need to shut down and reinitialize the admin queue
+ * before we can use it to communicate with the PF again. We also clear
+ * and reinit the rings because that context is lost as well.
+ **/
+static void i40evf_reset_task(struct work_struct *work)
+{
+ struct i40evf_adapter *adapter =
+ container_of(work, struct i40evf_adapter, reset_task);
+ struct i40e_hw *hw = &adapter->hw;
+ int i = 0, err;
+ u32 rstat_val;
+
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ udelay(500);
+
+ /* wait until the reset is complete */
+ for (i = 0; i < 20; i++) {
+ rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
+ I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ if (rstat_val == I40E_VFR_COMPLETED)
+ break;
+ mdelay(100);
+ }
+ if (i == 20) {
+ /* reset never finished */
+ dev_info(&adapter->pdev->dev, "%s: reset never finished: %x\n",
+ __func__, rstat_val);
+ /* carry on anyway */
+ }
+ i40evf_down(adapter);
+ adapter->state = __I40EVF_RESETTING;
+
+ /* kill and reinit the admin queue */
+ if (i40evf_shutdown_adminq(hw))
+ dev_warn(&adapter->pdev->dev,
+ "%s: Failed to destroy the Admin Queue resources\n",
+ __func__);
+ err = i40evf_init_adminq(hw);
+ if (err)
+ dev_info(&adapter->pdev->dev, "%s: init_adminq failed: %d\n",
+ __func__, err);
+
+ adapter->aq_pending = 0;
+ adapter->aq_required = 0;
+ i40evf_map_queues(adapter);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+ mod_timer(&adapter->watchdog_timer, jiffies + 2);
+
+ if (netif_running(adapter->netdev)) {
+ /* allocate transmit descriptors */
+ err = i40evf_setup_all_tx_resources(adapter);
+ if (err)
+ goto reset_err;
+
+ /* allocate receive descriptors */
+ err = i40evf_setup_all_rx_resources(adapter);
+ if (err)
+ goto reset_err;
+
+ i40evf_configure(adapter);
+
+ err = i40evf_up_complete(adapter);
+ if (err)
+ goto reset_err;
+
+ i40evf_irq_enable(adapter, true);
+ }
+ return;
+reset_err:
+ dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n");
+ i40evf_close(adapter->netdev);
+}
+
+/**
+ * i40evf_adminq_task - worker thread to clean the admin queue
+ * @work: pointer to work_struct containing our data
+ **/
+static void i40evf_adminq_task(struct work_struct *work)
+{
+ struct i40evf_adapter *adapter =
+ container_of(work, struct i40evf_adapter, adminq_task);
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40e_arq_event_info event;
+ struct i40e_virtchnl_msg *v_msg;
+ i40e_status ret;
+ u16 pending;
+
+ event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
+ event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ if (!event.msg_buf) {
+ dev_info(&adapter->pdev->dev, "%s: no memory for ARQ clean\n",
+ __func__);
+ return;
+ }
+ v_msg = (struct i40e_virtchnl_msg *)&event.desc;
+ do {
+ ret = i40evf_clean_arq_element(hw, &event, &pending);
+ if (ret)
+ break; /* No event to process or error cleaning ARQ */
+
+ i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
+ v_msg->v_retval, event.msg_buf,
+ event.msg_size);
+ if (pending != 0) {
+ dev_info(&adapter->pdev->dev,
+ "%s: ARQ: Pending events %d\n",
+ __func__, pending);
+ memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
+ }
+ } while (pending);
+
+ /* re-enable Admin queue interrupt cause */
+ i40evf_misc_irq_enable(adapter);
+
+ kfree(event.msg_buf);
+}
+
+/**
+ * i40evf_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ if (adapter->tx_rings[i]->desc)
+ i40evf_free_tx_resources(adapter->tx_rings[i]);
+
+}
+
+/**
+ * i40evf_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]);
+ if (!err)
+ continue;
+ dev_err(&adapter->pdev->dev,
+ "%s: Allocation for Tx Queue %u failed\n",
+ __func__, i);
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * i40evf_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]);
+ if (!err)
+ continue;
+ dev_err(&adapter->pdev->dev,
+ "%s: Allocation for Rx Queue %u failed\n",
+ __func__, i);
+ break;
+ }
+ return err;
+}
+
+/**
+ * i40evf_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ if (adapter->rx_rings[i]->desc)
+ i40evf_free_rx_resources(adapter->rx_rings[i]);
+}
+
+/**
+ * i40evf_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int i40evf_open(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ if (adapter->state != __I40EVF_DOWN)
+ return -EBUSY;
+
+ /* allocate transmit descriptors */
+ err = i40evf_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = i40evf_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+
+ /* clear any pending interrupts, may auto mask */
+ err = i40evf_request_traffic_irqs(adapter, netdev->name);
+ if (err)
+ goto err_req_irq;
+
+ i40evf_configure(adapter);
+
+ err = i40evf_up_complete(adapter);
+ if (err)
+ goto err_req_irq;
+
+ i40evf_irq_enable(adapter, true);
+
+ return 0;
+
+err_req_irq:
+ i40evf_down(adapter);
+ i40evf_free_traffic_irqs(adapter);
+err_setup_rx:
+ i40evf_free_all_rx_resources(adapter);
+err_setup_tx:
+ i40evf_free_all_tx_resources(adapter);
+
+ return err;
+}
+
+/**
+ * i40evf_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
+ * are freed, along with all transmit and receive resources.
+ **/
+static int i40evf_close(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ /* signal that we are down to the interrupt handler */
+ adapter->state = __I40EVF_DOWN;
+ set_bit(__I40E_DOWN, &adapter->vsi.state);
+
+ i40evf_down(adapter);
+ i40evf_free_traffic_irqs(adapter);
+
+ i40evf_free_all_tx_resources(adapter);
+ i40evf_free_all_rx_resources(adapter);
+
+ return 0;
+}
+
+/**
+ * i40evf_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+static struct net_device_stats *i40evf_get_stats(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ /* only return the current stats */
+ return &adapter->net_stats;
+}
+
+/**
+ * i40evf_reinit_locked - Software reinit
+ * @adapter: board private structure
+ *
+ * Reinitializes the ring structures in response to a software configuration
+ * change. Roughly the same as close followed by open, but skips releasing
+ * and reallocating the interrupts.
+ **/
+void i40evf_reinit_locked(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ WARN_ON(in_interrupt());
+
+ adapter->state = __I40EVF_RESETTING;
+
+ i40evf_down(adapter);
+
+ /* allocate transmit descriptors */
+ err = i40evf_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_reinit;
+
+ /* allocate receive descriptors */
+ err = i40evf_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_reinit;
+
+ i40evf_configure(adapter);
+
+ err = i40evf_up_complete(adapter);
+ if (err)
+ goto err_reinit;
+
+ i40evf_irq_enable(adapter, true);
+ return;
+
+err_reinit:
+ dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n");
+ i40evf_close(netdev);
+}
+
+/**
+ * i40evf_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
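+ /* frame = MTU + Ethernet header (14) + FCS (4); e.g. the default
+ * MTU of 1500 yields a 1518-byte frame, and 68 is the historical
+ * minimum IPv4 MTU
+ */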
+ if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
+ return -EINVAL;
+
+ /* must set new MTU before calling down or up */
+ netdev->mtu = new_mtu;
+ i40evf_reinit_locked(adapter);
+ return 0;
+}
+
+static const struct net_device_ops i40evf_netdev_ops = {
+ .ndo_open = i40evf_open,
+ .ndo_stop = i40evf_close,
+ .ndo_start_xmit = i40evf_xmit_frame,
+ .ndo_get_stats = i40evf_get_stats,
+ .ndo_set_rx_mode = i40evf_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = i40evf_set_mac,
+ .ndo_change_mtu = i40evf_change_mtu,
+ .ndo_tx_timeout = i40evf_tx_timeout,
+ .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid,
+};
+
+/**
+ * i40evf_check_reset_complete - check that VF reset is complete
+ * @hw: pointer to hw struct
+ *
+ * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
+ **/
+static int i40evf_check_reset_complete(struct i40e_hw *hw)
+{
+ u32 rstat;
+ int i;
+
+ for (i = 0; i < 100; i++) {
+ rstat = rd32(hw, I40E_VFGEN_RSTAT) &
+ I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ if (rstat == I40E_VFR_VFACTIVE)
+ return 0;
+ udelay(10);
+ }
+ return -EBUSY;
+}
+
+/**
+ * i40evf_init_task - worker thread to perform delayed initialization
+ * @work: pointer to work_struct containing our data
+ *
+ * This task completes the work that was begun in probe. Due to the nature
+ * of VF-PF communications, we may need to wait tens of milliseconds to get
+ * responses back from the PF. Rather than busy-wait in probe and bog down the
+ * whole system, we'll do it in a task so we can sleep.
+ * This task only runs during driver init. Once we've established
+ * communications with the PF driver and set up our netdev, the watchdog
+ * takes over.
+ **/
+static void i40evf_init_task(struct work_struct *work)
+{
+ struct i40evf_adapter *adapter = container_of(work,
+ struct i40evf_adapter,
+ init_task.work);
+ struct net_device *netdev = adapter->netdev;
+ struct i40evf_mac_filter *f;
+ struct i40e_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ int i, err, bufsz;
+
+ switch (adapter->state) {
+ case __I40EVF_STARTUP:
+ /* driver loaded, probe complete */
+ err = i40e_set_mac_type(hw);
+ if (err) {
+ dev_info(&pdev->dev, "%s: set_mac_type failed: %d\n",
+ __func__, err);
+ goto err;
+ }
+ err = i40evf_check_reset_complete(hw);
+ if (err) {
+ dev_info(&pdev->dev, "%s: device is still in reset (%d).\n",
+ __func__, err);
+ goto err;
+ }
+ hw->aq.num_arq_entries = I40EVF_AQ_LEN;
+ hw->aq.num_asq_entries = I40EVF_AQ_LEN;
+ hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
+ hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
+
+ err = i40evf_init_adminq(hw);
+ if (err) {
+ dev_info(&pdev->dev, "%s: init_adminq failed: %d\n",
+ __func__, err);
+ goto err;
+ }
+ err = i40evf_send_api_ver(adapter);
+ if (err) {
+ dev_info(&pdev->dev, "%s: unable to send to PF (%d)\n",
+ __func__, err);
+ i40evf_shutdown_adminq(hw);
+ goto err;
+ }
+ adapter->state = __I40EVF_INIT_VERSION_CHECK;
+ goto restart;
+ case __I40EVF_INIT_VERSION_CHECK:
+ if (!i40evf_asq_done(hw))
+ goto err;
+
+ /* aq msg sent, awaiting reply */
+ err = i40evf_verify_api_ver(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to verify API version, error %d\n",
+ err);
+ goto err;
+ }
+ err = i40evf_send_vf_config_msg(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Unable send config request, error %d\n",
+ err);
+ goto err;
+ }
+ adapter->state = __I40EVF_INIT_GET_RESOURCES;
+ goto restart;
+ case __I40EVF_INIT_GET_RESOURCES:
+ /* aq msg sent, awaiting reply */
+ if (!adapter->vf_res) {
+ bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
+ (I40E_MAX_VF_VSI *
+ sizeof(struct i40e_virtchnl_vsi_resource));
+ adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
+ if (!adapter->vf_res) {
+ dev_err(&pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ goto err;
+ }
+ }
+ err = i40evf_get_vf_config(adapter);
+ if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+ goto restart;
+ if (err) {
+ dev_info(&pdev->dev, "%s: unable to get VF config (%d)\n",
+ __func__, err);
+ goto err_alloc;
+ }
+ adapter->state = __I40EVF_INIT_SW;
+ break;
+ default:
+ goto err_alloc;
+ }
+ /* got VF config message back from PF, now we can parse it */
+ for (i = 0; i < adapter->vf_res->num_vsis; i++) {
+ if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+ adapter->vsi_res = &adapter->vf_res->vsi_res[i];
+ }
+ if (!adapter->vsi_res) {
+ dev_info(&pdev->dev, "%s: no LAN VSI found\n", __func__);
+ goto err_alloc;
+ }
+
+ adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
+
+ adapter->txd_count = I40EVF_DEFAULT_TXD;
+ adapter->rxd_count = I40EVF_DEFAULT_RXD;
+
+ netdev->netdev_ops = &i40evf_netdev_ops;
+ i40evf_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
+
+ netdev->features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_SCTP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_GRO;
+
+ if (adapter->vf_res->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
+ netdev->vlan_features = netdev->features;
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+
+ /* The HW MAC address was set and/or determined in sw_init */
+ if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
+ dev_info(&pdev->dev,
+ "Invalid MAC address %pMAC, using random\n",
+ adapter->hw.mac.addr);
+ random_ether_addr(adapter->hw.mac.addr);
+ }
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+
+ INIT_LIST_HEAD(&adapter->mac_filter_list);
+ INIT_LIST_HEAD(&adapter->vlan_filter_list);
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f)
+ goto err_sw_init;
+
+ memcpy(f->macaddr, adapter->hw.mac.addr, ETH_ALEN);
+ f->add = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+
+ list_add(&f->list, &adapter->mac_filter_list);
+
+ init_timer(&adapter->watchdog_timer);
+ adapter->watchdog_timer.function = &i40evf_watchdog_timer;
+ adapter->watchdog_timer.data = (unsigned long)adapter;
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+
+ err = i40evf_init_interrupt_scheme(adapter);
+ if (err)
+ goto err_sw_init;
+ i40evf_map_rings_to_vectors(adapter);
+ i40evf_configure_rss(adapter);
+ err = i40evf_request_misc_irq(adapter);
+ if (err)
+ goto err_sw_init;
+
+ netif_carrier_off(netdev);
+
+ strcpy(netdev->name, "eth%d");
+
+ adapter->vsi.id = adapter->vsi_res->vsi_id;
+ adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */
+ adapter->vsi.back = adapter;
+ adapter->vsi.base_vector = 1;
+ adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
+ adapter->vsi.rx_itr_setting = I40E_ITR_DYNAMIC;
+ adapter->vsi.tx_itr_setting = I40E_ITR_DYNAMIC;
+ adapter->vsi.netdev = adapter->netdev;
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ adapter->netdev_registered = true;
+
+ netif_tx_stop_all_queues(netdev);
+
+ dev_info(&pdev->dev, "MAC address: %pMAC\n", adapter->hw.mac.addr);
+ if (netdev->features & NETIF_F_GRO)
+ dev_info(&pdev->dev, "GRO is enabled\n");
+
+ dev_info(&pdev->dev, "%s\n", i40evf_driver_string);
+ adapter->state = __I40EVF_DOWN;
+ set_bit(__I40E_DOWN, &adapter->vsi.state);
+ i40evf_misc_irq_enable(adapter);
+ return;
+restart:
+ schedule_delayed_work(&adapter->init_task,
+ msecs_to_jiffies(50));
+ return;
+
+err_register:
+ i40evf_free_misc_irq(adapter);
+err_sw_init:
+ i40evf_reset_interrupt_capability(adapter);
+ adapter->state = __I40EVF_FAILED;
+err_alloc:
+ kfree(adapter->vf_res);
+ adapter->vf_res = NULL;
+err:
+ /* Things went into the weeds, so try again later */
+ if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
+ dev_err(&pdev->dev, "Failed to communicate with PF; giving up.\n");
+ if (hw->aq.asq.count)
+ i40evf_shutdown_adminq(hw); /* ignore error */
+ adapter->state = __I40EVF_FAILED;
+ return; /* do not reschedule */
+ }
+ schedule_delayed_work(&adapter->init_task, HZ * 3);
+ return;
+}
+
+/**
+ * i40evf_shutdown - Shutdown the device in preparation for a reboot
+ * @pdev: pci device structure
+ **/
+static void i40evf_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ i40evf_close(netdev);
+
+#ifdef CONFIG_PM
+ pci_save_state(pdev);
+
+#endif
+ pci_disable_device(pdev);
+}
+
+/**
+ * i40evf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in i40evf_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * i40evf_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct i40evf_adapter *adapter = NULL;
+ struct i40e_hw *hw = NULL;
+ int err, pci_using_dac;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ pci_using_dac = true;
+ /* coherent mask for the same size will always succeed if
+ * dma_set_mask does
+ */
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ pci_using_dac = false;
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ } else {
+ dev_err(&pdev->dev, "%s: DMA configuration failed: %d\n",
+ __func__, err);
+ err = -EIO;
+ goto err_dma;
+ }
+
+ err = pci_request_regions(pdev, i40evf_driver_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_regions failed 0x%x\n", err);
+ goto err_pci_reg;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter),
+ MAX_TX_QUEUES);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+
+ hw = &adapter->hw;
+ hw->back = adapter;
+
+ adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+ adapter->state = __I40EVF_STARTUP;
+
+ /* Call save state here because it relies on the adapter struct. */
+ pci_save_state(pdev);
+
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!hw->hw_addr) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+ hw->bus.device = PCI_SLOT(pdev->devfn);
+ hw->bus.func = PCI_FUNC(pdev->devfn);
+
+ INIT_WORK(&adapter->reset_task, i40evf_reset_task);
+ INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
+ INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
+ INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
+ schedule_delayed_work(&adapter->init_task, 10);
+
+ return 0;
+
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+#ifdef CONFIG_PM
+/**
+ * i40evf_suspend - Power management suspend routine
+ * @pdev: PCI device information struct
+ * @state: unused
+ *
+ * Called when the system (VM) is entering sleep/suspend.
+ **/
+static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int retval = 0;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ rtnl_lock();
+ i40evf_down(adapter);
+ rtnl_unlock();
+ }
+ i40evf_free_misc_irq(adapter);
+ i40evf_reset_interrupt_capability(adapter);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+ pci_disable_device(pdev);
+
+ return 0;
+}
+
+/**
+ * i40evf_resume - Power management resume routine
+ * @pdev: PCI device information struct
+ *
+ * Called when the system (VM) is resumed from sleep/suspend.
+ **/
+static int i40evf_resume(struct pci_dev *pdev)
+{
+ struct i40evf_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ /* pci_restore_state clears dev->state_saved so call
+ * pci_save_state to restore it.
+ */
+ pci_save_state(pdev);
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
+ return err;
+ }
+ pci_set_master(pdev);
+
+ rtnl_lock();
+ err = i40evf_set_interrupt_capability(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
+ return err;
+ }
+ err = i40evf_request_misc_irq(adapter);
+ rtnl_unlock();
+ if (err) {
+ dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
+ return err;
+ }
+
+ schedule_work(&adapter->reset_task);
+
+ netif_device_attach(netdev);
+
+ return err;
+}
+
+#endif /* CONFIG_PM */
+/**
+ * i40evf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * i40evf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void i40evf_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+
+ cancel_delayed_work_sync(&adapter->init_task);
+
+ if (adapter->netdev_registered) {
+ unregister_netdev(netdev);
+ adapter->netdev_registered = false;
+ }
+ adapter->state = __I40EVF_REMOVE;
+
+ if (adapter->num_msix_vectors) {
+ i40evf_misc_irq_disable(adapter);
+ del_timer_sync(&adapter->watchdog_timer);
+
+ flush_scheduled_work();
+
+ i40evf_free_misc_irq(adapter);
+
+ i40evf_reset_interrupt_capability(adapter);
+ }
+
+ if (hw->aq.asq.count)
+ i40evf_shutdown_adminq(hw);
+
+ iounmap(hw->hw_addr);
+ pci_release_regions(pdev);
+
+ i40evf_free_queues(adapter);
+ kfree(adapter->vf_res);
+
+ free_netdev(netdev);
+
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver i40evf_driver = {
+ .name = i40evf_driver_name,
+ .id_table = i40evf_pci_tbl,
+ .probe = i40evf_probe,
+ .remove = i40evf_remove,
+#ifdef CONFIG_PM
+ .suspend = i40evf_suspend,
+ .resume = i40evf_resume,
+#endif
+ .shutdown = i40evf_shutdown,
+};
+
+/**
+ * i40e_init_module - Driver Registration Routine
+ *
+ * i40e_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init i40evf_init_module(void)
+{
+ int ret;
+
+ pr_info("i40evf: %s - version %s\n", i40evf_driver_string,
+ i40evf_driver_version);
+
+ pr_info("%s\n", i40evf_copyright);
+
+ ret = pci_register_driver(&i40evf_driver);
+ return ret;
+}
+
+module_init(i40evf_init_module);
+
+/**
+ * i40e_exit_module - Driver Exit Cleanup Routine
+ *
+ * i40e_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit i40evf_exit_module(void)
+{
+ pci_unregister_driver(&i40evf_driver);
+}
+
+module_exit(i40evf_exit_module);
+
+/* i40evf_main.c */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
new file mode 100644
index 000000000000..e6978d79e62b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -0,0 +1,772 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40evf.h"
+#include "i40e_prototype.h"
+
+/* busy wait delay in msec */
+#define I40EVF_BUSY_WAIT_DELAY 10
+#define I40EVF_BUSY_WAIT_COUNT 50
+
+/**
+ * i40evf_send_pf_msg
+ * @adapter: adapter structure
+ * @op: virtual channel opcode
+ * @msg: pointer to message buffer
+ * @len: message length
+ *
+ * Send message to PF and print status if failure.
+ **/
+static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
+ enum i40e_virtchnl_ops op, u8 *msg, u16 len)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ i40e_status err;
+
+ err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
+ if (err)
+ dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
+ op, err, hw->aq.asq_last_status);
+ return err;
+}
+
+/**
+ * i40evf_send_api_ver
+ * @adapter: adapter structure
+ *
+ * Send API version admin queue message to the PF. The reply is not checked
+ * in this function. Returns 0 if the message was successfully
+ * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
+ **/
+int i40evf_send_api_ver(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_version_info vvi;
+
+ vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
+ vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;
+
+ return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_VERSION, (u8 *)&vvi,
+ sizeof(vvi));
+}
+
+/**
+ * i40evf_verify_api_ver
+ * @adapter: adapter structure
+ *
+ * Compare API versions with the PF. Must be called after admin queue is
+ * initialized. Returns 0 if API versions match, -EIO if
+ * they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
+ **/
+int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_version_info *pf_vvi;
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40e_arq_event_info event;
+ i40e_status err;
+
+ event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
+ event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ if (!event.msg_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = i40evf_clean_arq_element(hw, &event, NULL);
+ if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+ goto out_alloc;
+
+ err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+ if (err) {
+ err = -EIO;
+ goto out_alloc;
+ }
+
+ if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
+ I40E_VIRTCHNL_OP_VERSION) {
+ err = -EIO;
+ goto out_alloc;
+ }
+
+ pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
+ if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
+ (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
+ err = -EIO;
+
+out_alloc:
+ kfree(event.msg_buf);
+out:
+ return err;
+}
+
+/**
+ * i40evf_send_vf_config_msg
+ * @adapter: adapter structure
+ *
+ * Send VF configuration request admin queue message to the PF. The reply
+ * is not checked in this function. Returns 0 if the message was
+ * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
+ **/
+int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
+{
+ return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ NULL, 0);
+}
+
+/**
+ * i40evf_get_vf_config
+ * @adapter: private adapter structure
+ *
+ * Get VF configuration from PF and populate hw structure. Must be called after
+ * admin queue is initialized. Returns I40E_ERR_ADMIN_QUEUE_NO_WORK when no
+ * response is pending yet; the caller retries until one arrives. The response
+ * from the PF is copied into adapter->vf_res for further processing.
+ **/
+int i40evf_get_vf_config(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40e_arq_event_info event;
+ u16 len;
+ i40e_status err;
+
+ len = sizeof(struct i40e_virtchnl_vf_resource) +
+ I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
+ event.msg_size = len;
+ event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ if (!event.msg_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = i40evf_clean_arq_element(hw, &event, NULL);
+ if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+ goto out_alloc;
+
+ err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Error returned from PF, %d, %d\n", __func__,
+ le32_to_cpu(event.desc.cookie_high),
+ le32_to_cpu(event.desc.cookie_low));
+ err = -EIO;
+ goto out_alloc;
+ }
+
+ if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid response from PF, %d, %d\n", __func__,
+ le32_to_cpu(event.desc.cookie_high),
+ le32_to_cpu(event.desc.cookie_low));
+ err = -EIO;
+ goto out_alloc;
+ }
+ memcpy(adapter->vf_res, event.msg_buf, min(event.msg_size, len));
+
+ i40e_vf_parse_hw_config(hw, adapter->vf_res);
+out_alloc:
+ kfree(event.msg_buf);
+out:
+ return err;
+}
+
+/**
+ * i40evf_configure_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF set up our (previously allocated) queues.
+ **/
+void i40evf_configure_queues(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_vsi_queue_config_info *vqci;
+ struct i40e_virtchnl_queue_pair_info *vqpi;
+ int pairs = adapter->vsi_res->num_queue_pairs;
+ int i, len;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
+ len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
+ (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
+ vqci = kzalloc(len, GFP_ATOMIC);
+ if (!vqci) {
+ dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+ vqci->vsi_id = adapter->vsi_res->vsi_id;
+ vqci->num_queue_pairs = pairs;
+ vqpi = vqci->qpair;
+ /* Size check is not needed here - HW max is 16 queue pairs, and we
+ * can fit info for 31 of them into the AQ buffer before it overflows.
+ */
+ for (i = 0; i < pairs; i++) {
+ vqpi->txq.vsi_id = vqci->vsi_id;
+ vqpi->txq.queue_id = i;
+ vqpi->txq.ring_len = adapter->tx_rings[i]->count;
+ vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma;
+
+ vqpi->rxq.vsi_id = vqci->vsi_id;
+ vqpi->rxq.queue_id = i;
+ vqpi->rxq.ring_len = adapter->rx_rings[i]->count;
+ vqpi->rxq.dma_ring_addr = adapter->rx_rings[i]->dma;
+ vqpi->rxq.max_pkt_size = adapter->netdev->mtu +
+ ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
+ vqpi->rxq.databuffer_size = adapter->rx_rings[i]->rx_buf_len;
+ vqpi++;
+ }
+
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ (u8 *)vqci, len);
+ kfree(vqci);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
+}
+
+/**
+ * i40evf_enable_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF enable all of our queues.
+ **/
+void i40evf_enable_queues(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_queue_select vqs;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
+ vqs.vsi_id = adapter->vsi_res->vsi_id;
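+ /* one bit per queue pair, e.g. 4 pairs -> 0xf */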
+ vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
+ vqs.rx_queues = vqs.tx_queues;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ (u8 *)&vqs, sizeof(vqs));
+ adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
+}
+
+/**
+ * i40evf_disable_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF disable all of our queues.
+ **/
+void i40evf_disable_queues(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_queue_select vqs;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
+ vqs.vsi_id = adapter->vsi_res->vsi_id;
+ vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
+ vqs.rx_queues = vqs.tx_queues;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ (u8 *)&vqs, sizeof(vqs));
+ adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
+}
+
+/**
+ * i40evf_map_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF map queues to interrupt vectors. Misc causes, including
+ * admin queue, are always mapped to vector 0.
+ **/
+void i40evf_map_queues(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_irq_map_info *vimi;
+ int v_idx, q_vectors, len;
+ struct i40e_q_vector *q_vector;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
+
+ q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ len = sizeof(struct i40e_virtchnl_irq_map_info) +
+ (adapter->num_msix_vectors *
+ sizeof(struct i40e_virtchnl_vector_map));
+ vimi = kzalloc(len, GFP_ATOMIC);
+ if (!vimi) {
+ dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+
+ vimi->num_vectors = adapter->num_msix_vectors;
+ /* Queue vectors first */
+ for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+ q_vector = adapter->q_vector[v_idx];
+ vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
+ vimi->vecmap[v_idx].vector_id = v_idx + NONQ_VECS;
+ vimi->vecmap[v_idx].txq_map = q_vector->ring_mask;
+ vimi->vecmap[v_idx].rxq_map = q_vector->ring_mask;
+ }
+ /* Misc vector last - this is only for AdminQ messages */
+ vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
+ vimi->vecmap[v_idx].vector_id = 0;
+ vimi->vecmap[v_idx].txq_map = 0;
+ vimi->vecmap[v_idx].rxq_map = 0;
+
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ (u8 *)vimi, len);
+ kfree(vimi);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_MAP_VECTORS;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
+}
+
+/**
+ * i40evf_add_ether_addrs
+ * @adapter: adapter structure
+ *
+ * Request that the PF add one or more addresses to our filters.
+ **/
+void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_ether_addr_list *veal;
+ int len, i = 0, count = 0;
+ struct i40evf_mac_filter *f;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ if (f->add)
+ count++;
+ }
+ if (!count) {
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
+
+ len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+ (count * sizeof(struct i40e_virtchnl_ether_addr));
+ if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n",
+ __func__);
+ count = (I40EVF_MAX_AQ_BUF_SIZE -
+ sizeof(struct i40e_virtchnl_ether_addr_list)) /
+ sizeof(struct i40e_virtchnl_ether_addr);
+ len = I40EVF_MAX_AQ_BUF_SIZE;
+ }
+
+ veal = kzalloc(len, GFP_ATOMIC);
+ if (!veal) {
+ dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+ veal->vsi_id = adapter->vsi_res->vsi_id;
+ veal->num_elements = count;
+	list_for_each_entry(f, &adapter->mac_filter_list, list) {
+		if (f->add) {
+			/* don't write past a truncated buffer */
+			if (i == count)
+				break;
+			memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN);
+			i++;
+			f->add = false;
+		}
+	}
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ (u8 *)veal, len);
+ kfree(veal);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+}
+
+/**
+ * i40evf_del_ether_addrs
+ * @adapter: adapter structure
+ *
+ * Request that the PF remove one or more addresses from our filters.
+ **/
+void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_ether_addr_list *veal;
+ struct i40evf_mac_filter *f, *ftmp;
+ int len, i = 0, count = 0;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ if (f->remove)
+ count++;
+ }
+ if (!count) {
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
+
+ len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+ (count * sizeof(struct i40e_virtchnl_ether_addr));
+ if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n",
+ __func__);
+ count = (I40EVF_MAX_AQ_BUF_SIZE -
+ sizeof(struct i40e_virtchnl_ether_addr_list)) /
+ sizeof(struct i40e_virtchnl_ether_addr);
+ len = I40EVF_MAX_AQ_BUF_SIZE;
+ }
+ veal = kzalloc(len, GFP_ATOMIC);
+ if (!veal) {
+ dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+ veal->vsi_id = adapter->vsi_res->vsi_id;
+ veal->num_elements = count;
+	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+		if (f->remove) {
+			/* don't write past a truncated buffer; leftover
+			 * filters stay on the list for a later request
+			 */
+			if (i == count)
+				break;
+			memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN);
+			i++;
+			list_del(&f->list);
+			kfree(f);
+		}
+	}
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ (u8 *)veal, len);
+ kfree(veal);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+}
+
+/**
+ * i40evf_add_vlans
+ * @adapter: adapter structure
+ *
+ * Request that the PF add one or more VLAN filters to our VSI.
+ **/
+void i40evf_add_vlans(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_vlan_filter_list *vvfl;
+ int len, i = 0, count = 0;
+ struct i40evf_vlan_filter *f;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->add)
+ count++;
+ }
+ if (!count) {
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_ADD_VLAN;
+
+ len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ (count * sizeof(u16));
+ if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n",
+ __func__);
+ count = (I40EVF_MAX_AQ_BUF_SIZE -
+ sizeof(struct i40e_virtchnl_vlan_filter_list)) /
+ sizeof(u16);
+ len = I40EVF_MAX_AQ_BUF_SIZE;
+ }
+ vvfl = kzalloc(len, GFP_ATOMIC);
+ if (!vvfl) {
+ dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+ vvfl->vsi_id = adapter->vsi_res->vsi_id;
+ vvfl->num_elements = count;
+	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+		if (f->add) {
+			/* don't write past a truncated buffer */
+			if (i == count)
+				break;
+			vvfl->vlan_id[i] = f->vlan;
+			i++;
+			f->add = false;
+		}
+	}
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
+ kfree(vvfl);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+}
+
+/**
+ * i40evf_del_vlans
+ * @adapter: adapter structure
+ *
+ * Request that the PF remove one or more VLAN filters from our VSI.
+ **/
+void i40evf_del_vlans(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_vlan_filter_list *vvfl;
+ struct i40evf_vlan_filter *f, *ftmp;
+ int len, i = 0, count = 0;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->remove)
+ count++;
+ }
+ if (!count) {
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_DEL_VLAN;
+
+ len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ (count * sizeof(u16));
+ if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n",
+ __func__);
+ count = (I40EVF_MAX_AQ_BUF_SIZE -
+ sizeof(struct i40e_virtchnl_vlan_filter_list)) /
+ sizeof(u16);
+ len = I40EVF_MAX_AQ_BUF_SIZE;
+ }
+ vvfl = kzalloc(len, GFP_ATOMIC);
+ if (!vvfl) {
+ dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+ vvfl->vsi_id = adapter->vsi_res->vsi_id;
+ vvfl->num_elements = count;
+	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+		if (f->remove) {
+			/* don't write past a truncated buffer; leftover
+			 * filters stay on the list for a later request
+			 */
+			if (i == count)
+				break;
+			vvfl->vlan_id[i] = f->vlan;
+			i++;
+			list_del(&f->list);
+			kfree(f);
+		}
+	}
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
+ kfree(vvfl);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+}
+
+/**
+ * i40evf_set_promiscuous
+ * @adapter: adapter structure
+ * @flags: bitmask to control unicast/multicast promiscuous mode.
+ *
+ * Request that the PF enable promiscuous mode for our VSI.
+ **/
+void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
+{
+ struct i40e_virtchnl_promisc_info vpi;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+ vpi.vsi_id = adapter->vsi_res->vsi_id;
+ vpi.flags = flags;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ (u8 *)&vpi, sizeof(vpi));
+}
+
+/**
+ * i40evf_request_stats
+ * @adapter: adapter structure
+ *
+ * Request VSI statistics from PF.
+ **/
+void i40evf_request_stats(struct i40evf_adapter *adapter)
+{
+	struct i40e_virtchnl_queue_select vqs;
+
+	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* no error message, this isn't crucial */
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_GET_STATS;
+ vqs.vsi_id = adapter->vsi_res->vsi_id;
+ /* queue maps are ignored for this message - only the vsi is used */
+ if (i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_STATS,
+ (u8 *)&vqs, sizeof(vqs)))
+ /* if the request failed, don't lock out others */
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+}
+
+/**
+ * i40evf_virtchnl_completion
+ * @adapter: adapter structure
+ * @v_opcode: opcode sent by PF
+ * @v_retval: retval sent by PF
+ * @msg: message sent by PF
+ * @msglen: message length
+ *
+ * Asynchronous completion function for admin queue messages. Rather than busy
+ * wait, we fire off our requests and assume that no errors will be returned.
+ * This function handles the reply messages.
+ **/
+void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval,
+ u8 *msg, u16 msglen)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
+ struct i40e_virtchnl_pf_event *vpe =
+ (struct i40e_virtchnl_pf_event *)msg;
+ switch (vpe->event) {
+ case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
+ adapter->link_up =
+ vpe->event_data.link_event.link_status;
+ if (adapter->link_up && !netif_carrier_ok(netdev)) {
+ dev_info(&adapter->pdev->dev, "NIC Link is Up\n");
+ netif_carrier_on(netdev);
+ netif_tx_wake_all_queues(netdev);
+ } else if (!adapter->link_up) {
+ dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ }
+ break;
+ case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
+ adapter->state = __I40EVF_RESETTING;
+ schedule_work(&adapter->reset_task);
+ dev_info(&adapter->pdev->dev,
+ "%s: hardware reset pending\n", __func__);
+ break;
+ default:
+ dev_err(&adapter->pdev->dev,
+ "%s: Unknown event %d from pf\n",
+ __func__, vpe->event);
+ break;
+	}
+ return;
+ }
+ if (v_opcode != adapter->current_op) {
+ dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d.\n",
+ __func__, adapter->current_op, v_opcode);
+ /* We're probably completely screwed at this point, but clear
+ * the current op and try to carry on....
+ */
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ return;
+ }
+ if (v_retval) {
+ dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d!\n",
+ __func__, v_retval, v_opcode);
+ }
+ switch (v_opcode) {
+ case I40E_VIRTCHNL_OP_GET_STATS: {
+ struct i40e_eth_stats *stats =
+ (struct i40e_eth_stats *)msg;
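+		/* fold the PF's per-class (unicast/multicast/broadcast)
+		 * counters into the aggregate netdev statistics
+		 */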
+ adapter->net_stats.rx_packets = stats->rx_unicast +
+ stats->rx_multicast +
+ stats->rx_broadcast;
+ adapter->net_stats.tx_packets = stats->tx_unicast +
+ stats->tx_multicast +
+ stats->tx_broadcast;
+ adapter->net_stats.rx_bytes = stats->rx_bytes;
+ adapter->net_stats.tx_bytes = stats->tx_bytes;
+ adapter->net_stats.rx_errors = stats->rx_errors;
+ adapter->net_stats.tx_errors = stats->tx_errors;
+ adapter->net_stats.rx_dropped = stats->rx_missed;
+ adapter->net_stats.tx_dropped = stats->tx_discards;
+ adapter->current_stats = *stats;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_MAC_FILTER);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_MAC_FILTER);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_VLAN:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_VLAN_FILTER);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_VLAN:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_VLAN_FILTER);
+ break;
+ case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ENABLE_QUEUES);
+ /* enable transmits */
+ i40evf_irq_enable(adapter, true);
+ netif_tx_start_all_queues(adapter->netdev);
+ netif_carrier_on(adapter->netdev);
+ break;
+ case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DISABLE_QUEUES);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_CONFIGURE_QUEUES);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS);
+ break;
+ default:
+ dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF.\n",
+ __func__, v_opcode);
+ break;
+ } /* switch v_opcode */
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+}
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 47c2d10df826..06df6928f44c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -113,6 +113,59 @@ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
}
/**
+ * igb_check_for_link_media_swap - Check which M88E1112 interface has link
+ * @hw: pointer to the HW structure
+ *
+ * Poll the M88E1112 interfaces to see which interface achieved link.
+ */
+static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ u8 port = 0;
+
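+	/* the M88E1112 exposes the copper interface on page 0 of the PHY
+	 * register space and the fiber/SerDes interface on page 1
+	 */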
+ /* Check the copper medium. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (data & E1000_M88E1112_STATUS_LINK)
+ port = E1000_MEDIA_PORT_COPPER;
+
+ /* Check the other medium. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ /* reset page to 0 */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+
+ if (data & E1000_M88E1112_STATUS_LINK)
+ port = E1000_MEDIA_PORT_OTHER;
+
+ /* Determine if a swap needs to happen. */
+ if (port && (hw->dev_spec._82575.media_port != port)) {
+ hw->dev_spec._82575.media_port = port;
+ hw->dev_spec._82575.media_changed = true;
+ } else {
+ ret_val = igb_check_for_link_82575(hw);
+ }
+
+	return ret_val;
+}
+
+/**
* igb_init_phy_params_82575 - Init PHY func ptrs.
* @hw: pointer to the HW structure
**/
@@ -189,6 +242,29 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
else
phy->ops.get_cable_length = igb_get_cable_length_m88;
phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
+	/* Check if this PHY is configured for media swap. */
+ if (phy->id == M88E1112_E_PHY_ID) {
+ u16 data;
+
+ ret_val = phy->ops.write_reg(hw,
+ E1000_M88E1112_PAGE_ADDR,
+ 2);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw,
+ E1000_M88E1112_MAC_CTRL_1,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
+ E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
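+		/* modes 2 and 3 automatically select between the copper and
+		 * SGMII/1000BASE-X interfaces, so link checks must poll both
+		 * media
+		 */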
+ if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
+ data == E1000_M88E1112_AUTO_COPPER_BASEX)
+ hw->mac.ops.check_for_link =
+ igb_check_for_link_media_swap;
+ }
break;
case IGP03E1000_E_PHY_ID:
phy->type = e1000_phy_igp_3;
@@ -365,6 +441,19 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
? igb_setup_copper_link_82575
: igb_setup_serdes_link_82575;
+ if (mac->type == e1000_82580) {
+ switch (hw->device_id) {
+		/* feature not supported on these device IDs */
+ case E1000_DEV_ID_DH89XXCC_SGMII:
+ case E1000_DEV_ID_DH89XXCC_SERDES:
+ case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+ case E1000_DEV_ID_DH89XXCC_SFP:
+ break;
+ default:
+ hw->dev_spec._82575.mas_capable = true;
+ break;
+ }
+ }
return 0;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 978eca31ceda..0571b973be80 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -205,6 +205,11 @@
*/
#define E1000_CONNSW_ENRGSRC 0x4
+#define E1000_CONNSW_PHYSD 0x400
+#define E1000_CONNSW_PHY_PDN 0x800
+#define E1000_CONNSW_SERDESD 0x200
+#define E1000_CONNSW_AUTOSENSE_CONF 0x2
+#define E1000_CONNSW_AUTOSENSE_EN 0x1
#define E1000_PCS_CFG_PCS_EN 8
#define E1000_PCS_LCTL_FLV_LINK_UP 1
#define E1000_PCS_LCTL_FSV_100 2
@@ -532,6 +537,17 @@
#define E1000_MDICNFG_PHY_MASK 0x03E00000
#define E1000_MDICNFG_PHY_SHIFT 21
+#define E1000_MEDIA_PORT_COPPER 1
+#define E1000_MEDIA_PORT_OTHER 2
+#define E1000_M88E1112_AUTO_COPPER_SGMII 0x2
+#define E1000_M88E1112_AUTO_COPPER_BASEX 0x3
+#define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */
+#define E1000_M88E1112_MAC_CTRL_1 0x10
+#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */
+#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7
+#define E1000_M88E1112_PAGE_ADDR 0x16
+#define E1000_M88E1112_STATUS 0x01
+
/* PCI Express Control */
#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 2e166b22d52b..ab99e2b582a8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -533,6 +533,9 @@ struct e1000_dev_spec_82575 {
bool clear_semaphore_once;
struct e1000_sfp_flags eth_flags;
bool module_plugged;
+ u8 media_port;
+ bool media_changed;
+ bool mas_capable;
};
struct e1000_hw {
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index c4c4fe332c7e..ad2b74d95138 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1728,7 +1728,10 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
* ownership of the resources, wait and try again to
* see if they have relinquished the resources yet.
*/
- udelay(usec_interval);
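+			/* udelay() may misbehave for waits of a millisecond
+			 * or more on some platforms; use mdelay() there
+			 */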
+ if (usec_interval >= 1000)
+ mdelay(usec_interval/1000);
+ else
+ udelay(usec_interval);
}
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 5e9ed89403aa..ccf472f073dd 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -41,6 +41,7 @@
#include <linux/if_vlan.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <linux/pci.h>
struct igb_adapter;
@@ -67,6 +68,7 @@ struct igb_adapter;
#define IGB_MIN_ITR_USECS 10
#define NON_Q_VECTORS 1
#define MAX_Q_VECTORS 8
+#define MAX_MSIX_ENTRIES 10
/* Transmit and receive queues */
#define IGB_MAX_RX_QUEUES 8
@@ -127,9 +129,9 @@ struct vf_data_storage {
#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8)
#define IGB_TX_HTHRESH 1
#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
- adapter->msix_entries) ? 1 : 4)
+ (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4)
#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
- adapter->msix_entries) ? 1 : 16)
+ (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 16)
/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
@@ -337,8 +339,10 @@ struct hwmon_attr {
};
struct hwmon_buff {
- struct device *device;
- struct hwmon_attr *hwmon_list;
+ struct attribute_group group;
+ const struct attribute_group *groups[2];
+ struct attribute *attrs[E1000_MAX_SENSORS * 4 + 1];
+ struct hwmon_attr hwmon_list[E1000_MAX_SENSORS * 4];
unsigned int n_hwmon;
};
#endif
@@ -355,7 +359,7 @@ struct igb_adapter {
unsigned int flags;
unsigned int num_q_vectors;
- struct msix_entry *msix_entries;
+ struct msix_entry msix_entries[MAX_MSIX_ENTRIES];
/* Interrupt Throttle Rate */
u32 rx_itr_setting;
@@ -440,7 +444,7 @@ struct igb_adapter {
char fw_version[32];
#ifdef CONFIG_IGB_HWMON
- struct hwmon_buff igb_hwmon_buff;
+ struct hwmon_buff *igb_hwmon_buff;
bool ets;
#endif
struct i2c_algo_bit_data i2c_algo;
@@ -450,6 +454,8 @@ struct igb_adapter {
u8 rss_indir_tbl[IGB_RETA_SIZE];
unsigned long link_check_timeout;
+ int copper_tries;
+ struct e1000_info ei;
};
#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -462,6 +468,16 @@ struct igb_adapter {
#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7)
#define IGB_FLAG_WOL_SUPPORTED (1 << 8)
#define IGB_FLAG_NEED_LINK_UPDATE (1 << 9)
+#define IGB_FLAG_MEDIA_RESET (1 << 10)
+#define IGB_FLAG_MAS_CAPABLE (1 << 11)
+#define IGB_FLAG_MAS_ENABLE (1 << 12)
+#define IGB_FLAG_HAS_MSIX (1 << 13)
+
+/* Media Auto Sense */
+#define IGB_MAS_ENABLE_0		0x0001
+#define IGB_MAS_ENABLE_1		0x0002
+#define IGB_MAS_ENABLE_2		0x0004
+#define IGB_MAS_ENABLE_3		0x0008
/* DMA Coalescing defines */
#define IGB_MIN_TXPBSIZE 20408
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index b0f3666b1d7f..1df02378de69 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1386,7 +1386,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
*data = 0;
/* Hook up test interrupt handler just for this test */
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
if (request_irq(adapter->msix_entries[0].vector,
igb_test_intr, 0, netdev->name, adapter)) {
*data = 1;
@@ -1519,7 +1519,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
msleep(10);
/* Unhook test interrupt handler */
- if (adapter->msix_entries)
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
free_irq(adapter->msix_entries[0].vector, adapter);
else
free_irq(irq, adapter);
@@ -1983,6 +1983,10 @@ static void igb_diag_test(struct net_device *netdev,
bool if_running = netif_running(netdev);
set_bit(__IGB_TESTING, &adapter->state);
+
+ /* can't do offline tests on media switching devices */
+ if (adapter->hw.dev_spec._82575.mas_capable)
+ eth_test->flags &= ~ETH_TEST_FL_OFFLINE;
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
@@ -2062,14 +2066,15 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- wol->supported = WAKE_UCAST | WAKE_MCAST |
- WAKE_BCAST | WAKE_MAGIC |
- WAKE_PHY;
wol->wolopts = 0;
if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
return;
+ wol->supported = WAKE_UCAST | WAKE_MCAST |
+ WAKE_BCAST | WAKE_MAGIC |
+ WAKE_PHY;
+
/* apply any specific unsupported masks here */
switch (adapter->hw.device_id) {
default:
@@ -2928,7 +2933,7 @@ static void igb_get_channels(struct net_device *netdev,
ch->max_combined = igb_max_channels(adapter);
/* Report info for other vector */
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
ch->max_other = NON_Q_VECTORS;
ch->other_count = NON_Q_VECTORS;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 58f1ce967aeb..e0af5bc61613 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -117,29 +117,29 @@ static int igb_add_hwmon_attr(struct igb_adapter *adapter,
unsigned int n_attr;
struct hwmon_attr *igb_attr;
- n_attr = adapter->igb_hwmon_buff.n_hwmon;
- igb_attr = &adapter->igb_hwmon_buff.hwmon_list[n_attr];
+ n_attr = adapter->igb_hwmon_buff->n_hwmon;
+ igb_attr = &adapter->igb_hwmon_buff->hwmon_list[n_attr];
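+	/* hwmon sysfs names are 1-based (temp1_input, temp2_input, ...),
+	 * while our sensor offsets start at 0, hence the offset + 1 below
+	 */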
switch (type) {
case IGB_HWMON_TYPE_LOC:
igb_attr->dev_attr.show = igb_hwmon_show_location;
snprintf(igb_attr->name, sizeof(igb_attr->name),
- "temp%u_label", offset);
+ "temp%u_label", offset + 1);
break;
case IGB_HWMON_TYPE_TEMP:
igb_attr->dev_attr.show = igb_hwmon_show_temp;
snprintf(igb_attr->name, sizeof(igb_attr->name),
- "temp%u_input", offset);
+ "temp%u_input", offset + 1);
break;
case IGB_HWMON_TYPE_CAUTION:
igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh;
snprintf(igb_attr->name, sizeof(igb_attr->name),
- "temp%u_max", offset);
+ "temp%u_max", offset + 1);
break;
case IGB_HWMON_TYPE_MAX:
igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh;
snprintf(igb_attr->name, sizeof(igb_attr->name),
- "temp%u_crit", offset);
+ "temp%u_crit", offset + 1);
break;
default:
rc = -EPERM;
@@ -154,30 +154,16 @@ static int igb_add_hwmon_attr(struct igb_adapter *adapter,
igb_attr->dev_attr.attr.mode = S_IRUGO;
igb_attr->dev_attr.attr.name = igb_attr->name;
sysfs_attr_init(&igb_attr->dev_attr.attr);
- rc = device_create_file(&adapter->pdev->dev,
- &igb_attr->dev_attr);
- if (rc == 0)
- ++adapter->igb_hwmon_buff.n_hwmon;
- return rc;
+ adapter->igb_hwmon_buff->attrs[n_attr] = &igb_attr->dev_attr.attr;
+
+ ++adapter->igb_hwmon_buff->n_hwmon;
+
+ return 0;
}
static void igb_sysfs_del_adapter(struct igb_adapter *adapter)
{
- int i;
-
- if (adapter == NULL)
- return;
-
- for (i = 0; i < adapter->igb_hwmon_buff.n_hwmon; i++) {
- device_remove_file(&adapter->pdev->dev,
- &adapter->igb_hwmon_buff.hwmon_list[i].dev_attr);
- }
-
- kfree(adapter->igb_hwmon_buff.hwmon_list);
-
- if (adapter->igb_hwmon_buff.device)
- hwmon_device_unregister(adapter->igb_hwmon_buff.device);
}
/* called from igb_main.c */
@@ -189,11 +175,11 @@ void igb_sysfs_exit(struct igb_adapter *adapter)
/* called from igb_main.c */
int igb_sysfs_init(struct igb_adapter *adapter)
{
- struct hwmon_buff *igb_hwmon = &adapter->igb_hwmon_buff;
+ struct hwmon_buff *igb_hwmon;
+ struct i2c_client *client;
+ struct device *hwmon_dev;
unsigned int i;
- int n_attrs;
int rc = 0;
- struct i2c_client *client = NULL;
/* If this method isn't defined we don't support thermals */
if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
@@ -201,34 +187,16 @@ int igb_sysfs_init(struct igb_adapter *adapter)
/* Don't create thermal hwmon interface if no sensors present */
rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw));
- if (rc)
- goto exit;
-
- /* init i2c_client */
- client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
- if (client == NULL) {
- dev_info(&adapter->pdev->dev,
- "Failed to create new i2c device..\n");
+ if (rc)
goto exit;
- }
- adapter->i2c_client = client;
- /* Allocation space for max attributes
- * max num sensors * values (loc, temp, max, caution)
- */
- n_attrs = E1000_MAX_SENSORS * 4;
- igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
- GFP_KERNEL);
- if (!igb_hwmon->hwmon_list) {
+ igb_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*igb_hwmon),
+ GFP_KERNEL);
+ if (!igb_hwmon) {
rc = -ENOMEM;
- goto err;
- }
-
- igb_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
- if (IS_ERR(igb_hwmon->device)) {
- rc = PTR_ERR(igb_hwmon->device);
- goto err;
+ goto exit;
}
+ adapter->igb_hwmon_buff = igb_hwmon;
for (i = 0; i < E1000_MAX_SENSORS; i++) {
@@ -240,11 +208,39 @@ int igb_sysfs_init(struct igb_adapter *adapter)
/* Bail if any hwmon attr struct fails to initialize */
rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION);
- rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC);
- rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP);
- rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX);
if (rc)
- goto err;
+ goto exit;
+ rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC);
+ if (rc)
+ goto exit;
+ rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP);
+ if (rc)
+ goto exit;
+ rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX);
+ if (rc)
+ goto exit;
+ }
+
+ /* init i2c_client */
+ client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
+ if (client == NULL) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to create new i2c device.\n");
+ rc = -ENODEV;
+ goto exit;
+ }
+ adapter->i2c_client = client;
+
+ igb_hwmon->groups[0] = &igb_hwmon->group;
+ igb_hwmon->group.attrs = igb_hwmon->attrs;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev,
+ client->name,
+ igb_hwmon,
+ igb_hwmon->groups);
+ if (IS_ERR(hwmon_dev)) {
+ rc = PTR_ERR(hwmon_dev);
+ goto err;
}
goto exit;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 025e5f4b7481..46d31a49f5ea 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -803,7 +803,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
if (tx_queue > IGB_N0_QUEUE)
msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
- if (!adapter->msix_entries && msix_vector == 0)
+ if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
msixbm |= E1000_EIMS_OTHER;
array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
q_vector->eims_value = msixbm;
@@ -983,43 +983,58 @@ err_out:
return err;
}
-static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
-{
- if (adapter->msix_entries) {
- pci_disable_msix(adapter->pdev);
- kfree(adapter->msix_entries);
- adapter->msix_entries = NULL;
- } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
- pci_disable_msi(adapter->pdev);
- }
-}
-
/**
* igb_free_q_vector - Free memory allocated for specific interrupt vector
* @adapter: board private structure to initialize
* @v_idx: Index of vector to be freed
*
- * This function frees the memory allocated to the q_vector. In addition if
- * NAPI is enabled it will delete any references to the NAPI struct prior
- * to freeing the q_vector.
+ * This function frees the memory allocated to the q_vector.
**/
static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
{
struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+ adapter->q_vector[v_idx] = NULL;
+
+ /* igb_get_stats64() might access the rings on this vector,
+ * we must wait a grace period before freeing it.
+ */
+ kfree_rcu(q_vector, rcu);
+}
+
+/**
+ * igb_reset_q_vector - Reset config for interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be reset
+ *
+ * If NAPI is enabled it will delete any references to the
+ * NAPI struct. This is preparation for igb_free_q_vector.
+ **/
+static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
+{
+ struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+
if (q_vector->tx.ring)
adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
if (q_vector->rx.ring)
adapter->tx_ring[q_vector->rx.ring->queue_index] = NULL;
- adapter->q_vector[v_idx] = NULL;
netif_napi_del(&q_vector->napi);
- /* igb_get_stats64() might access the rings on this vector,
- * we must wait a grace period before freeing it.
- */
- kfree_rcu(q_vector, rcu);
+}
+
+static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
+{
+ int v_idx = adapter->num_q_vectors;
+
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
+ pci_disable_msix(adapter->pdev);
+ else if (adapter->flags & IGB_FLAG_HAS_MSI)
+ pci_disable_msi(adapter->pdev);
+
+ while (v_idx--)
+ igb_reset_q_vector(adapter, v_idx);
}
/**
@@ -1038,8 +1053,10 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)
adapter->num_rx_queues = 0;
adapter->num_q_vectors = 0;
- while (v_idx--)
+ while (v_idx--) {
+ igb_reset_q_vector(adapter, v_idx);
igb_free_q_vector(adapter, v_idx);
+ }
}
/**
@@ -1070,6 +1087,7 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
if (!msix)
goto msi_only;
+ adapter->flags |= IGB_FLAG_HAS_MSIX;
/* Number of supported queues. */
adapter->num_rx_queues = adapter->rss_queues;
@@ -1090,12 +1108,6 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
/* add 1 vector for link status interrupts */
numvecs++;
- adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
- GFP_KERNEL);
-
- if (!adapter->msix_entries)
- goto msi_only;
-
for (i = 0; i < numvecs; i++)
adapter->msix_entries[i].entry = i;
@@ -1172,7 +1184,9 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
(sizeof(struct igb_ring) * ring_count);
/* allocate q_vector and rings */
- q_vector = kzalloc(size, GFP_KERNEL);
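+	/* reuse the q_vector left in place by a previous queue reset, if any */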
+ q_vector = adapter->q_vector[v_idx];
+ if (!q_vector)
+ q_vector = kzalloc(size, GFP_KERNEL);
if (!q_vector)
return -ENOMEM;
@@ -1370,7 +1384,7 @@ static int igb_request_irq(struct igb_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
int err = 0;
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
err = igb_request_msix(adapter);
if (!err)
goto request_done;
@@ -1414,7 +1428,7 @@ request_done:
static void igb_free_irq(struct igb_adapter *adapter)
{
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
int vector = 0, i;
free_irq(adapter->msix_entries[vector++].vector, adapter);
@@ -1439,7 +1453,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
* mapped into these registers and so clearing the bits can cause
* issues on the VF drivers so we only need to clear what we set
*/
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 regval = rd32(E1000_EIAM);
wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
wr32(E1000_EIMC, adapter->eims_enable_mask);
@@ -1450,7 +1464,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
wr32(E1000_IAM, 0);
wr32(E1000_IMC, ~0);
wrfl();
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
int i;
for (i = 0; i < adapter->num_q_vectors; i++)
synchronize_irq(adapter->msix_entries[i].vector);
@@ -1467,7 +1481,7 @@ static void igb_irq_enable(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
u32 regval = rd32(E1000_EIAC);
wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
@@ -1607,6 +1621,73 @@ static void igb_power_down_link(struct igb_adapter *adapter)
}
/**
+ * igb_check_swap_media - Detect and switch function for Media Auto Sense
+ * @adapter: address of the board private structure
+ **/
+static void igb_check_swap_media(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_ext, connsw;
+ bool swap_now = false;
+
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ connsw = rd32(E1000_CONNSW);
+
+ /* need to live swap if current media is copper and we have fiber/serdes
+ * to go to.
+ */
+
+ if ((hw->phy.media_type == e1000_media_type_copper) &&
+ (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
+ swap_now = true;
+ } else if (!(connsw & E1000_CONNSW_SERDESD)) {
+ /* copper signal takes time to appear */
+ if (adapter->copper_tries < 4) {
+ adapter->copper_tries++;
+ connsw |= E1000_CONNSW_AUTOSENSE_CONF;
+ wr32(E1000_CONNSW, connsw);
+ return;
+ } else {
+ adapter->copper_tries = 0;
+ if ((connsw & E1000_CONNSW_PHYSD) &&
+ (!(connsw & E1000_CONNSW_PHY_PDN))) {
+ swap_now = true;
+ connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
+ wr32(E1000_CONNSW, connsw);
+ }
+ }
+ }
+
+ if (!swap_now)
+ return;
+
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ netdev_info(adapter->netdev,
+ "MAS: changing media to fiber/serdes\n");
+ ctrl_ext |=
+ E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ adapter->copper_tries = 0;
+ break;
+ case e1000_media_type_internal_serdes:
+ case e1000_media_type_fiber:
+ netdev_info(adapter->netdev,
+ "MAS: changing media to copper\n");
+ ctrl_ext &=
+ ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ break;
+ default:
+ /* shouldn't get here during regular operation */
+ netdev_err(adapter->netdev,
+ "AMS: Invalid media type found, returning\n");
+ break;
+ }
+ wr32(E1000_CTRL_EXT, ctrl_ext);
+}
+
+/**
* igb_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
**/
@@ -1623,7 +1704,7 @@ int igb_up(struct igb_adapter *adapter)
for (i = 0; i < adapter->num_q_vectors; i++)
napi_enable(&(adapter->q_vector[i]->napi));
- if (adapter->msix_entries)
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
igb_configure_msix(adapter);
else
igb_assign_vector(adapter->q_vector[0], 0);
@@ -1719,6 +1800,37 @@ void igb_reinit_locked(struct igb_adapter *adapter)
clear_bit(__IGB_RESETTING, &adapter->state);
}
+/**
+ * igb_enable_mas - Media Autosense re-enable after swap
+ * @adapter: adapter struct
+ **/
+static s32 igb_enable_mas(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 connsw;
+ s32 ret_val = 0;
+
+ connsw = rd32(E1000_CONNSW);
+	if (hw->phy.media_type != e1000_media_type_copper)
+ return ret_val;
+
+ /* configure for SerDes media detect */
+ if (!(connsw & E1000_CONNSW_SERDESD)) {
+ connsw |= E1000_CONNSW_ENRGSRC;
+ connsw |= E1000_CONNSW_AUTOSENSE_EN;
+ wr32(E1000_CONNSW, connsw);
+ wrfl();
+ } else if (connsw & E1000_CONNSW_SERDESD) {
+ /* already SerDes, no need to enable anything */
+ return ret_val;
+ } else {
+ netdev_info(adapter->netdev,
+ "MAS: Unable to configure feature, disabling..\n");
+ adapter->flags &= ~IGB_FLAG_MAS_ENABLE;
+ }
+ return ret_val;
+}
+
void igb_reset(struct igb_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
@@ -1830,6 +1942,16 @@ void igb_reset(struct igb_adapter *adapter)
hw->mac.ops.reset_hw(hw);
wr32(E1000_WUC, 0);
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ /* need to resetup here after media swap */
+ adapter->ei.get_invariants(hw);
+ adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
+ }
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ if (igb_enable_mas(adapter))
+ dev_err(&pdev->dev,
+ "Error enabling Media Auto Sense\n");
+ }
if (hw->mac.ops.init_hw(hw))
dev_err(&pdev->dev, "Hardware Error\n");
@@ -1976,6 +2098,58 @@ void igb_set_fw_version(struct igb_adapter *adapter)
}
/**
+ * igb_init_mas - init Media Autosense feature if enabled in the NVM
+ * @adapter: adapter struct
+ **/
+static void igb_init_mas(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 eeprom_data;
+
+ hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
+ switch (hw->bus.func) {
+ case E1000_FUNC_0:
+ if (eeprom_data & IGB_MAS_ENABLE_0) {
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ netdev_info(adapter->netdev,
+ "MAS: Enabling Media Autosense for port %d\n",
+ hw->bus.func);
+ }
+ break;
+ case E1000_FUNC_1:
+ if (eeprom_data & IGB_MAS_ENABLE_1) {
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ netdev_info(adapter->netdev,
+ "MAS: Enabling Media Autosense for port %d\n",
+ hw->bus.func);
+ }
+ break;
+ case E1000_FUNC_2:
+ if (eeprom_data & IGB_MAS_ENABLE_2) {
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ netdev_info(adapter->netdev,
+ "MAS: Enabling Media Autosense for port %d\n",
+ hw->bus.func);
+ }
+ break;
+ case E1000_FUNC_3:
+ if (eeprom_data & IGB_MAS_ENABLE_3) {
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ netdev_info(adapter->netdev,
+ "MAS: Enabling Media Autosense for port %d\n",
+ hw->bus.func);
+ }
+ break;
+ default:
+ /* Shouldn't get here */
+ netdev_err(adapter->netdev,
+ "MAS: Invalid port configuration, returning\n");
+ break;
+ }
+}
+
+/**
* igb_init_i2c - Init I2C interface
* @adapter: pointer to adapter structure
**/
@@ -2022,7 +2196,6 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
s32 ret_val;
static int global_quad_port_a; /* global quad port a indication */
const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
- unsigned long mmio_start, mmio_len;
int err, pci_using_dac;
u8 part_str[E1000_PBANUM_LENGTH];
@@ -2079,11 +2252,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->back = adapter;
adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
- mmio_start = pci_resource_start(pdev, 0);
- mmio_len = pci_resource_len(pdev, 0);
-
err = -EIO;
- hw->hw_addr = ioremap(mmio_start, mmio_len);
+ hw->hw_addr = pci_iomap(pdev, 0, 0);
if (!hw->hw_addr)
goto err_ioremap;
@@ -2093,8 +2263,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
- netdev->mem_start = mmio_start;
- netdev->mem_end = mmio_start + mmio_len;
+ netdev->mem_start = pci_resource_start(pdev, 0);
+ netdev->mem_end = pci_resource_end(pdev, 0);
/* PCI config space info */
hw->vendor_id = pdev->vendor;
@@ -2350,6 +2520,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->ets = false;
}
#endif
+ /* Check if Media Autosense is enabled */
+ adapter->ei = *ei;
+ if (hw->dev_spec._82575.mas_capable)
+ igb_init_mas(adapter);
+
/* do hw tstamp init after resetting */
igb_ptp_init(adapter);
@@ -2382,7 +2557,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
dev_info(&pdev->dev,
"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
- adapter->msix_entries ? "MSI-X" :
+ (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
adapter->num_rx_queues, adapter->num_tx_queues);
switch (hw->mac.type) {
@@ -2470,7 +2645,7 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
int err = 0;
int i;
- if (!adapter->msix_entries || num_vfs > 7) {
+ if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
err = -EPERM;
goto out;
}
@@ -3935,6 +4110,7 @@ static void igb_watchdog_task(struct work_struct *work)
struct net_device *netdev = adapter->netdev;
u32 link;
int i;
+ u32 connsw;
link = igb_has_link(adapter);
@@ -3945,7 +4121,21 @@ static void igb_watchdog_task(struct work_struct *work)
link = false;
}
+ /* Force link down if we have fiber to swap to */
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ connsw = rd32(E1000_CONNSW);
+ if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
+ link = 0;
+ }
+ }
if (link) {
+ /* Perform a reset if the media type changed. */
+ if (hw->dev_spec._82575.media_changed) {
+ hw->dev_spec._82575.media_changed = false;
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ igb_reset(adapter);
+ }
/* Cancel scheduled suspend requests. */
pm_runtime_resume(netdev->dev.parent);
@@ -4026,8 +4216,27 @@ static void igb_watchdog_task(struct work_struct *work)
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
+ /* link is down, time to check for alternate media */
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ igb_check_swap_media(adapter);
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ schedule_work(&adapter->reset_task);
+ /* return immediately */
+ return;
+ }
+ }
pm_schedule_suspend(netdev->dev.parent,
MSEC_PER_SEC * 5);
+
+ /* also check for alternate media here */
+ } else if (!netif_carrier_ok(netdev) &&
+ (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
+ igb_check_swap_media(adapter);
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ schedule_work(&adapter->reset_task);
+ /* return immediately */
+ return;
+ }
}
}
@@ -4056,7 +4265,7 @@ static void igb_watchdog_task(struct work_struct *work)
}
/* Cause software interrupt to ensure Rx ring is cleaned */
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 eics = 0;
for (i = 0; i < adapter->num_q_vectors; i++)
eics |= adapter->q_vector[i]->eims_value;
@@ -5977,7 +6186,7 @@ static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
}
if (!test_bit(__IGB_DOWN, &adapter->state)) {
- if (adapter->msix_entries)
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
wr32(E1000_EIMS, q_vector->eims_value);
else
igb_irq_enable(adapter);
@@ -7344,7 +7553,7 @@ static void igb_netpoll(struct net_device *netdev)
for (i = 0; i < adapter->num_q_vectors; i++) {
q_vector = adapter->q_vector[i];
- if (adapter->msix_entries)
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
wr32(E1000_EIMC, q_vector->eims_value);
else
igb_irq_disable(adapter);
@@ -7842,7 +8051,7 @@ int igb_reinit_queues(struct igb_adapter *adapter)
if (netif_running(netdev))
igb_close(netdev);
- igb_clear_interrupt_scheme(adapter);
+ igb_reset_interrupt_capability(adapter);
if (igb_init_interrupt_scheme(adapter, true)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 04bf22e5ee31..675435fc2e53 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1745,7 +1745,7 @@ static int igbvf_set_mac(struct net_device *netdev, void *p)
hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
- if (memcmp(addr->sa_data, hw->mac.addr, 6))
+ if (!ether_addr_equal(addr->sa_data, hw->mac.addr))
return -EADDRNOTAVAIL;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index f38fc0a343a2..49531cd18987 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -552,8 +552,10 @@ struct hwmon_attr {
};
struct hwmon_buff {
- struct device *device;
- struct hwmon_attr *hwmon_list;
+ struct attribute_group group;
+ const struct attribute_group *groups[2];
+ struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1];
+ struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4];
unsigned int n_hwmon;
};
#endif /* CONFIG_IXGBE_HWMON */
@@ -775,7 +777,7 @@ struct ixgbe_adapter {
u32 vferr_refcount;
struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
- struct hwmon_buff ixgbe_hwmon_buff;
+ struct hwmon_buff *ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
#ifdef CONFIG_DEBUG_FS
struct dentry *ixgbe_dbg_adapter;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0c55079ebee3..cc06854296a3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4251,8 +4251,8 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
rx_ring->l2_accel_priv = NULL;
}
-int ixgbe_fwd_ring_down(struct net_device *vdev,
- struct ixgbe_fwd_adapter *accel)
+static int ixgbe_fwd_ring_down(struct net_device *vdev,
+ struct ixgbe_fwd_adapter *accel)
{
struct ixgbe_adapter *adapter = accel->real_adapter;
unsigned int rxbase = accel->rx_base_queue;
@@ -7986,10 +7986,9 @@ skip_sriov:
NETIF_F_TSO |
NETIF_F_TSO6 |
NETIF_F_RXHASH |
- NETIF_F_RXCSUM |
- NETIF_F_HW_L2FW_DOFFLOAD;
+ NETIF_F_RXCSUM;
- netdev->hw_features = netdev->features;
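+	/* expose L2 forwarding offload as an optional feature the user can
+	 * enable, instead of turning it on by default
+	 */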
+ netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD;
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index e4c676006be9..39217e5ff7dc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -46,6 +46,7 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl);
static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
+static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
/**
* ixgbe_identify_phy_generic - Get physical layer module
@@ -1164,7 +1165,7 @@ err_read_i2c_eeprom:
*
* Searches for and identifies the QSFP module and assigns appropriate PHY type
**/
-s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
{
struct ixgbe_adapter *adapter = hw->back;
s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index aae900a256da..fffcbdd2bf0e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -145,7 +145,6 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
-s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index d6f0c0d8cf11..359f6e60320d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -291,7 +291,9 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
{
struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
int err;
+#ifdef CONFIG_PCI_IOV
u32 current_flags = adapter->flags;
+#endif
err = ixgbe_disable_sriov(adapter);
@@ -715,8 +717,7 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
}
if (adapter->vfinfo[vf].pf_set_mac &&
- memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac,
- ETH_ALEN)) {
+ !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
e_warn(drv,
"VF %d attempted to override administratively set MAC address\n"
"Reload the VF driver to resume operations\n",
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index d118def16f35..e74ae3682733 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -111,29 +111,29 @@ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter,
unsigned int n_attr;
struct hwmon_attr *ixgbe_attr;
- n_attr = adapter->ixgbe_hwmon_buff.n_hwmon;
- ixgbe_attr = &adapter->ixgbe_hwmon_buff.hwmon_list[n_attr];
+ n_attr = adapter->ixgbe_hwmon_buff->n_hwmon;
+ ixgbe_attr = &adapter->ixgbe_hwmon_buff->hwmon_list[n_attr];
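+	/* hwmon sysfs names are 1-based (temp1_input, temp2_input, ...),
+	 * while our sensor offsets start at 0, hence the offset + 1 below
+	 */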
switch (type) {
case IXGBE_HWMON_TYPE_LOC:
ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_location;
snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
- "temp%u_label", offset);
+ "temp%u_label", offset + 1);
break;
case IXGBE_HWMON_TYPE_TEMP:
ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_temp;
snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
- "temp%u_input", offset);
+ "temp%u_input", offset + 1);
break;
case IXGBE_HWMON_TYPE_CAUTION:
ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_cautionthresh;
snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
- "temp%u_max", offset);
+ "temp%u_max", offset + 1);
break;
case IXGBE_HWMON_TYPE_MAX:
ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_maxopthresh;
snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
- "temp%u_crit", offset);
+ "temp%u_crit", offset + 1);
break;
default:
rc = -EPERM;
@@ -147,32 +147,17 @@ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter,
ixgbe_attr->dev_attr.store = NULL;
ixgbe_attr->dev_attr.attr.mode = S_IRUGO;
ixgbe_attr->dev_attr.attr.name = ixgbe_attr->name;
+ sysfs_attr_init(&ixgbe_attr->dev_attr.attr);
- rc = device_create_file(&adapter->pdev->dev,
- &ixgbe_attr->dev_attr);
+ adapter->ixgbe_hwmon_buff->attrs[n_attr] = &ixgbe_attr->dev_attr.attr;
- if (rc == 0)
- ++adapter->ixgbe_hwmon_buff.n_hwmon;
+ ++adapter->ixgbe_hwmon_buff->n_hwmon;
- return rc;
+ return 0;
}
static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter *adapter)
{
- int i;
-
- if (adapter == NULL)
- return;
-
- for (i = 0; i < adapter->ixgbe_hwmon_buff.n_hwmon; i++) {
- device_remove_file(&adapter->pdev->dev,
- &adapter->ixgbe_hwmon_buff.hwmon_list[i].dev_attr);
- }
-
- kfree(adapter->ixgbe_hwmon_buff.hwmon_list);
-
- if (adapter->ixgbe_hwmon_buff.device)
- hwmon_device_unregister(adapter->ixgbe_hwmon_buff.device);
}
/* called from ixgbe_main.c */
@@ -184,9 +169,9 @@ void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter)
/* called from ixgbe_main.c */
int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
{
- struct hwmon_buff *ixgbe_hwmon = &adapter->ixgbe_hwmon_buff;
+ struct hwmon_buff *ixgbe_hwmon;
+ struct device *hwmon_dev;
unsigned int i;
- int n_attrs;
int rc = 0;
/* If this method isn't defined we don't support thermals */
@@ -198,23 +183,13 @@ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
if (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw))
goto exit;
- /*
- * Allocation space for max attributs
- * max num sensors * values (loc, temp, max, caution)
- */
- n_attrs = IXGBE_MAX_SENSORS * 4;
- ixgbe_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
- GFP_KERNEL);
- if (!ixgbe_hwmon->hwmon_list) {
+ ixgbe_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*ixgbe_hwmon),
+ GFP_KERNEL);
+ if (ixgbe_hwmon == NULL) {
rc = -ENOMEM;
- goto err;
- }
-
- ixgbe_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
- if (IS_ERR(ixgbe_hwmon->device)) {
- rc = PTR_ERR(ixgbe_hwmon->device);
- goto err;
+ goto exit;
}
+ adapter->ixgbe_hwmon_buff = ixgbe_hwmon;
for (i = 0; i < IXGBE_MAX_SENSORS; i++) {
/*
@@ -226,17 +201,28 @@ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
/* Bail if any hwmon attr struct fails to initialize */
rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_CAUTION);
- rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC);
- rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP);
- rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX);
if (rc)
- goto err;
+ goto exit;
+ rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC);
+ if (rc)
+ goto exit;
+ rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP);
+ if (rc)
+ goto exit;
+ rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX);
+ if (rc)
+ goto exit;
}
- goto exit;
+ ixgbe_hwmon->groups[0] = &ixgbe_hwmon->group;
+ ixgbe_hwmon->group.attrs = ixgbe_hwmon->attrs;
-err:
- ixgbe_sysfs_del_adapter(adapter);
+ hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev,
+ "ixgbe",
+ ixgbe_hwmon,
+ ixgbe_hwmon->groups);
+ if (IS_ERR(hwmon_dev))
+ rc = PTR_ERR(hwmon_dev);
exit:
return rc;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 8971e2d0a984..bb76e96f8278 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -86,9 +86,7 @@ struct ixgbevf_ring {
u64 bp_misses;
u64 bp_cleaned;
#endif
-
- u16 head;
- u16 tail;
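+	/* pointer to the ring's mapped tail register */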
+ u8 __iomem *tail;
u16 reg_idx; /* holds the special value that gets the hardware register
* offset associated with this ring, which is different
@@ -356,6 +354,7 @@ struct ixgbevf_adapter {
u32 flags;
#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1)
#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 1)
+#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2)
/* OS defined structs */
struct net_device *netdev;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 92ef4cb5a8e8..a5d31674ff42 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -95,13 +95,15 @@ module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* forward decls */
+static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
-static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
- struct ixgbevf_ring *rx_ring,
+static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
u32 val)
{
+ rx_ring->next_to_use = val;
+
/*
* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
@@ -109,7 +111,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
* such as IA-64).
*/
wmb();
- IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
+ writel(val, rx_ring->tail);
}
/**
@@ -406,10 +408,8 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
}
no_buffers:
- if (rx_ring->next_to_use != i) {
- rx_ring->next_to_use = i;
- ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
- }
+ if (rx_ring->next_to_use != i)
+ ixgbevf_release_rx_desc(rx_ring, i);
}
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
@@ -1110,8 +1110,9 @@ static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
- adapter->tx_ring[i].head = IXGBE_VFTDH(j);
- adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
+ ring->tail = hw->hw_addr + IXGBE_VFTDT(j);
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
/* Disable Tx Head Writeback RO bit, since this hoses
* bookkeeping if things aren't delivered in order.
*/
@@ -1208,20 +1209,22 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
/* set_rx_buffer_len must be called before ring initialization */
ixgbevf_set_rx_buffer_len(adapter);
- rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring */
for (i = 0; i < adapter->num_rx_queues; i++) {
- rdba = adapter->rx_ring[i].dma;
- j = adapter->rx_ring[i].reg_idx;
+ struct ixgbevf_ring *ring = &adapter->rx_ring[i];
+ rdba = ring->dma;
+ j = ring->reg_idx;
+ rdlen = ring->count * sizeof(union ixgbe_adv_rx_desc);
IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
(rdba & DMA_BIT_MASK(32)));
IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
- adapter->rx_ring[i].head = IXGBE_VFRDH(j);
- adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
+ ring->tail = hw->hw_addr + IXGBE_VFRDT(j);
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
ixgbevf_configure_srrctl(adapter, j);
}
@@ -1366,11 +1369,51 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
}
}
+static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ unsigned int def_q = 0;
+ unsigned int num_tcs = 0;
+ unsigned int num_rx_queues = 1;
+ int err;
+
+ spin_lock_bh(&adapter->mbx_lock);
+
+ /* fetch queue configuration from the PF */
+ err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
+
+ spin_unlock_bh(&adapter->mbx_lock);
+
+ if (err)
+ return err;
+
+ if (num_tcs > 1) {
+ /* update default Tx ring register index */
+ adapter->tx_ring[0].reg_idx = def_q;
+
+ /* we need as many queues as traffic classes */
+ num_rx_queues = num_tcs;
+ }
+
+ /* if we have a bad config abort request queue reset */
+ if (adapter->num_rx_queues != num_rx_queues) {
+ /* force mailbox timeout to prevent further messages */
+ hw->mbx.timeout = 0;
+
+ /* wait for watchdog to come around and bail us out */
+ adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
+ }
+
+ return 0;
+}
+
static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int i;
+ ixgbevf_configure_dcb(adapter);
+
ixgbevf_set_rx_mode(netdev);
ixgbevf_restore_vlan(adapter);
@@ -1402,7 +1445,7 @@ static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
rxr);
- ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
+ ixgbevf_release_rx_desc(&adapter->rx_ring[rxr],
(adapter->rx_ring[rxr].count - 1));
}
@@ -1549,85 +1592,10 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
mod_timer(&adapter->watchdog_timer, jiffies);
}
-static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbevf_ring *rx_ring;
- unsigned int def_q = 0;
- unsigned int num_tcs = 0;
- unsigned int num_rx_queues = 1;
- int err, i;
-
- spin_lock_bh(&adapter->mbx_lock);
-
- /* fetch queue configuration from the PF */
- err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
-
- spin_unlock_bh(&adapter->mbx_lock);
-
- if (err)
- return err;
-
- if (num_tcs > 1) {
- /* update default Tx ring register index */
- adapter->tx_ring[0].reg_idx = def_q;
-
- /* we need as many queues as traffic classes */
- num_rx_queues = num_tcs;
- }
-
- /* nothing to do if we have the correct number of queues */
- if (adapter->num_rx_queues == num_rx_queues)
- return 0;
-
- /* allocate new rings */
- rx_ring = kcalloc(num_rx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!rx_ring)
- return -ENOMEM;
-
- /* setup ring fields */
- for (i = 0; i < num_rx_queues; i++) {
- rx_ring[i].count = adapter->rx_ring_count;
- rx_ring[i].queue_index = i;
- rx_ring[i].reg_idx = i;
- rx_ring[i].dev = &adapter->pdev->dev;
- rx_ring[i].netdev = adapter->netdev;
-
- /* allocate resources on the ring */
- err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
- if (err) {
- while (i) {
- i--;
- ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
- }
- kfree(rx_ring);
- return err;
- }
- }
-
- /* free the existing rings and queues */
- ixgbevf_free_all_rx_resources(adapter);
- adapter->num_rx_queues = 0;
- kfree(adapter->rx_ring);
-
- /* move new rings into position on the adapter struct */
- adapter->rx_ring = rx_ring;
- adapter->num_rx_queues = num_rx_queues;
-
- /* reset ring to vector mapping */
- ixgbevf_reset_q_vectors(adapter);
- ixgbevf_map_rings_to_vectors(adapter);
-
- return 0;
-}
-
void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- ixgbevf_reset_queues(adapter);
-
ixgbevf_configure(adapter);
ixgbevf_up_complete(adapter);
@@ -1680,14 +1648,6 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
-
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
-
- if (rx_ring->head)
- writel(0, adapter->hw.hw_addr + rx_ring->head);
- if (rx_ring->tail)
- writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
@@ -1715,14 +1675,6 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
memset(tx_ring->tx_buffer_info, 0, size);
memset(tx_ring->desc, 0, tx_ring->size);
-
- tx_ring->next_to_use = 0;
- tx_ring->next_to_clean = 0;
-
- if (tx_ring->head)
- writel(0, adapter->hw.hw_addr + tx_ring->head);
- if (tx_ring->tail)
- writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
@@ -1889,9 +1841,28 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
**/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
+ struct ixgbe_hw *hw = &adapter->hw;
+ unsigned int def_q = 0;
+ unsigned int num_tcs = 0;
+ int err;
+
/* Start with base case */
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
+
+ spin_lock_bh(&adapter->mbx_lock);
+
+ /* fetch queue configuration from the PF */
+ err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
+
+ spin_unlock_bh(&adapter->mbx_lock);
+
+ if (err)
+ return;
+
+ /* we need as many queues as traffic classes */
+ if (num_tcs > 1)
+ adapter->num_rx_queues = num_tcs;
}
/**
@@ -2340,6 +2311,8 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
bool link_up = adapter->link_up;
s32 need_reset;
+ ixgbevf_queue_reset_subtask(adapter);
+
adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
/*
@@ -2473,8 +2446,6 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
if (!tx_ring->desc)
goto err;
- tx_ring->next_to_use = 0;
- tx_ring->next_to_clean = 0;
return 0;
err:
@@ -2542,9 +2513,6 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
goto alloc_failed;
}
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
-
return 0;
alloc_failed:
return -ENOMEM;
@@ -2614,63 +2582,6 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
&adapter->rx_ring[i]);
}
-static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbevf_ring *rx_ring;
- unsigned int def_q = 0;
- unsigned int num_tcs = 0;
- unsigned int num_rx_queues = 1;
- int err, i;
-
- spin_lock_bh(&adapter->mbx_lock);
-
- /* fetch queue configuration from the PF */
- err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
-
- spin_unlock_bh(&adapter->mbx_lock);
-
- if (err)
- return err;
-
- if (num_tcs > 1) {
- /* update default Tx ring register index */
- adapter->tx_ring[0].reg_idx = def_q;
-
- /* we need as many queues as traffic classes */
- num_rx_queues = num_tcs;
- }
-
- /* nothing to do if we have the correct number of queues */
- if (adapter->num_rx_queues == num_rx_queues)
- return 0;
-
- /* allocate new rings */
- rx_ring = kcalloc(num_rx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!rx_ring)
- return -ENOMEM;
-
- /* setup ring fields */
- for (i = 0; i < num_rx_queues; i++) {
- rx_ring[i].count = adapter->rx_ring_count;
- rx_ring[i].queue_index = i;
- rx_ring[i].reg_idx = i;
- rx_ring[i].dev = &adapter->pdev->dev;
- rx_ring[i].netdev = adapter->netdev;
- }
-
- /* free the existing ring and queues */
- adapter->num_rx_queues = 0;
- kfree(adapter->rx_ring);
-
- /* move new rings into position on the adapter struct */
- adapter->rx_ring = rx_ring;
- adapter->num_rx_queues = num_rx_queues;
-
- return 0;
-}
-
/**
* ixgbevf_open - Called when a network interface is made active
* @netdev: network interface device structure
@@ -2714,11 +2625,6 @@ static int ixgbevf_open(struct net_device *netdev)
}
}
- /* setup queue reg_idx and Rx queue count */
- err = ixgbevf_setup_queues(adapter);
- if (err)
- goto err_setup_queues;
-
/* allocate transmit descriptors */
err = ixgbevf_setup_all_tx_resources(adapter);
if (err)
@@ -2756,7 +2662,6 @@ err_setup_rx:
ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
ixgbevf_free_all_tx_resources(adapter);
-err_setup_queues:
ixgbevf_reset(adapter);
err_setup_reset:
@@ -2788,6 +2693,34 @@ static int ixgbevf_close(struct net_device *netdev)
return 0;
}
+static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *dev = adapter->netdev;
+
+ if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
+ return;
+
+ adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
+
+ /* if interface is down do nothing */
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+ test_bit(__IXGBEVF_RESETTING, &adapter->state))
+ return;
+
+ /* Hardware has to reinitialize queues and interrupts to
+ * match packet buffer alignment. Unfortunately, the
+ * hardware is not flexible enough to do this dynamically.
+ */
+ if (netif_running(dev))
+ ixgbevf_close(dev);
+
+ ixgbevf_clear_interrupt_scheme(adapter);
+ ixgbevf_init_interrupt_scheme(adapter);
+
+ if (netif_running(dev))
+ ixgbevf_open(dev);
+}
+
static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
u32 vlan_macip_lens, u32 type_tucmd,
u32 mss_l4len_idx)
@@ -3181,7 +3114,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
ixgbevf_tx_map(tx_ring, skb, tx_flags),
first, skb->len, hdr_len);
- writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
+ writel(tx_ring->next_to_use, tx_ring->tail);
ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
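The ixgbevf changes above cache the tail register as an MMIO address and defer queue
reconfiguration to a watchdog-driven subtask. A minimal standalone C sketch of that
deferred-reset pattern, with hypothetical names rather than the driver's API:

/* One context flags the reset cheaply; a periodic task services it.
 * All names here are illustrative, not ixgbevf API. */
#include <stdbool.h>
#include <stdio.h>

#define FLAG_QUEUE_RESET_REQUESTED 0x1

struct adapter {
	unsigned int flags;
	bool running;
};

static void request_queue_reset(struct adapter *a)
{
	a->flags |= FLAG_QUEUE_RESET_REQUESTED;	/* cheap request, no teardown here */
}

static void queue_reset_subtask(struct adapter *a)
{
	if (!(a->flags & FLAG_QUEUE_RESET_REQUESTED))
		return;					/* nothing to do */
	a->flags &= ~FLAG_QUEUE_RESET_REQUESTED;	/* consume the request */
	if (a->running)
		printf("close, rebuild queue/interrupt layout, reopen\n");
}

int main(void)
{
	struct adapter a = { .flags = 0, .running = true };

	request_queue_reset(&a);	/* e.g. the PF reports a new TC count */
	queue_reset_subtask(&a);	/* serviced later from the watchdog */
	return 0;
}

Setting a flag is safe from mailbox or interrupt context; the heavyweight
close/reopen then runs later from process context.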
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 6a6c1f76d8e0..974a007c4277 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -9,8 +9,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Copyright (C) 2011 John Crispin <blogic@openwrt.org>
*/
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 61088a6a9424..a2565ce22b7c 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -33,8 +33,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -2067,23 +2066,6 @@ static inline void oom_timer_wrapper(unsigned long data)
napi_schedule(&mp->napi);
}
-static void phy_reset(struct mv643xx_eth_private *mp)
-{
- int data;
-
- data = phy_read(mp->phy, MII_BMCR);
- if (data < 0)
- return;
-
- data |= BMCR_RESET;
- if (phy_write(mp->phy, MII_BMCR, data) < 0)
- return;
-
- do {
- data = phy_read(mp->phy, MII_BMCR);
- } while (data >= 0 && data & BMCR_RESET);
-}
-
static void port_start(struct mv643xx_eth_private *mp)
{
u32 pscr;
@@ -2096,8 +2078,9 @@ static void port_start(struct mv643xx_eth_private *mp)
struct ethtool_cmd cmd;
mv643xx_eth_get_settings(mp->dev, &cmd);
- phy_reset(mp);
+ phy_init_hw(mp->phy);
mv643xx_eth_set_settings(mp->dev, &cmd);
+ phy_start(mp->phy);
}
/*
@@ -2293,7 +2276,8 @@ static int mv643xx_eth_stop(struct net_device *dev)
del_timer_sync(&mp->rx_oom);
netif_carrier_off(dev);
-
+ if (mp->phy)
+ phy_stop(mp->phy);
free_irq(dev->irq, dev);
port_reset(mp);
@@ -2764,8 +2748,6 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
{
struct phy_device *phy = mp->phy;
- phy_reset(mp);
-
if (speed == 0) {
phy->autoneg = AUTONEG_ENABLE;
phy->speed = 0;
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 7354960b583b..c4eeb69a5bee 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -92,6 +92,12 @@ static int orion_mdio_wait_ready(struct mii_bus *bus)
if (time_is_before_jiffies(end))
++timedout;
} else {
+ /* wait_event_timeout does not guarantee a delay of at
+ * least one whole jiffie, so timeout must be no less
+ * than two.
+ */
+ if (timeout < 2)
+ timeout = 2;
wait_event_timeout(dev->smi_busy_wait,
orion_mdio_smi_is_done(dev),
timeout);
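The clamp above guards against wait_event_timeout() expiring almost immediately when
handed a 1-jiffy timeout; the rule reduces to rounding any sub-2 value up to 2. A
trivial sketch:

#include <stdio.h>

/* Round a jiffies timeout up so it spans at least one whole tick. */
static unsigned long clamp_timeout(unsigned long jiffies_timeout)
{
	return jiffies_timeout < 2 ? 2 : jiffies_timeout;
}

int main(void)
{
	printf("%lu\n", clamp_timeout(1));	/* prints 2 */
	return 0;
}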
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index b8e232b4ea2d..d5f0d72e5e33 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1378,7 +1378,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
dev_kfree_skb_any(skb);
dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
- rx_desc->data_size, DMA_FROM_DEVICE);
+ MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
}
if (rx_done)
@@ -1424,7 +1424,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
}
dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
- rx_desc->data_size, DMA_FROM_DEVICE);
+ MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
rx_bytes = rx_desc->data_size -
(ETH_FCS_LEN + MVNETA_MH_SIZE);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index fff62460185c..452e81de33de 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -19,8 +19,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/init.h>
@@ -321,23 +320,6 @@ static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
wrl(pep, PHY_ADDRESS, reg_data);
}
-static void ethernet_phy_reset(struct pxa168_eth_private *pep)
-{
- int data;
-
- data = phy_read(pep->phy, MII_BMCR);
- if (data < 0)
- return;
-
- data |= BMCR_RESET;
- if (phy_write(pep->phy, MII_BMCR, data) < 0)
- return;
-
- do {
- data = phy_read(pep->phy, MII_BMCR);
- } while (data >= 0 && data & BMCR_RESET);
-}
-
static void rxq_refill(struct net_device *dev)
{
struct pxa168_eth_private *pep = netdev_priv(dev);
@@ -646,7 +628,7 @@ static void eth_port_start(struct net_device *dev)
struct ethtool_cmd cmd;
pxa168_get_settings(pep->dev, &cmd);
- ethernet_phy_reset(pep);
+ phy_init_hw(pep->phy);
pxa168_set_settings(pep->dev, &cmd);
}
@@ -1383,7 +1365,6 @@ static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
{
struct phy_device *phy = pep->phy;
- ethernet_phy_reset(pep);
phy_attach(pep->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_MII);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 43aa7acd84a6..6509935d145e 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2495,7 +2495,7 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
skb_copy_from_linear_data(re->skb, skb->data, length);
skb->ip_summed = re->skb->ip_summed;
skb->csum = re->skb->csum;
- skb->rxhash = re->skb->rxhash;
+ skb_copy_hash(skb, re->skb);
skb->vlan_proto = re->skb->vlan_proto;
skb->vlan_tci = re->skb->vlan_tci;
@@ -2503,7 +2503,7 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
length, PCI_DMA_FROMDEVICE);
re->skb->vlan_proto = 0;
re->skb->vlan_tci = 0;
- re->skb->rxhash = 0;
+ skb_clear_hash(re->skb);
re->skb->ip_summed = CHECKSUM_NONE;
skb_put(skb, length);
}
@@ -2723,7 +2723,7 @@ static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
struct sk_buff *skb;
skb = sky2->rx_ring[sky2->rx_next].skb;
- skb->rxhash = le32_to_cpu(status);
+ skb_set_hash(skb, le32_to_cpu(status), PKT_HASH_TYPE_L3);
}
/* Process status response ring */
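The sky2 changes migrate from poking skb->rxhash directly to the skb_*_hash()
helpers, which keep the hash value and its type consistent. A toy model of why the
helpers exist (the names below are illustrative, not the kernel API):

#include <stdint.h>
#include <stdio.h>

enum pkt_hash_type { HASH_NONE, HASH_L2, HASH_L3, HASH_L4 };

struct pkt {
	uint32_t hash;
	enum pkt_hash_type hash_type;
};

static void pkt_set_hash(struct pkt *p, uint32_t h, enum pkt_hash_type t)
{
	p->hash = h;
	p->hash_type = t;
}

static void pkt_copy_hash(struct pkt *to, const struct pkt *from)
{
	*to = *from;	/* value and type always move as one unit */
}

static void pkt_clear_hash(struct pkt *p)
{
	p->hash = 0;
	p->hash_type = HASH_NONE;
}

int main(void)
{
	struct pkt rx = {0}, copy = {0};

	pkt_set_hash(&rx, 0xdeadbeef, HASH_L3);	/* as in sky2_rx_hash() */
	pkt_copy_hash(&copy, &rx);		/* as in receive_copy() */
	pkt_clear_hash(&rx);
	printf("copy: %#x type %d\n", copy.hash, copy.hash_type);
	return 0;
}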
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index eb520ab64014..563495d8975a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -6,6 +6,7 @@ config MLX4_EN
tristate "Mellanox Technologies 10Gbit Ethernet support"
depends on PCI
select MLX4_CORE
+ select PTP_1588_CLOCK
---help---
This driver supports Mellanox Technologies ConnectX Ethernet
devices.
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 06fef5b44f77..c3ad464d0627 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -71,9 +71,9 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
return obj;
}
-void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
+void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr)
{
- mlx4_bitmap_free_range(bitmap, obj, 1);
+ mlx4_bitmap_free_range(bitmap, obj, 1, use_rr);
}
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
@@ -118,11 +118,17 @@ u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
return bitmap->avail;
}
-void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
+ int use_rr)
{
obj &= bitmap->max + bitmap->reserved_top - 1;
spin_lock(&bitmap->lock);
+ if (!use_rr) {
+ bitmap->last = min(bitmap->last, obj);
+ bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+ & bitmap->mask;
+ }
bitmap_clear(bitmap->table, obj, cnt);
bitmap->avail += cnt;
spin_unlock(&bitmap->lock);
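The use_rr flag above selects between two free policies for the ID bitmap:
round-robin keeps the search cursor marching forward (wear-leveling across IDs),
while non-RR pulls the cursor back so freed IDs are reused promptly. A simplified,
single-threaded sketch of the difference, without the mlx4 locking or wrap handling:

#include <stdio.h>

struct id_bitmap {
	unsigned long last;	/* next-allocation search hint */
	unsigned long max;
};

static void id_bitmap_free(struct id_bitmap *b, unsigned long obj, int use_rr)
{
	if (!use_rr && obj < b->last)
		b->last = obj;	/* reuse the lowest freed ID first */
	/* round-robin: leave 'last' alone and keep scanning forward */
}

int main(void)
{
	struct id_bitmap b = { .last = 10, .max = 64 };

	id_bitmap_free(&b, 3, 0);	/* MLX4_NO_RR-style free */
	printf("search resumes at %lu\n", b.last);	/* prints 3 */
	return 0;
}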
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 22fcbe78311c..34c5b87a1c2a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -187,7 +187,7 @@ err_put:
mlx4_table_put(dev, &cq_table->table, *cqn);
err_out:
- mlx4_bitmap_free(&cq_table->bitmap, *cqn);
+ mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR);
return err;
}
@@ -217,7 +217,7 @@ void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
mlx4_table_put(dev, &cq_table->table, cqn);
- mlx4_bitmap_free(&cq_table->bitmap, cqn);
+ mlx4_bitmap_free(&cq_table->bitmap, cqn, MLX4_NO_RR);
}
static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index fd6441071319..abaf6bb22416 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -42,6 +42,10 @@ int mlx4_en_timestamp_config(struct net_device *dev, int tx_type, int rx_filter)
int port_up = 0;
int err = 0;
+ if (priv->hwtstamp_config.tx_type == tx_type &&
+ priv->hwtstamp_config.rx_filter == rx_filter)
+ return 0;
+
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
port_up = 1;
@@ -103,19 +107,191 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
struct skb_shared_hwtstamps *hwts,
u64 timestamp)
{
+ unsigned long flags;
u64 nsec;
+ read_lock_irqsave(&mdev->clock_lock, flags);
nsec = timecounter_cyc2time(&mdev->clock, timestamp);
+ read_unlock_irqrestore(&mdev->clock_lock, flags);
memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
hwts->hwtstamp = ns_to_ktime(nsec);
}
+/**
+ * mlx4_en_remove_timestamp - disable PTP device
+ * @mdev: board private structure
+ *
+ * Stop the PTP support.
+ **/
+void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
+{
+ if (mdev->ptp_clock) {
+ ptp_clock_unregister(mdev->ptp_clock);
+ mdev->ptp_clock = NULL;
+ mlx4_info(mdev, "removed PHC\n");
+ }
+}
+
+void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
+{
+ bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
+ mdev->overflow_period);
+ unsigned long flags;
+
+ if (timeout) {
+ write_lock_irqsave(&mdev->clock_lock, flags);
+ timecounter_read(&mdev->clock);
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
+ mdev->last_overflow_check = jiffies;
+ }
+}
+
+/**
+ * mlx4_en_phc_adjfreq - adjust the frequency of the hardware clock
+ * @ptp: ptp clock structure
+ * @delta: Desired frequency change in parts per billion
+ *
+ * Adjust the frequency of the PHC cycle counter by the indicated delta from
+ * the base frequency.
+ **/
+static int mlx4_en_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+{
+ u64 adj;
+ u32 diff, mult;
+ int neg_adj = 0;
+ unsigned long flags;
+ struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
+ ptp_clock_info);
+
+ if (delta < 0) {
+ neg_adj = 1;
+ delta = -delta;
+ }
+ mult = mdev->nominal_c_mult;
+ adj = mult;
+ adj *= delta;
+ diff = div_u64(adj, 1000000000ULL);
+
+ write_lock_irqsave(&mdev->clock_lock, flags);
+ timecounter_read(&mdev->clock);
+ mdev->cycles.mult = neg_adj ? mult - diff : mult + diff;
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
+
+ return 0;
+}
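mlx4_en_phc_adjfreq() scales the nominal cyclecounter multiplier by the requested
parts-per-billion delta. The same arithmetic as a standalone, runnable sketch:

#include <stdint.h>
#include <stdio.h>

/* New multiplier = nominal +/- nominal * |delta| / 1e9. */
static uint32_t adjust_mult(uint32_t nominal_mult, int32_t delta_ppb)
{
	int neg = delta_ppb < 0;
	uint64_t adj = (uint64_t)nominal_mult * (neg ? -delta_ppb : delta_ppb);
	uint32_t diff = (uint32_t)(adj / 1000000000ULL);

	return neg ? nominal_mult - diff : nominal_mult + diff;
}

int main(void)
{
	/* speed up by 100 ppm on a multiplier of 2^24 */
	printf("%u\n", adjust_mult(1u << 24, 100000));
	return 0;
}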
+
+/**
+ * mlx4_en_phc_adjtime - Shift the time of the hardware clock
+ * @ptp: ptp clock structure
+ * @delta: Desired change in nanoseconds
+ *
+ * Adjust the timer by resetting the timecounter structure.
+ **/
+static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
+ ptp_clock_info);
+ unsigned long flags;
+ s64 now;
+
+ write_lock_irqsave(&mdev->clock_lock, flags);
+ now = timecounter_read(&mdev->clock);
+ now += delta;
+ timecounter_init(&mdev->clock, &mdev->cycles, now);
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
+
+ return 0;
+}
+
+/**
+ * mlx4_en_phc_gettime - Reads the current time from the hardware clock
+ * @ptp: ptp clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * Read the timecounter and return the correct value in ns after converting
+ * it into a struct timespec.
+ **/
+static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
+ ptp_clock_info);
+ unsigned long flags;
+ u32 remainder;
+ u64 ns;
+
+ write_lock_irqsave(&mdev->clock_lock, flags);
+ ns = timecounter_read(&mdev->clock);
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
+
+ ts->tv_sec = div_u64_rem(ns, NSEC_PER_SEC, &remainder);
+ ts->tv_nsec = remainder;
+
+ return 0;
+}
+
+/**
+ * mlx4_en_phc_settime - Set the current time on the hardware clock
+ * @ptp: ptp clock structure
+ * @ts: timespec containing the new time for the cycle counter
+ *
+ * Reset the timecounter to use a new base value instead of the kernel
+ * wall timer value.
+ **/
+static int mlx4_en_phc_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
+ ptp_clock_info);
+ u64 ns = timespec_to_ns(ts);
+ unsigned long flags;
+
+ /* reset the timecounter */
+ write_lock_irqsave(&mdev->clock_lock, flags);
+ timecounter_init(&mdev->clock, &mdev->cycles, ns);
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
+
+ return 0;
+}
+
+/**
+ * mlx4_en_phc_enable - enable or disable an ancillary feature
+ * @ptp: ptp clock structure
+ * @request: Desired resource to enable or disable
+ * @on: Caller passes one to enable or zero to disable
+ *
+ * Enable (or disable) ancillary features of the PHC subsystem.
+ * Currently, no ancillary features are supported.
+ **/
+static int mlx4_en_phc_enable(struct ptp_clock_info __always_unused *ptp,
+ struct ptp_clock_request __always_unused *request,
+ int __always_unused on)
+{
+ return -EOPNOTSUPP;
+}
+
+static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .max_adj = 100000000,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 0,
+ .adjfreq = mlx4_en_phc_adjfreq,
+ .adjtime = mlx4_en_phc_adjtime,
+ .gettime = mlx4_en_phc_gettime,
+ .settime = mlx4_en_phc_settime,
+ .enable = mlx4_en_phc_enable,
+};
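Once ptp_clock_register() succeeds, the PHC appears as a character device that
userspace can read as a POSIX clock, the same way the kernel's testptp sample does.
A hedged sketch, assuming the clock landed at /dev/ptp0:

#include <fcntl.h>
#include <stdio.h>
#include <time.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/ptp0", O_RDONLY);	/* device index is illustrative */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
		printf("phc time: %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}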
+
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
struct mlx4_dev *dev = mdev->dev;
+ unsigned long flags;
u64 ns;
+ rwlock_init(&mdev->clock_lock);
+
memset(&mdev->cycles, 0, sizeof(mdev->cycles));
mdev->cycles.read = mlx4_en_read_clock;
mdev->cycles.mask = CLOCKSOURCE_MASK(48);
@@ -127,9 +303,12 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
mdev->cycles.shift = 14;
mdev->cycles.mult =
clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
+ mdev->nominal_c_mult = mdev->cycles.mult;
+ write_lock_irqsave(&mdev->clock_lock, flags);
timecounter_init(&mdev->clock, &mdev->cycles,
ktime_to_ns(ktime_get_real()));
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
/* Calculate the period (in jiffies) at which to call the overflow
* watchdog, making sure the counter is checked at least once per wrap.
@@ -137,15 +316,18 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask);
do_div(ns, NSEC_PER_SEC / 2 / HZ);
mdev->overflow_period = ns;
-}
-void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
-{
- bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
- mdev->overflow_period);
+ /* Configure the PHC */
+ mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
+ snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");
- if (timeout) {
- timecounter_read(&mdev->clock);
- mdev->last_overflow_check = jiffies;
+ mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info,
+ &mdev->pdev->dev);
+ if (IS_ERR(mdev->ptp_clock)) {
+ mdev->ptp_clock = NULL;
+ mlx4_err(mdev, "ptp_clock_register failed\n");
+ } else {
+ mlx4_info(mdev, "registered PHC clock\n");
}
+
}
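The overflow period above bounds how long the 48-bit counter can go unread before
timecounter arithmetic loses a wrap. A back-of-envelope version of that computation,
with illustrative mult/shift values for a ~427 MHz core clock:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask = (1ULL << 48) - 1;	/* CLOCKSOURCE_MASK(48) */
	uint32_t shift = 14;
	uint32_t mult = 38378;	/* ~2.34 ns/cycle at shift 14, illustrative */
	uint64_t wrap_ns = (mask * mult) >> shift;

	printf("counter wraps after ~%llu s; check at least twice per wrap\n",
	       (unsigned long long)(wrap_ns / 1000000000ULL));
	return 0;
}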
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 3a098cc4d349..70e95324a97d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -161,12 +161,16 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
cq->mcq.event = mlx4_en_cq_event;
- if (!cq->is_tx) {
+ if (cq->is_tx) {
+ netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
+ NAPI_POLL_WEIGHT);
+ } else {
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
napi_hash_add(&cq->napi);
- napi_enable(&cq->napi);
}
+ napi_enable(&cq->napi);
+
return 0;
}
@@ -188,12 +192,12 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
+ napi_disable(&cq->napi);
if (!cq->is_tx) {
- napi_disable(&cq->napi);
napi_hash_del(&cq->napi);
synchronize_rcu();
- netif_napi_del(&cq->napi);
}
+ netif_napi_del(&cq->napi);
mlx4_cq_free(priv->mdev->dev, &cq->mcq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 0596f9f85a0e..3e8d33605fe7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1193,6 +1193,9 @@ static int mlx4_en_get_ts_info(struct net_device *dev,
info->rx_filters =
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
+
+ if (mdev->ptp_clock)
+ info->phc_index = ptp_clock_index(mdev->ptp_clock);
}
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 0d087b03a7b0..d357bf5a4686 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -174,6 +174,9 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
mlx4_err(mdev, "Internal error detected, restarting device\n");
break;
+ case MLX4_DEV_EVENT_SLAVE_INIT:
+ case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
+ break;
default:
if (port < 1 || port > dev->caps.num_ports ||
!mdev->pndev[port])
@@ -196,6 +199,9 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
if (mdev->pndev[i])
mlx4_en_destroy_netdev(mdev->pndev[i]);
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+ mlx4_en_remove_timestamp(mdev);
+
flush_workqueue(mdev->workqueue);
destroy_workqueue(mdev->workqueue);
(void) mlx4_mr_free(dev, &mdev->mr);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index e72d8a112a6b..fad45316200a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -468,6 +468,53 @@ static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
memset(&dst_mac[ETH_ALEN], 0, 2);
}
+
+static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
+ int qpn, u64 *reg_id)
+{
+ int err;
+ struct mlx4_spec_list spec_eth_outer = { {NULL} };
+ struct mlx4_spec_list spec_vxlan = { {NULL} };
+ struct mlx4_spec_list spec_eth_inner = { {NULL} };
+
+ struct mlx4_net_trans_rule rule = {
+ .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+ .exclusive = 0,
+ .allow_loopback = 1,
+ .promisc_mode = MLX4_FS_REGULAR,
+ .priority = MLX4_DOMAIN_NIC,
+ };
+
+ __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ return 0; /* do nothing */
+
+ rule.port = priv->port;
+ rule.qpn = qpn;
+ INIT_LIST_HEAD(&rule.list);
+
+ spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
+ memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+
+ spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */
+ spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */
+
+ list_add_tail(&spec_eth_outer.list, &rule.list);
+ list_add_tail(&spec_vxlan.list, &rule.list);
+ list_add_tail(&spec_eth_inner.list, &rule.list);
+
+ err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id);
+ if (err) {
+ en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
+ return err;
+ }
+ en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
+ return 0;
+}
+
+
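mlx4_en_tunnel_steer_add() above composes a steering rule as an ordered list of
match specs: outer Ethernet, then VXLAN, then inner Ethernet. A simplified model of
that composition (types and names are illustrative, not the mlx4 API):

#include <stdio.h>

enum spec_id { SPEC_ETH_OUTER, SPEC_VXLAN, SPEC_ETH_INNER };

struct spec {
	enum spec_id id;
	struct spec *next;
};

struct rule {
	int port, qpn;
	struct spec *specs;	/* matched in list order */
};

static void rule_add_spec(struct rule *r, struct spec *s)
{
	struct spec **p = &r->specs;

	while (*p)
		p = &(*p)->next;	/* append, preserving match order */
	*p = s;
	s->next = NULL;
}

int main(void)
{
	struct spec outer = { SPEC_ETH_OUTER }, vx = { SPEC_VXLAN },
		    inner = { SPEC_ETH_INNER };
	struct rule r = { .port = 1, .qpn = 0x40, .specs = NULL };
	struct spec *s;

	rule_add_spec(&r, &outer);
	rule_add_spec(&r, &vx);
	rule_add_spec(&r, &inner);
	for (s = r.specs; s; s = s->next)
		printf("spec %d\n", s->id);
	return 0;
}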
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
unsigned char *mac, int *qpn, u64 *reg_id)
{
@@ -585,6 +632,11 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
if (err)
goto steer_err;
+ err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
+ &priv->tunnel_reg_id);
+ if (err)
+ goto tunnel_err;
+
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
err = -ENOMEM;
@@ -599,6 +651,9 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
return 0;
alloc_err:
+ if (priv->tunnel_reg_id)
+ mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+tunnel_err:
mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
steer_err:
@@ -642,6 +697,11 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
}
}
+ if (priv->tunnel_reg_id) {
+ mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+ priv->tunnel_reg_id = 0;
+ }
+
en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
priv->port, qpn);
mlx4_qp_release_range(dev, qpn, 1);
@@ -782,7 +842,7 @@ static void update_mclist_flags(struct mlx4_en_priv *priv,
list_for_each_entry(dst_tmp, dst, list) {
found = false;
list_for_each_entry(src_tmp, src, list) {
- if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
+ if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
found = true;
break;
}
@@ -797,7 +857,7 @@ static void update_mclist_flags(struct mlx4_en_priv *priv,
list_for_each_entry(src_tmp, src, list) {
found = false;
list_for_each_entry(dst_tmp, dst, list) {
- if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
+ if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
dst_tmp->action = MCLIST_NONE;
found = true;
break;
@@ -1044,6 +1104,12 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
if (err)
en_err(priv, "Fail to detach multicast address\n");
+ if (mclist->tunnel_reg_id) {
+ err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
+ if (err)
+ en_err(priv, "Failed to detach multicast address\n");
+ }
+
/* remove from list */
list_del(&mclist->list);
kfree(mclist);
@@ -1061,6 +1127,10 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
if (err)
en_err(priv, "Fail to attach multicast address\n");
+ err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
+ &mclist->tunnel_reg_id);
+ if (err)
+ en_err(priv, "Failed to attach multicast address\n");
}
}
}
@@ -1598,6 +1668,15 @@ int mlx4_en_start_port(struct net_device *dev)
goto tx_err;
}
+ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC);
+ if (err) {
+ en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
+ err);
+ goto tx_err;
+ }
+ }
+
/* Init port */
en_dbg(HW, priv, "Initializing port\n");
err = mlx4_INIT_PORT(mdev->dev, priv->port);
@@ -1910,8 +1989,10 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
prof->tx_ring_size, i, TX, node))
goto err;
- if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
- prof->tx_ring_size, TXBB_SIZE, node))
+ if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
+ priv->base_tx_qpn + i,
+ prof->tx_ring_size, TXBB_SIZE,
+ node, i))
goto err;
}
@@ -2025,7 +2106,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static int mlx4_en_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
+static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -2084,11 +2165,21 @@ static int mlx4_en_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
sizeof(config)) ? -EFAULT : 0;
}
+static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
+ sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
+}
+
static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
case SIOCSHWTSTAMP:
- return mlx4_en_hwtstamp_ioctl(dev, ifr);
+ return mlx4_en_hwtstamp_set(dev, ifr);
+ case SIOCGHWTSTAMP:
+ return mlx4_en_hwtstamp_get(dev, ifr);
default:
return -EOPNOTSUPP;
}
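With the SIOCGHWTSTAMP branch above, userspace can read back the live timestamping
configuration instead of blindly re-issuing SIOCSHWTSTAMP. A small sketch of the
getter side, assuming an interface named eth0 and a kernel whose headers provide
SIOCGHWTSTAMP:

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	memset(&cfg, 0, sizeof(cfg));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* illustrative name */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) == 0)
		printf("tx_type %d rx_filter %d\n", cfg.tx_type, cfg.rx_filter);
	else
		perror("SIOCGHWTSTAMP");
	close(fd);
	return 0;
}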
@@ -2154,6 +2245,27 @@ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_st
return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
}
+
+#define PORT_ID_BYTE_LEN 8
+static int mlx4_en_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_port_id *ppid)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_dev *mdev = priv->mdev->dev;
+ int i;
+ u64 phys_port_id = mdev->caps.phys_port_id[priv->port];
+
+ if (!phys_port_id)
+ return -EOPNOTSUPP;
+
+ ppid->id_len = sizeof(phys_port_id);
+ for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
+ ppid->id[i] = phys_port_id & 0xff;
+ phys_port_id >>= 8;
+ }
+ return 0;
+}
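The loop above serializes the 64-bit port GUID most-significant byte first; the
result is what the stack exposes through /sys/class/net/<iface>/phys_port_id. The
same packing as a standalone sketch (the GUID value is made up):

#include <stdint.h>
#include <stdio.h>

#define PORT_ID_BYTE_LEN 8

static void pack_port_id(uint64_t id, uint8_t out[PORT_ID_BYTE_LEN])
{
	int i;

	for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
		out[i] = id & 0xff;	/* low byte lands at the tail */
		id >>= 8;
	}
}

int main(void)
{
	uint8_t buf[PORT_ID_BYTE_LEN];
	int i;

	pack_port_id(0x0002c9030012d1f0ULL, buf);	/* illustrative GUID */
	for (i = 0; i < PORT_ID_BYTE_LEN; i++)
		printf("%02x%s", buf[i], i == 7 ? "\n" : ":");
	return 0;
}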
+
static const struct net_device_ops mlx4_netdev_ops = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
@@ -2179,6 +2291,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = mlx4_en_low_latency_recv,
#endif
+ .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
};
static const struct net_device_ops mlx4_netdev_ops_master = {
@@ -2207,6 +2320,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
+ .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
};
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -2365,6 +2479,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
dev->priv_flags |= IFF_UNICAST_FLT;
+ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
+ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ dev->features |= NETIF_F_GSO_UDP_TUNNEL;
+ }
+
mdev->pndev[port] = dev;
netif_carrier_off(dev);
@@ -2394,6 +2515,15 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
goto out;
}
+ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC);
+ if (err) {
+ en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
+ err);
+ goto out;
+ }
+ }
+
/* Init port */
en_warn(priv, "Initializing port\n");
err = mlx4_INIT_PORT(mdev->dev, priv->port);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index d3f508697a3d..f1a5500ff72d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -68,6 +68,12 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX))
context->param3 |= cpu_to_be32(1 << 30);
+
+ if (!is_tx && !rss &&
+ (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)) {
+ en_dbg(HW, priv, "Setting RX qp %x tunnel mode to RX tunneled & non-tunneled\n", qpn);
+ context->srqn = cpu_to_be32(7 << 28); /* this fills bits 30:28 */
+ }
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 07a1d0fbae47..3b66f26ba049 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -631,6 +631,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
int ip_summed;
int factor = priv->cqe_factor;
u64 timestamp;
+ bool l2_tunnel;
if (!priv->port_up)
return 0;
@@ -709,6 +710,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
length -= ring->fcs_del;
ring->bytes += length;
ring->packets++;
+ l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
+ (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
if (likely(dev->features & NETIF_F_RXCSUM)) {
if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
@@ -738,6 +741,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
gro_skb->data_len = length;
gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (l2_tunnel)
+ gro_skb->encapsulation = 1;
if ((cqe->vlan_my_qpn &
cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
(dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
@@ -747,7 +752,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
}
if (dev->features & NETIF_F_RXHASH)
- gro_skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
+ skb_set_hash(gro_skb,
+ be32_to_cpu(cqe->immed_rss_invalid),
+ PKT_HASH_TYPE_L3);
skb_record_rx_queue(gro_skb, cq->ring);
@@ -788,8 +795,13 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
skb->protocol = eth_type_trans(skb, dev);
skb_record_rx_queue(skb, cq->ring);
+ if (l2_tunnel)
+ skb->encapsulation = 1;
+
if (dev->features & NETIF_F_RXHASH)
- skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
+ skb_set_hash(skb,
+ be32_to_cpu(cqe->immed_rss_invalid),
+ PKT_HASH_TYPE_L3);
if ((be32_to_cpu(cqe->vlan_my_qpn) &
MLX4_CQE_VLAN_PRESENT_MASK) &&
@@ -1053,6 +1065,12 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
rss_context->base_qpn_udp = rss_context->default_qpn;
}
+
+ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n");
+ rss_mask |= MLX4_RSS_BY_INNER_HEADERS;
+ }
+
rss_context->flags = rss_mask;
rss_context->hash_fn = MLX4_RSS_HASH_TOP;
for (i = 0; i < 10; i++)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 40626690e8a8..c11d063473e5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -140,7 +140,6 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- struct mlx4_en_tx_ring *tx_ring;
int i, carrier_ok;
memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
@@ -150,16 +149,10 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
carrier_ok = netif_carrier_ok(dev);
netif_carrier_off(dev);
-retry_tx:
/* Wait until all tx queues are empty.
* There should not be any additional incoming traffic
* since we turned the carrier off. */
msleep(200);
- for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
- tx_ring = priv->tx_ring[i];
- if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
- goto retry_tx;
- }
if (priv->mdev->dev->caps.flags &
MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index f54ebd5a1702..160e86d21607 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -39,6 +39,7 @@
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/tcp.h>
+#include <linux/ip.h>
#include <linux/moduleparam.h>
#include "mlx4_en.h"
@@ -55,7 +56,7 @@ MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring, int qpn, u32 size,
- u16 stride, int node)
+ u16 stride, int node, int queue_index)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_tx_ring *ring;
@@ -140,6 +141,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->bf_enabled = true;
ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
+ ring->queue_index = queue_index;
+
+ if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index))
+ cpumask_set_cpu(queue_index, &ring->affinity_mask);
*pring = ring;
return 0;
@@ -206,6 +211,9 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
&ring->qp, &ring->qp_state);
+ if (!user_prio && cpu_online(ring->queue_index))
+ netif_set_xps_queue(priv->dev, &ring->affinity_mask,
+ ring->queue_index);
return err;
}
@@ -317,7 +325,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
}
}
}
- dev_kfree_skb_any(skb);
+ dev_kfree_skb(skb);
return tx_info->nr_txbb;
}
@@ -354,7 +362,9 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
return cnt;
}
-static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+static int mlx4_en_process_tx_cq(struct net_device *dev,
+ struct mlx4_en_cq *cq,
+ int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
@@ -372,9 +382,10 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
u32 bytes = 0;
int factor = priv->cqe_factor;
u64 timestamp = 0;
+ int done = 0;
if (!priv->port_up)
- return;
+ return 0;
index = cons_index & size_mask;
cqe = &buf[(index << factor) + factor];
@@ -383,7 +394,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
/* Process all completed CQEs */
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
- cons_index & size)) {
+ cons_index & size) && (done < budget)) {
/*
* make sure we read the CQE after we read the
* ownership bit
@@ -421,7 +432,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
txbbs_stamp = txbbs_skipped;
packets++;
bytes += ring->tx_info[ring_index].nr_bytes;
- } while (ring_index != new_index);
+ } while ((++done < budget) && (ring_index != new_index));
++cons_index;
index = cons_index & size_mask;
@@ -447,6 +458,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
netif_tx_wake_queue(ring->tx_queue);
priv->port_stats.wake_queue++;
}
+ return done;
}
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
@@ -454,10 +466,31 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq)
struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
struct mlx4_en_priv *priv = netdev_priv(cq->dev);
- mlx4_en_process_tx_cq(cq->dev, cq);
- mlx4_en_arm_cq(priv, cq);
+ if (priv->port_up)
+ napi_schedule(&cq->napi);
+ else
+ mlx4_en_arm_cq(priv, cq);
}
+/* TX CQ polling - called by NAPI */
+int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
+{
+ struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
+ struct net_device *dev = cq->dev;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int done;
+
+ done = mlx4_en_process_tx_cq(dev, cq, budget);
+
+ /* If we used up all the quota - we're probably not done yet... */
+ if (done < budget) {
+ /* Done for now */
+ napi_complete(napi);
+ mlx4_en_arm_cq(priv, cq);
+ return done;
+ }
+ return budget;
+}
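mlx4_en_poll_tx_cq() follows the standard NAPI contract: if the budget was not
exhausted, complete and re-arm the interrupt; otherwise report the full budget so
the core keeps polling. A generic skeleton of that contract, with process_cq() as a
stand-in for the real completion handler:

#include <stdio.h>

static int process_cq(int budget)
{
	return budget / 2;	/* stand-in: pretend half the budget was used */
}

static int poll(int budget)
{
	int done = process_cq(budget);

	if (done < budget) {
		/* napi_complete() and re-arm the completion interrupt */
		return done;
	}
	return budget;	/* work remains: stay on the poll list */
}

int main(void)
{
	printf("polled %d of 64\n", poll(64));
	return 0;
}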
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
@@ -528,7 +561,10 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
int real_size;
if (skb_is_gso(skb)) {
- *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ if (skb->encapsulation)
+ *lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb);
+ else
+ *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
ALIGN(*lso_header_size + 4, DS_SIZE);
if (unlikely(*lso_header_size != skb_headlen(skb))) {
@@ -827,6 +863,14 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
tx_info->inl = 1;
}
+ if (skb->encapsulation) {
+ struct iphdr *ipv4 = (struct iphdr *)skb_inner_network_header(skb);
+ if (ipv4->protocol == IPPROTO_TCP || ipv4->protocol == IPPROTO_UDP)
+ op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP | MLX4_WQE_CTRL_ILP);
+ else
+ op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP);
+ }
+
ring->prod += nr_txbb;
/* If we used a bounce buffer then copy descriptor back into place */
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index c9cdb2a2c596..ae5212f8f090 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -963,7 +963,7 @@ err_out_free_mtt:
mlx4_mtt_cleanup(dev, &eq->mtt);
err_out_free_eq:
- mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
+ mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
err_out_free_pages:
for (i = 0; i < npages; ++i)
@@ -1018,7 +1018,7 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
eq->page_list[i].map);
kfree(eq->page_list);
- mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
+ mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
mlx4_free_cmd_mailbox(dev, mailbox);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 194928214606..55c4ea740258 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -134,7 +134,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[5] = "Time stamping support",
[6] = "VST (control vlan insertion/stripping) support",
[7] = "FSM (MAC anti-spoofing) support",
- [8] = "Dynamic QP updates support"
+ [8] = "Dynamic QP updates support",
+ [9] = "TCP/IP offloads/flow-steering for VXLAN support"
};
int i;
@@ -207,25 +208,25 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
-#define QUERY_FUNC_CAP_RDMA_PROPS_OFFSET 0x8
-#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET 0xc
+#define QUERY_FUNC_CAP_FLAGS0_OFFSET 0x8
+#define QUERY_FUNC_CAP_FLAGS1_OFFSET 0xc
#define QUERY_FUNC_CAP_QP0_TUNNEL 0x10
#define QUERY_FUNC_CAP_QP0_PROXY 0x14
#define QUERY_FUNC_CAP_QP1_TUNNEL 0x18
#define QUERY_FUNC_CAP_QP1_PROXY 0x1c
+#define QUERY_FUNC_CAP_PHYS_PORT_ID 0x28
-#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC 0x40
-#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN 0x80
+#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC 0x40
+#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN 0x80
+#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO 0x10
-#define QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID 0x80
+#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
if (vhcr->op_modifier == 1) {
- field = 0;
- /* ensure force vlan and force mac bits are not set */
- MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
- /* ensure that phy_wqe_gid bit is not set */
- MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
+ /* Set nic_info bit to mark new fields support */
+ field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);
field = vhcr->in_modifier; /* phys-port = logical-port */
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
@@ -243,6 +244,9 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
size += 2;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);
+ MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
+ QUERY_FUNC_CAP_PHYS_PORT_ID);
+
} else if (vhcr->op_modifier == 0) {
/* enable rdma and ethernet interfaces, and new quota locations */
field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
@@ -391,22 +395,22 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
goto out;
}
+ MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
- MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
- if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) {
+ if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
mlx4_err(dev, "VLAN is enforced on this port\n");
err = -EPROTONOSUPPORT;
goto out;
}
- if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) {
+ if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
mlx4_err(dev, "Force mac is enabled on this port\n");
err = -EPROTONOSUPPORT;
goto out;
}
} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
- MLX4_GET(field, outbox, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
- if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) {
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
+ if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
mlx4_err(dev, "phy_wqe_gid is "
"enforced on this ib port\n");
err = -EPROTONOSUPPORT;
@@ -433,6 +437,10 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
func_cap->qp1_proxy_qpn = size & 0xFFFFFF;
+ if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
+ MLX4_GET(func_cap->phys_port_id, outbox,
+ QUERY_FUNC_CAP_PHYS_PORT_ID);
+
/* All other resources are allocated by the master, but we still report
* 'num' and 'reserved' capabilities as follows:
* - num remains the maximum resource index
@@ -529,6 +537,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
#define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d
+#define QUERY_DEV_CAP_VXLAN 0x9e
dev_cap->flags2 = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -694,6 +703,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
if (field & 1<<6)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
+ if (field & 1<<3)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
MLX4_GET(dev_cap->max_icm_sz, outbox,
QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
@@ -842,6 +854,11 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
field &= 0x7f;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
+ /* For guests, disable vxlan tunneling */
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
+ field &= 0xf7;
+ MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);
+
/* For guests, report Blueflame disabled */
MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
field &= 0x7f;
@@ -1267,6 +1284,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_IN_SIZE 0x200
#define INIT_HCA_VERSION_OFFSET 0x000
#define INIT_HCA_VERSION 2
+#define INIT_HCA_VXLAN_OFFSET 0x0c
#define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
#define INIT_HCA_FLAGS_OFFSET 0x014
#define INIT_HCA_QPC_OFFSET 0x020
@@ -1425,6 +1443,12 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
+ /* set parser VXLAN attributes */
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) {
+ u8 parser_params = 0;
+ MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET);
+ }
+
err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
MLX4_CMD_NATIVE);
@@ -1713,6 +1737,43 @@ int mlx4_NOP(struct mlx4_dev *dev)
return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
}
+int mlx4_get_phys_port_id(struct mlx4_dev *dev)
+{
+ u8 port;
+ u32 *outbox;
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 in_mod;
+ u32 guid_hi, guid_lo;
+ int err, ret = 0;
+#define MOD_STAT_CFG_PORT_OFFSET 8
+#define MOD_STAT_CFG_GUID_H 0x14
+#define MOD_STAT_CFG_GUID_L 0x1c
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ outbox = mailbox->buf;
+
+ for (port = 1; port <= dev->caps.num_ports; port++) {
+ in_mod = port << MOD_STAT_CFG_PORT_OFFSET;
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2,
+ MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err) {
+ mlx4_err(dev, "Fail to get port %d uplink guid\n",
+ port);
+ ret = err;
+ } else {
+ MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H);
+ MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L);
+ dev->caps.phys_port_id[port] = (u64)guid_lo |
+ (u64)guid_hi << 32;
+ }
+ }
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return ret;
+}
+
#define MLX4_WOL_SETUP_MODE (5 << 28)
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
{
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index a0a368b7c939..6811ee00ba7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -140,6 +140,8 @@ struct mlx4_func_cap {
u32 qp1_proxy_qpn;
u8 physical_port;
u8 port_flags;
+ u8 flags1;
+ u64 phys_port_id;
};
struct mlx4_adapter {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 5789ea2c934d..d711158b0d4b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -96,10 +96,10 @@ MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
" To activate device managed"
" flow steering when available, set to -1");
-static bool enable_64b_cqe_eqe;
+static bool enable_64b_cqe_eqe = true;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
- "Enable 64 byte CQEs/EQEs when the FW supports this");
+ "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
#define HCA_GLOBAL_CAP_MASK 0
@@ -388,6 +388,84 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
return 0;
}
+
+static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
+{
+ u32 lnkcap1, lnkcap2;
+ int err1, err2;
+
+#define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */
+
+ *speed = PCI_SPEED_UNKNOWN;
+ *width = PCIE_LNK_WIDTH_UNKNOWN;
+
+ err1 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP, &lnkcap1);
+ err2 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP2, &lnkcap2);
+ if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
+ *speed = PCIE_SPEED_8_0GT;
+ else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
+ *speed = PCIE_SPEED_5_0GT;
+ else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
+ *speed = PCIE_SPEED_2_5GT;
+ }
+ if (!err1) {
+ *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
+ if (!lnkcap2) { /* pre-r3.0 */
+ if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
+ *speed = PCIE_SPEED_5_0GT;
+ else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
+ *speed = PCIE_SPEED_2_5GT;
+ }
+ }
+
+ if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
+ return err1 ? err1 :
+ err2 ? err2 : -EINVAL;
+ }
+ return 0;
+}
+
+static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
+{
+ enum pcie_link_width width, width_cap;
+ enum pci_bus_speed speed, speed_cap;
+ int err;
+
+#define PCIE_SPEED_STR(speed) \
+ (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
+ speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
+ speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
+ "Unknown")
+
+ err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap);
+ if (err) {
+ mlx4_warn(dev,
+ "Unable to determine PCIe device BW capabilities\n");
+ return;
+ }
+
+ err = pcie_get_minimum_link(dev->pdev, &speed, &width);
+ if (err || speed == PCI_SPEED_UNKNOWN ||
+ width == PCIE_LNK_WIDTH_UNKNOWN) {
+ mlx4_warn(dev,
+ "Unable to determine PCI device chain minimum BW\n");
+ return;
+ }
+
+ if (width != width_cap || speed != speed_cap)
+ mlx4_warn(dev,
+ "PCIe BW is different than device's capability\n");
+
+ mlx4_info(dev, "PCIe link speed is %s, device supports %s\n",
+ PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
+ mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n",
+ width, width_cap);
+ return;
+}
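mlx4_get_pcie_dev_link_caps() above decodes LNKCAP/LNKCAP2 into a speed and width.
A standalone sketch of the same bit decoding, with illustrative register values:

#include <stdint.h>
#include <stdio.h>

#define LNKCAP_MLW_MASK  0x3f0	/* maximum link width field */
#define LNKCAP_MLW_SHIFT 4

static const char *speed_str(uint32_t lnkcap2)
{
	if (lnkcap2 & 0x08) return "8.0GT/s";	/* PCIe 3.0 */
	if (lnkcap2 & 0x04) return "5.0GT/s";	/* PCIe 2.0 */
	if (lnkcap2 & 0x02) return "2.5GT/s";	/* PCIe 1.x */
	return "Unknown";
}

int main(void)
{
	uint32_t lnkcap1 = 0x83;	/* x8 width, illustrative */
	uint32_t lnkcap2 = 0x0e;	/* up to 8.0GT/s, illustrative */

	printf("speed %s, width x%u\n", speed_str(lnkcap2),
	       (lnkcap1 & LNKCAP_MLW_MASK) >> LNKCAP_MLW_SHIFT);
	return 0;
}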
+
/*The function checks if there are live vf, return the num of them*/
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
@@ -606,6 +684,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
dev->caps.port_mask[i] = dev->caps.port_type[i];
+ dev->caps.phys_port_id[i] = func_cap.phys_port_id;
if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
&dev->caps.gid_table_len[i],
&dev->caps.pkey_table_len[i]))
@@ -1443,6 +1522,19 @@ static void choose_steering_mode(struct mlx4_dev *dev,
mlx4_log_num_mgm_entry_size);
}
+static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
+ struct mlx4_dev_cap *dev_cap)
+{
+ if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
+ dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
+ dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
+ else
+ dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
+
+ mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode
+ == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
+}
+
static int mlx4_init_hca(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1483,6 +1575,11 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
}
choose_steering_mode(dev, &dev_cap);
+ choose_tunnel_offload_mode(dev, &dev_cap);
+
+ err = mlx4_get_phys_port_id(dev);
+ if (err)
+ mlx4_err(dev, "Failed to get physical port id\n");
if (mlx4_is_master(dev))
mlx4_parav_master_pf_caps(dev);
@@ -1654,7 +1751,7 @@ EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
- mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
+ mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
return;
}
@@ -2287,6 +2384,12 @@ slave_start:
goto err_mfunc;
}
+ /* Check whether the device is functioning at its maximum possible speed.
+ * This call has no return value; it just warns the user when the PCI
+ * Express device capabilities are under-satisfied by the bus.
+ */
+ mlx4_check_pcie_caps(dev);
+
/* In master functions, the communication channel must be initialized
* after obtaining its address from fw */
if (mlx4_is_master(dev)) {
@@ -2635,6 +2738,8 @@ static int __init mlx4_init(void)
return -ENOMEM;
ret = pci_register_driver(&mlx4_driver);
+ if (ret < 0)
+ destroy_workqueue(mlx4_wq);
return ret < 0 ? ret : 0;
}
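
This hunk fixes a leak on a failed modprobe: mlx4_wq is created before pci_register_driver(), so it must be destroyed when registration fails. A sketch of the general unwind pattern (sketch_wq and sketch_driver are placeholders):

static int __init sketch_init(void)
{
	int ret;

	sketch_wq = create_singlethread_workqueue("sketch");
	if (!sketch_wq)
		return -ENOMEM;

	ret = pci_register_driver(&sketch_driver);
	if (ret < 0)
		destroy_workqueue(sketch_wq);	/* undo the earlier allocation */
	return ret;
}
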
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index acf9d5f1f922..bfe65f7be91c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -125,9 +125,14 @@ static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
u32 qpn)
{
- struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[port - 1];
+ struct mlx4_steer *s_steer;
struct mlx4_promisc_qp *pqp;
+ if (port < 1 || port > dev->caps.num_ports)
+ return NULL;
+
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
+
list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
if (pqp->qpn == qpn)
return pqp;
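
The same range check is repeated in each steering helper below because the port number can arrive from untrusted slave (VF) requests, and indexing steer[port - 1] with an out-of-range port would read past the array. The open-coded guard is equivalent to a small predicate (hypothetical, not in the patch):

static inline bool sketch_port_is_valid(struct mlx4_dev *dev, u8 port)
{
	return port >= 1 && port <= dev->caps.num_ports;
}
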
@@ -154,6 +159,9 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 port,
u32 prot;
int err;
+ if (port < 1 || port > dev->caps.num_ports)
+ return -EINVAL;
+
s_steer = &mlx4_priv(dev)->steer[port - 1];
new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
if (!new_entry)
@@ -238,6 +246,9 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
struct mlx4_promisc_qp *pqp;
struct mlx4_promisc_qp *dqp;
+ if (port < 1 || port > dev->caps.num_ports)
+ return -EINVAL;
+
s_steer = &mlx4_priv(dev)->steer[port - 1];
pqp = get_promisc_qp(dev, port, steer, qpn);
@@ -283,6 +294,9 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
struct mlx4_steer_index *tmp_entry, *entry = NULL;
struct mlx4_promisc_qp *dqp, *tmp_dqp;
+ if (port < 1 || port > dev->caps.num_ports)
+ return false;
+
s_steer = &mlx4_priv(dev)->steer[port - 1];
/* if qp is not promisc, it cannot be duplicated */
@@ -324,6 +338,9 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
bool ret = false;
int i;
+ if (port < 1 || port > dev->caps.num_ports)
+ return false;
+
s_steer = &mlx4_priv(dev)->steer[port - 1];
mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -378,6 +395,9 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
int err;
struct mlx4_priv *priv = mlx4_priv(dev);
+ if (port < 1 || port > dev->caps.num_ports)
+ return -EINVAL;
+
s_steer = &mlx4_priv(dev)->steer[port - 1];
mutex_lock(&priv->mcg_table.mutex);
@@ -484,6 +504,9 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
int loc, i;
int err;
+ if (port < 1 || port > dev->caps.num_ports)
+ return -EINVAL;
+
s_steer = &mlx4_priv(dev)->steer[port - 1];
mutex_lock(&priv->mcg_table.mutex);
@@ -674,7 +697,8 @@ const u16 __sw_id_hw[] = {
[MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003,
[MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002,
[MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004,
- [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006
+ [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006,
+ [MLX4_NET_TRANS_RULE_ID_VXLAN] = 0xE008
};
int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
@@ -699,7 +723,9 @@ static const int __rule_hw_sz[] = {
[MLX4_NET_TRANS_RULE_ID_TCP] =
sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
[MLX4_NET_TRANS_RULE_ID_UDP] =
- sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
+ sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
+ [MLX4_NET_TRANS_RULE_ID_VXLAN] =
+ sizeof(struct mlx4_net_trans_rule_hw_vxlan)
};
int mlx4_hw_rule_sz(struct mlx4_dev *dev,
@@ -764,6 +790,13 @@ static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
break;
+ case MLX4_NET_TRANS_RULE_ID_VXLAN:
+ rule_hw->vxlan.vni =
+ cpu_to_be32(be32_to_cpu(spec->vxlan.vni) << 8);
+ rule_hw->vxlan.vni_mask =
+ cpu_to_be32(be32_to_cpu(spec->vxlan.vni_mask) << 8);
+ break;
+
default:
return -EINVAL;
}
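
The shifts place the 24-bit VXLAN Network Identifier into bits 31:8 of the hardware field, matching the VXLAN header layout where the low byte is reserved. A worked example, assuming VNI 0x123456:

/* be32_to_cpu(spec->vxlan.vni) == 0x00123456  (host order, 24-bit VNI)
 * value << 8                   == 0x12345600  (VNI moved to bits 31:8)
 * cpu_to_be32(0x12345600) stores bytes 12 34 56 00, i.e. the VNI in the
 * top three bytes on the wire with the low byte reserved.
 */
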
@@ -910,6 +943,9 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
u8 port = gid[5];
u8 new_entry = 0;
+ if (port < 1 || port > dev->caps.num_ports)
+ return -EINVAL;
+
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
@@ -996,7 +1032,7 @@ out:
index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
- index - dev->caps.num_mgms);
+ index - dev->caps.num_mgms, MLX4_USE_RR);
}
mutex_unlock(&priv->mcg_table.mutex);
@@ -1087,7 +1123,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
index, amgm_index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
- amgm_index - dev->caps.num_mgms);
+ amgm_index - dev->caps.num_mgms, MLX4_USE_RR);
}
} else {
/* Remove entry from AMGM */
@@ -1107,7 +1143,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
prev, index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
- index - dev->caps.num_mgms);
+ index - dev->caps.num_mgms, MLX4_USE_RR);
}
out:
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index e582a41a802b..6ed9dfa5f05b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -783,6 +783,11 @@ enum {
MLX4_PCI_DEV_FORCE_SENSE_PORT = 1 << 1,
};
+enum {
+ MLX4_NO_RR = 0,
+ MLX4_USE_RR = 1,
+};
+
struct mlx4_priv {
struct mlx4_dev dev;
@@ -844,9 +849,10 @@ static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
extern struct workqueue_struct *mlx4_wq;
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
-void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
+void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr);
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
-void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
+ int use_rr);
u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap);
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
u32 reserved_bot, u32 reserved_top);
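
The new use_rr argument distinguishes round-robin resources from those whose ids must stay close to the allocation cursor. A sketch of the free side, assuming the bitmap's lock/last/table/avail fields (the real implementation lives in alloc.c, which is not part of this section):

static void sketch_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr)
{
	spin_lock(&bitmap->lock);
	if (!use_rr)
		/* pull the cursor back so the id can be handed out again
		 * soon, instead of waiting for the round-robin wrap
		 */
		bitmap->last = min(bitmap->last, obj);
	bitmap_clear(bitmap->table, obj, 1);
	bitmap->avail++;
	spin_unlock(&bitmap->lock);
}
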
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index f3758de59c05..2f1e200f2e4c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -45,6 +45,7 @@
#include <linux/dcbnl.h>
#endif
#include <linux/cpu_rmap.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>
@@ -255,6 +256,8 @@ struct mlx4_en_tx_ring {
u16 poll_cnt;
struct mlx4_en_tx_info *tx_info;
u8 *bounce_buf;
+ u8 queue_index;
+ cpumask_t affinity_mask;
u32 last_nr_txbb;
struct mlx4_qp qp;
struct mlx4_qp_context context;
@@ -373,10 +376,14 @@ struct mlx4_en_dev {
u32 priv_pdn;
spinlock_t uar_lock;
u8 mac_removed[MLX4_MAX_PORTS + 1];
+ rwlock_t clock_lock;
+ u32 nominal_c_mult;
struct cyclecounter cycles;
struct timecounter clock;
unsigned long last_overflow_check;
unsigned long overflow_period;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
};
@@ -434,6 +441,7 @@ struct mlx4_en_mc_list {
enum mlx4_en_mclist_act action;
u8 addr[ETH_ALEN];
u64 reg_id;
+ u64 tunnel_reg_id;
};
struct mlx4_en_frag_info {
@@ -565,7 +573,7 @@ struct mlx4_en_priv {
struct list_head filters;
struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
#endif
-
+ u64 tunnel_reg_id;
};
enum mlx4_en_wol {
@@ -719,7 +727,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring,
- int qpn, u32 size, u16 stride, int node);
+ int qpn, u32 size, u16 stride,
+ int node, int queue_index);
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring);
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
@@ -741,6 +750,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
struct mlx4_en_cq *cq,
int budget);
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
+int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget);
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
int is_tx, int rss, int qpn, int cqn, int user_prio,
struct mlx4_qp_context *context);
@@ -786,6 +796,7 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
struct skb_shared_hwtstamps *hwts,
u64 timestamp);
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev);
+void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev);
int mlx4_en_timestamp_config(struct net_device *dev,
int tx_type,
int rx_filter);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index b3ee9bafff5e..0558dddd81e5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -346,7 +346,7 @@ void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+ mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index, MLX4_NO_RR);
}
static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 84cfb40bf451..c6af9619ce6d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -59,7 +59,7 @@ EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn)
{
- mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn);
+ mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn, MLX4_USE_RR);
}
EXPORT_SYMBOL_GPL(mlx4_pd_free);
@@ -96,7 +96,7 @@ EXPORT_SYMBOL_GPL(mlx4_xrcd_alloc);
void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
{
- mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn);
+ mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn, MLX4_USE_RR);
}
void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
@@ -164,7 +164,7 @@ EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
{
- mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index);
+ mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index, MLX4_USE_RR);
}
EXPORT_SYMBOL_GPL(mlx4_uar_free);
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 97d342fa5032..93f75ec78c82 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -800,6 +800,47 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
}
EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
+enum {
+ VXLAN_ENABLE_MODIFY = 1 << 7,
+ VXLAN_STEERING_MODIFY = 1 << 6,
+
+ VXLAN_ENABLE = 1 << 7,
+};
+
+struct mlx4_set_port_vxlan_context {
+ u32 reserved1;
+ u8 modify_flags;
+ u8 reserved2;
+ u8 enable_flags;
+ u8 steering;
+};
+
+int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering)
+{
+ int err;
+ u32 in_mod;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_vxlan_context *context;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+ memset(context, 0, sizeof(*context));
+
+ context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY;
+ context->enable_flags = VXLAN_ENABLE;
+ context->steering = steering;
+
+ in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN);
+
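
SET_PORT multiplexes several per-port sub-commands through the input modifier: the sub-command id goes in bits 15:8 (MLX4_SET_PORT_VXLAN << 8) and the port number in bits 7:0, with op_mod 1 selecting the extended port context. A hypothetical call site, assuming the VXLAN_STEER_* flags from the companion include/linux/mlx4/device.h change (not shown in this section):

static int sketch_enable_vxlan_steering(struct mlx4_dev *dev)
{
	int port, err;

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_SET_PORT_VXLAN(dev, port, VXLAN_STEER_BY_OUTER_MAC);
		if (err)
			return err;
	}
	return 0;
}
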
int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2715e61dbb74..06b2d13d5310 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -250,7 +250,7 @@ void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
return;
- mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
+ mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, MLX4_USE_RR);
}
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 2f3f2bc7f283..2e3232cad0ae 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3634,7 +3634,7 @@ static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
!is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
list_for_each_entry_safe(res, tmp, rlist, list) {
be_mac = cpu_to_be64(res->mac << 16);
- if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
+ if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
return 0;
}
pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 8fdf23753779..1d2f9d3862a7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -117,7 +117,7 @@ err_put:
mlx4_table_put(dev, &srq_table->table, *srqn);
err_out:
- mlx4_bitmap_free(&srq_table->bitmap, *srqn);
+ mlx4_bitmap_free(&srq_table->bitmap, *srqn, MLX4_NO_RR);
return err;
}
@@ -145,7 +145,7 @@ void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
mlx4_table_put(dev, &srq_table->table, srqn);
- mlx4_bitmap_free(&srq_table->bitmap, srqn);
+ mlx4_bitmap_free(&srq_table->bitmap, srqn, MLX4_NO_RR);
}
static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index 0951f7aca1ef..822616e3c375 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -459,8 +459,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
- &ctl->sg, 1, DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
+ &ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!ctl->adesc)
return NETDEV_TX_BUSY;
@@ -571,8 +570,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev)
sg_dma_len(sg) = DMA_BUFFER_SIZE;
ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
- sg, 1, DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
+ sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
if (!ctl->adesc)
goto out;
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index ddd252a3da9c..8e9dad770900 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4128,10 +4128,10 @@ static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
int i;
int j = ADDITIONAL_ENTRIES;
- if (!memcmp(hw->override_addr, mac_addr, ETH_ALEN))
+ if (ether_addr_equal(hw->override_addr, mac_addr))
return 0;
for (i = 0; i < hw->addr_list_size; i++) {
- if (!memcmp(hw->address[i], mac_addr, ETH_ALEN))
+ if (ether_addr_equal(hw->address[i], mac_addr))
return 0;
if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i]))
j = i;
@@ -4149,7 +4149,7 @@ static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
int i;
for (i = 0; i < hw->addr_list_size; i++) {
- if (!memcmp(hw->address[i], mac_addr, ETH_ALEN)) {
+ if (ether_addr_equal(hw->address[i], mac_addr)) {
memset(hw->address[i], 0, ETH_ALEN);
writel(0, hw->io + ADD_ADDR_INCR * i +
KS_ADD_ADDR_0_HI);
@@ -7104,8 +7104,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
ETH_ALEN);
else {
memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN);
- if (!memcmp(sw->other_addr, hw->override_addr,
- ETH_ALEN))
+ if (ether_addr_equal(sw->other_addr, hw->override_addr))
dev->dev_addr[5] += port->first_port;
}
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index d3b47003a575..dbccf1de49ec 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -22,8 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*
* ChangeLog
@@ -2236,7 +2235,6 @@ out_disable:
pci_disable_device(pci_dev);
out_free:
free_netdev(ndev);
- pci_set_drvdata(pci_dev, NULL);
out:
return err;
}
@@ -2260,7 +2258,6 @@ static void ns83820_remove_one(struct pci_dev *pci_dev)
dev->rx_info.descs, dev->rx_info.phy_descs);
pci_disable_device(dev->pci_dev);
free_netdev(ndev);
- pci_set_drvdata(pci_dev, NULL);
}
static DEFINE_PCI_DEVICE_TABLE(ns83820_pci_tbl) = {
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index f9876ea8c8bf..6eae2168fff9 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -507,7 +507,8 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
* if rss is disabled/enabled, so key off of that.
*/
if (ext_info.rth_value)
- skb->rxhash = ext_info.rth_value;
+ skb_set_hash(skb, ext_info.rth_value,
+ PKT_HASH_TYPE_L3);
vxge_rx_complete(ring, skb, ext_info.vlan,
pkt_length, &ext_info);
@@ -1429,7 +1430,7 @@ vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
return status;
}
- while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
+ while (!ether_addr_equal(mac->macaddr, macaddr)) {
status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
macaddr, macmask);
if (status != VXGE_HW_OK)
@@ -3189,7 +3190,7 @@ static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
return status;
}
-static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
+static int vxge_hwtstamp_set(struct vxgedev *vdev, void __user *data)
{
struct hwtstamp_config config;
int i;
@@ -3250,6 +3251,21 @@ static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
return 0;
}
+static int vxge_hwtstamp_get(struct vxgedev *vdev, void __user *data)
+{
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ config.tx_type = HWTSTAMP_TX_OFF;
+ config.rx_filter = (vdev->rx_hwts ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
+
+ if (copy_to_user(data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
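
SIOCGHWTSTAMP is the new (3.14-era) read-back counterpart of SIOCSHWTSTAMP: it reports the driver's current hardware timestamping state, so tools no longer have to overwrite the configuration just to learn it. A user-space usage sketch (sock_fd and the interface name are assumptions):

struct ifreq ifr = {0};
struct hwtstamp_config cfg = {0};

strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
ifr.ifr_data = (void *)&cfg;
if (ioctl(sock_fd, SIOCGHWTSTAMP, &ifr) == 0)
	printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
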
/**
* vxge_ioctl
* @dev: Device pointer.
@@ -3263,19 +3279,15 @@ static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct vxgedev *vdev = netdev_priv(dev);
- int ret;
switch (cmd) {
case SIOCSHWTSTAMP:
- ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
- if (ret)
- return ret;
- break;
+ return vxge_hwtstamp_set(vdev, rq->ifr_data);
+ case SIOCGHWTSTAMP:
+ return vxge_hwtstamp_get(vdev, rq->ifr_data);
default:
return -EOPNOTSUPP;
}
-
- return 0;
}
/**
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index e6f0a4366f90..31eb911e4763 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/init.h>
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 2d045be4b5cf..493a1125f54f 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -26,8 +26,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
@@ -5150,8 +5149,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
- int result;
- memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));
+ int result, count;
+
+ count = nv_get_sset_count(dev, ETH_SS_TEST);
+ memset(buffer, 0, count * sizeof(u64));
if (!nv_link_test(dev)) {
test->flags |= ETH_TEST_FL_FAILED;
@@ -5195,7 +5196,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
return;
}
- if (!nv_loopback_test(dev)) {
+ if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) {
test->flags |= ETH_TEST_FL_FAILED;
buffer[3] = 1;
}
@@ -6018,7 +6019,6 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
out_error:
if (phystate_orig)
writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
- pci_set_drvdata(pci_dev, NULL);
out_freering:
free_rings(dev);
out_unmap:
@@ -6089,7 +6089,6 @@ static void nv_remove(struct pci_dev *pci_dev)
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
free_netdev(dev);
- pci_set_drvdata(pci_dev, NULL);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 2a9003071d51..2a55d6d53ee6 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _PCH_GBE_H_
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
index ff3ad70935a6..51250363566b 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "pch_gbe.h"
#include "pch_gbe_phy.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
index 94aaac5b057b..91ce07c8306c 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _PCH_GBE_API_H_
#define _PCH_GBE_API_H_
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index f0ceb89af931..826f0ccdc23c 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "pch_gbe.h"
#include "pch_gbe_api.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 27ffe0ebf0a6..464e91058c81 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "pch_gbe.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
index cf7c9b3a255b..08d4be616064 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "pch_gbe.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
index 8b7ff75fc8e0..a5cad5ea9436 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "pch_gbe.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
index 0cbe69206e04..95ad0151ad02 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _PCH_GBE_PHY_H_
#define _PCH_GBE_PHY_H_
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 07a890eb72ad..9a6cb482dcd0 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -1053,7 +1053,7 @@ static int yellowfin_rx(struct net_device *dev)
struct sk_buff *rx_skb = yp->rx_skbuff[entry];
s16 frame_status;
u16 desc_status;
- int data_size;
+ int data_size, yf_size;
u8 *buf_addr;
if(!desc->result_status)
@@ -1070,6 +1070,9 @@ static int yellowfin_rx(struct net_device *dev)
__func__, frame_status);
if (--boguscnt < 0)
break;
+
+ yf_size = sizeof(struct yellowfin_desc);
+
if ( ! (desc_status & RX_EOP)) {
if (data_size != 0)
netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
@@ -1096,12 +1099,12 @@ static int yellowfin_rx(struct net_device *dev)
if (status2 & 0x80) dev->stats.rx_dropped++;
#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
} else if ((yp->flags & HasMACAddrBug) &&
- memcmp(le32_to_cpu(yp->rx_ring_dma +
- entry*sizeof(struct yellowfin_desc)),
- dev->dev_addr, 6) != 0 &&
- memcmp(le32_to_cpu(yp->rx_ring_dma +
- entry*sizeof(struct yellowfin_desc)),
- "\377\377\377\377\377\377", 6) != 0) {
+ !ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
+ entry * yf_size),
+ dev->dev_addr) &&
+ !ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
+ entry * yf_size),
+ "\377\377\377\377\377\377")) {
if (bogus_rx++ == 0)
netdev_warn(dev, "Bad frame to %pM\n",
buf_addr);
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index dbaa49e58b0c..0734758ff5ba 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/init.h>
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.h b/drivers/net/ethernet/pasemi/pasemi_mac.h
index f2749d46c125..a5807703ab96 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.h
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef PASEMI_MAC_H
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
index 4825959a0efe..25fae568261f 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
diff --git a/drivers/net/ethernet/qlogic/netxen/Makefile b/drivers/net/ethernet/qlogic/netxen/Makefile
index 861a0590b1f4..e14e60c88381 100644
--- a/drivers/net/ethernet/qlogic/netxen/Makefile
+++ b/drivers/net/ethernet/qlogic/netxen/Makefile
@@ -13,9 +13,7 @@
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston,
-# MA 02111-1307, USA.
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# The full GNU General Public License is included in this distribution
# in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 9adcdbb49476..6e426ae94692 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index 1bcaf45aa864..6f6be57f4690 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 4ca2c196c98a..87e073c6e291 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
index 0c64c82b9acf..a310c2f6502a 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 67efe754367d..db4280ce9c09 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
@@ -663,7 +661,7 @@ static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
list_for_each(head, del_list) {
cur = list_entry(head, nx_mac_list_t, list);
- if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(addr, cur->mac_addr)) {
list_move_tail(head, &adapter->mac_list);
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
index e2c5b6f2df03..7433c4d21601 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 7692dfd4f262..32058614151a 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
@@ -1604,13 +1602,13 @@ netxen_process_lro(struct netxen_adapter *adapter,
u32 seq_number;
u8 vhdr_len = 0;
- if (unlikely(ring > adapter->max_rds_rings))
+ if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
rds_ring = &recv_ctx->rds_rings[ring];
index = netxen_get_lro_sts_refhandle(sts_data0);
- if (unlikely(index > rds_ring->num_desc))
+ if (unlikely(index >= rds_ring->num_desc))
return NULL;
buffer = &rds_ring->rx_buf_arr[index];
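
Both checks are classic off-by-one fixes: for a zero-based array of N entries the first invalid index is N itself, so rejection must use >=, not >. Worked example:

/* max_rds_rings == 4    -> valid rings   are 0..3;    old test let 4 through
 * num_desc      == 1024 -> valid handles are 0..1023; old test let 1024
 * through, reading one element past rx_buf_arr[]
 */
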
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 3bec8cfebf99..70849dea32b1 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 631ea0ac1cd8..35d48766d842 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -38,8 +38,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 52
-#define QLCNIC_LINUX_VERSIONID "5.3.52"
+#define _QLCNIC_LINUX_SUBVERSION 53
+#define QLCNIC_LINUX_VERSIONID "5.3.53"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -115,6 +115,10 @@ enum qlcnic_queue_type {
#define QLCNIC_VNIC_MODE 0xFF
#define QLCNIC_DEFAULT_MODE 0x0
+/* Virtual NIC function count */
+#define QLC_DEFAULT_VNIC_COUNT 8
+#define QLC_84XX_VNIC_COUNT 16
+
/*
* Following are the states of the Phantom. Phantom will set them and
* Host will read to check if the fields are correct.
@@ -374,7 +378,7 @@ struct qlcnic_rx_buffer {
#define QLCNIC_INTR_DEFAULT 0x04
#define QLCNIC_CONFIG_INTR_COALESCE 3
-#define QLCNIC_DEV_INFO_SIZE 1
+#define QLCNIC_DEV_INFO_SIZE 2
struct qlcnic_nic_intr_coalesce {
u8 type;
@@ -462,8 +466,10 @@ struct qlcnic_hardware_context {
u16 max_rx_ques;
u16 max_mtu;
u32 msg_enable;
- u16 act_pci_func;
+ u16 total_nic_func;
u16 max_pci_func;
+ u32 max_vnic_func;
+ u32 total_pci_func;
u32 capabilities;
u32 extra_capability[3];
@@ -487,6 +493,7 @@ struct qlcnic_hardware_context {
struct qlcnic_mailbox *mailbox;
u8 extend_lb_time;
u8 phys_port_id[ETH_ALEN];
+ u8 lb_mode;
};
struct qlcnic_adapter_stats {
@@ -578,6 +585,8 @@ struct qlcnic_host_tx_ring {
dma_addr_t phys_addr;
dma_addr_t hw_cons_phys_addr;
struct netdev_queue *txq;
+ /* Lock to protect Tx descriptors cleanup */
+ spinlock_t tx_clean_lock;
} ____cacheline_internodealigned_in_smp;
/*
@@ -788,9 +797,10 @@ struct qlcnic_cardrsp_tx_ctx {
#define QLCNIC_MAC_VLAN_ADD 3
#define QLCNIC_MAC_VLAN_DEL 4
-struct qlcnic_mac_list_s {
+struct qlcnic_mac_vlan_list {
struct list_head list;
uint8_t mac_addr[ETH_ALEN+2];
+ u16 vlan_id;
};
/* MAC Learn */
@@ -808,6 +818,7 @@ struct qlcnic_mac_list_s {
#define QLCNIC_ILB_MODE 0x1
#define QLCNIC_ELB_MODE 0x2
+#define QLCNIC_LB_MODE_MASK 0x3
#define QLCNIC_LINKEVENT 0x1
#define QLCNIC_LB_RESPONSE 0x2
@@ -856,7 +867,7 @@ struct qlcnic_mac_list_s {
#define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3
#define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5
#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7
-#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_8
+#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_9
/* module types */
#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -1093,7 +1104,6 @@ struct qlcnic_adapter {
struct qlcnic_filter_hash rx_fhash;
struct list_head vf_mc_list;
- spinlock_t tx_clean_lock;
spinlock_t mac_learn_lock;
/* spinlock for catching rcv filters for eswitch traffic */
spinlock_t rx_mac_learn_lock;
@@ -1637,7 +1647,9 @@ int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
void qlcnic_set_netdev_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
void qlcnic_sriov_vf_schedule_multi(struct net_device *);
-void qlcnic_vf_add_mc_list(struct net_device *, u16);
+int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8);
+int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *,
+ u16 *);
/*
* QLOGIC Board information
@@ -2136,4 +2148,26 @@ static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
return status;
}
+
+static inline bool qlcnic_83xx_pf_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+
+ return device == PCI_DEVICE_ID_QLOGIC_QLE834X;
+}
+
+static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+
+ return device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X;
+}
+
+static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_84xx_check(adapter))
+ return QLC_84XX_VNIC_COUNT;
+ else
+ return QLC_DEFAULT_VNIC_COUNT;
+}
#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index b1cb0ffb15c7..03eb2ad9611a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -15,6 +15,7 @@
#define RSS_HASHTYPE_IP_TCP 0x3
#define QLC_83XX_FW_MBX_CMD 0
+#define QLC_SKIP_INACTIVE_PCI_REGS 7
static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
@@ -34,7 +35,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_READ_MAX_MTU, 4, 2},
{QLCNIC_CMD_READ_MAX_LRO, 4, 2},
{QLCNIC_CMD_MAC_ADDRESS, 4, 3},
- {QLCNIC_CMD_GET_PCI_INFO, 1, 66},
+ {QLCNIC_CMD_GET_PCI_INFO, 1, 129},
{QLCNIC_CMD_GET_NIC_INFO, 2, 19},
{QLCNIC_CMD_SET_NIC_INFO, 32, 1},
{QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
@@ -68,7 +69,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_CONFIG_VPORT, 4, 4},
{QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
{QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
- {QLCNIC_CMD_DCB_QUERY_PARAM, 2, 50},
+ {QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50},
};
const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@ -289,6 +290,7 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
if (qlcnic_sriov_vf_check(adapter))
return -EINVAL;
num_msix = 1;
+ adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
}
/* setup interrupt mapping table for fw */
ahw->intr_tbl = vzalloc(num_msix *
@@ -315,12 +317,12 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
return 0;
}
-inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
+static inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
{
writel(0, adapter->tgt_mask_reg);
}
-inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter)
+static inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter)
{
if (adapter->tgt_mask_reg)
writel(1, adapter->tgt_mask_reg);
@@ -340,7 +342,7 @@ void qlcnic_83xx_disable_intr(struct qlcnic_adapter *adapter,
writel(1, sds_ring->crb_intr_mask);
}
-inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter
+static inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter
*adapter)
{
u32 mask;
@@ -447,8 +449,9 @@ irqreturn_t qlcnic_83xx_intr(int irq, void *data)
qlcnic_83xx_poll_process_aen(adapter);
- if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- ahw->diag_cnt++;
+ if (ahw->diag_test) {
+ if (ahw->diag_test == QLCNIC_INTERRUPT_TEST)
+ ahw->diag_cnt++;
qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
return IRQ_HANDLED;
}
@@ -636,7 +639,7 @@ int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter)
void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
- u16 act_pci_fn = ahw->act_pci_func;
+ u16 act_pci_fn = ahw->total_nic_func;
u16 count;
ahw->max_mc_count = QLC_83XX_MAX_MC_COUNT;
@@ -1345,11 +1348,6 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
}
if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
- /* disable and free mailbox interrupt */
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
- qlcnic_83xx_enable_mbx_poll(adapter);
- qlcnic_83xx_free_mbx_intr(adapter);
- }
adapter->ahw->loopback_state = 0;
adapter->ahw->hw_ops->setup_link_event(adapter, 1);
}
@@ -1363,33 +1361,20 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_host_sds_ring *sds_ring;
- int ring, err;
+ int ring;
clear_bit(__QLCNIC_DEV_UP, &adapter->state);
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
- qlcnic_83xx_disable_intr(adapter, sds_ring);
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
- qlcnic_83xx_enable_mbx_poll(adapter);
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ qlcnic_83xx_disable_intr(adapter, sds_ring);
}
}
qlcnic_fw_destroy_ctx(adapter);
qlcnic_detach(adapter);
- if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
- err = qlcnic_83xx_setup_mbx_intr(adapter);
- qlcnic_83xx_disable_mbx_poll(adapter);
- if (err) {
- dev_err(&adapter->pdev->dev,
- "%s: failed to setup mbx interrupt\n",
- __func__);
- goto out;
- }
- }
- }
adapter->ahw->diag_test = 0;
adapter->drv_sds_rings = drv_sds_rings;
@@ -1399,9 +1384,6 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
if (netif_running(netdev))
__qlcnic_up(adapter, netdev);
- if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST &&
- !(adapter->flags & QLCNIC_MSIX_ENABLED))
- qlcnic_83xx_disable_mbx_poll(adapter);
out:
netif_device_attach(netdev);
}
@@ -1518,8 +1500,7 @@ int qlcnic_83xx_set_led(struct net_device *netdev,
return err;
}
-void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
- int enable)
+void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *adapter, int enable)
{
struct qlcnic_cmd_args cmd;
int status;
@@ -1527,21 +1508,21 @@ void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
if (qlcnic_sriov_vf_check(adapter))
return;
- if (enable) {
+ if (enable)
status = qlcnic_alloc_mbx_args(&cmd, adapter,
QLCNIC_CMD_INIT_NIC_FUNC);
- if (status)
- return;
-
- cmd.req.arg[1] = BIT_0 | BIT_31;
- } else {
+ else
status = qlcnic_alloc_mbx_args(&cmd, adapter,
QLCNIC_CMD_STOP_NIC_FUNC);
- if (status)
- return;
- cmd.req.arg[1] = BIT_0 | BIT_31;
- }
+ if (status)
+ return;
+
+ cmd.req.arg[1] = QLC_REGISTER_LB_IDC | QLC_INIT_FW_RESOURCES;
+
+ if (adapter->dcb)
+ cmd.req.arg[1] |= QLC_REGISTER_DCB_AEN;
+
status = qlcnic_issue_cmd(adapter, &cmd);
if (status)
dev_err(&adapter->pdev->dev,
@@ -1637,7 +1618,7 @@ int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
- cmd->req.arg[1] = (mode ? 1 : 0) | temp;
+ cmd->req.arg[1] = mode | temp;
err = qlcnic_issue_cmd(adapter, cmd);
if (!err)
return err;
@@ -1704,12 +1685,6 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
}
} while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
- /* Make sure carrier is off and queue is stopped during loopback */
- if (netif_running(netdev)) {
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
- }
-
ret = qlcnic_do_lb_test(adapter, mode);
qlcnic_83xx_clear_lb_mode(adapter, mode);
@@ -2141,6 +2116,7 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
ahw->link_autoneg = MSB(MSW(data[3]));
ahw->module_type = MSB(LSW(data[3]));
ahw->has_link_events = 1;
+ ahw->lb_mode = data[4] & QLCNIC_LB_MODE_MASK;
qlcnic_advert_link_change(adapter, link_status);
}
@@ -2293,11 +2269,37 @@ out:
return err;
}
+int qlcnic_get_pci_func_type(struct qlcnic_adapter *adapter, u16 type,
+ u16 *nic, u16 *fcoe, u16 *iscsi)
+{
+ struct device *dev = &adapter->pdev->dev;
+ int err = 0;
+
+ switch (type) {
+ case QLCNIC_TYPE_NIC:
+ (*nic)++;
+ break;
+ case QLCNIC_TYPE_FCOE:
+ (*fcoe)++;
+ break;
+ case QLCNIC_TYPE_ISCSI:
+ (*iscsi)++;
+ break;
+ default:
+ dev_err(dev, "%s: Unknown PCI type[%x]\n",
+ __func__, type);
+ err = -EIO;
+ }
+
+ return err;
+}
+
int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
struct qlcnic_pci_info *pci_info)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct device *dev = &adapter->pdev->dev;
+ u16 nic = 0, fcoe = 0, iscsi = 0;
struct qlcnic_cmd_args cmd;
int i, err = 0, j = 0;
u32 temp;
@@ -2308,16 +2310,20 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
err = qlcnic_issue_cmd(adapter, &cmd);
- ahw->act_pci_func = 0;
+ ahw->total_nic_func = 0;
if (err == QLCNIC_RCODE_SUCCESS) {
ahw->max_pci_func = cmd.rsp.arg[1] & 0xFF;
- for (i = 2, j = 0; j < QLCNIC_MAX_PCI_FUNC; j++, pci_info++) {
+ for (i = 2, j = 0; j < ahw->max_vnic_func; j++, pci_info++) {
pci_info->id = cmd.rsp.arg[i] & 0xFFFF;
pci_info->active = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
i++;
+ if (!pci_info->active) {
+ i += QLC_SKIP_INACTIVE_PCI_REGS;
+ continue;
+ }
pci_info->type = cmd.rsp.arg[i] & 0xFFFF;
- if (pci_info->type == QLCNIC_TYPE_NIC)
- ahw->act_pci_func++;
+ err = qlcnic_get_pci_func_type(adapter, pci_info->type,
+ &nic, &fcoe, &iscsi);
temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
pci_info->default_port = temp;
i++;
@@ -2335,6 +2341,13 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
err = -EIO;
}
+ ahw->total_nic_func = nic;
+ ahw->total_pci_func = nic + fcoe + iscsi;
+ if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) {
+ dev_err(dev, "%s: Invalid function count: total nic func[%x], total pci func[%x]\n",
+ __func__, ahw->total_nic_func, ahw->total_pci_func);
+ err = -EIO;
+ }
qlcnic_free_mbx_args(&cmd);
return err;
@@ -3754,6 +3767,19 @@ static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter,
return;
}
+static inline void qlcnic_dump_mailbox_registers(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 offset;
+
+ offset = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
+ dev_info(&adapter->pdev->dev, "Mbx interrupt mask=0x%x, Mbx interrupt enable=0x%x, Host mbx control=0x%x, Fw mbx control=0x%x\n",
+ readl(ahw->pci_base0 + offset),
+ QLCRDX(ahw, QLCNIC_MBX_INTR_ENBL),
+ QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL),
+ QLCRDX(ahw, QLCNIC_FW_MBX_CTRL));
+}
+
static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
{
struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
@@ -3798,6 +3824,8 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
__func__, cmd->cmd_op, cmd->type, ahw->pci_func,
ahw->op_mode);
clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ qlcnic_dump_mailbox_registers(adapter);
+ qlcnic_83xx_get_mbx_data(adapter, cmd);
qlcnic_dump_mbx(adapter, cmd);
qlcnic_83xx_idc_request_reset(adapter,
QLCNIC_FORCE_FW_DUMP_KEY);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 4cae6caa6bfa..34d291168b79 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -324,6 +324,11 @@ struct qlc_83xx_idc {
char **name;
};
+enum qlcnic_vlan_operations {
+ QLC_VLAN_ADD = 0,
+ QLC_VLAN_DELETE
+};
+
/* Device States */
enum qlcnic_83xx_states {
QLC_83XX_IDC_DEV_UNKNOWN,
@@ -518,6 +523,11 @@ enum qlc_83xx_ext_regs {
QLC_83XX_ASIC_TEMP,
};
+/* Initialize/Stop NIC command bit definitions */
+#define QLC_REGISTER_DCB_AEN BIT_1
+#define QLC_REGISTER_LB_IDC BIT_0
+#define QLC_INIT_FW_RESOURCES BIT_31
+
/* 83xx functions */
int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
@@ -542,7 +552,7 @@ int qlcnic_83xx_config_intr_coalesce(struct qlcnic_adapter *);
void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
-void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *, int);
+void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
int qlcnic_83xx_napi_add(struct qlcnic_adapter *, struct net_device *);
void qlcnic_83xx_napi_del(struct qlcnic_adapter *);
@@ -662,4 +672,5 @@ pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
pci_channel_state_t);
pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
void qlcnic_83xx_io_resume(struct pci_dev *);
+void qlcnic_83xx_stop_hw(struct qlcnic_adapter *);
#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 89208e5b25d6..22ae884728b8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -614,8 +614,7 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
qlcnic_83xx_enable_mbx_interrupt(adapter);
- /* register for NIC IDC AEN Events */
- qlcnic_83xx_register_nic_idc_func(adapter, 1);
+ qlcnic_83xx_initialize_nic(adapter, 1);
err = qlcnic_sriov_pf_reinit(adapter);
if (err)
@@ -740,6 +739,7 @@ static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter)
adapter->ahw->idc.err_code = -EIO;
dev_err(&adapter->pdev->dev,
"%s: Device in unknown state\n", __func__);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
return 0;
}
@@ -818,7 +818,6 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_mailbox *mbx = ahw->mailbox;
int ret = 0;
- u32 owner;
u32 val;
/* Perform NIC configuration based ready state entry actions */
@@ -848,9 +847,9 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
set_bit(__QLCNIC_RESETTING, &adapter->state);
qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
} else {
- owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
- if (ahw->pci_func == owner)
- qlcnic_dump_fw(adapter);
+ netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+ __func__);
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
}
return -EIO;
}
@@ -948,13 +947,26 @@ static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter)
return 0;
}
-static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
+static void qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
{
- dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 val, owner;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (ahw->pci_func == owner) {
+ qlcnic_83xx_stop_hw(adapter);
+ qlcnic_dump_fw(adapter);
+ }
+ }
+
+ netdev_warn(adapter->netdev, "%s: Reboot will be required to recover the adapter!\n",
+ __func__);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
- adapter->ahw->idc.err_code = -EIO;
+ ahw->idc.err_code = -EIO;
- return 0;
+ return;
}
static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter)
@@ -1063,12 +1075,6 @@ void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work)
adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state;
qlcnic_83xx_periodic_tasks(adapter);
- /* Do not reschedule if firmaware is in hanged state and auto
- * recovery is disabled
- */
- if ((adapter->flags & QLCNIC_FW_HANG) && !qlcnic_auto_fw_reset)
- return;
-
/* Re-schedule the function */
if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status))
qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
@@ -1219,10 +1225,10 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key)
}
val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
- if ((val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) ||
- !qlcnic_auto_fw_reset) {
- dev_err(&adapter->pdev->dev,
- "%s:failed, device in non reset mode\n", __func__);
+ if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+ netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+ __func__);
+ qlcnic_83xx_idc_enter_failed_state(adapter, 0);
qlcnic_83xx_unlock_driver(adapter);
return;
}
@@ -1254,24 +1260,24 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
if (size & 0xF)
size = (size + 16) & ~0xF;
- p_cache = kzalloc(size, GFP_KERNEL);
+ p_cache = vzalloc(size);
if (p_cache == NULL)
return -ENOMEM;
ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache,
size / sizeof(u32));
if (ret) {
- kfree(p_cache);
+ vfree(p_cache);
return ret;
}
/* 16 byte write to MS memory */
ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache,
size / 16);
if (ret) {
- kfree(p_cache);
+ vfree(p_cache);
return ret;
}
- kfree(p_cache);
+ vfree(p_cache);
return ret;
}
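
The qlcnic_83xx_copy_bootloader() hunk above trades kzalloc() for vzalloc(): the bootloader copy can be large, and kzalloc() needs physically contiguous pages, so it may fail under memory fragmentation where the virtually contiguous vzalloc() still succeeds. A minimal user-space sketch of the same size-padding logic, with calloc() standing in for vzalloc() (bootloader_buf is an illustrative name, not driver API):

#include <stdio.h>
#include <stdlib.h>

/* Round the size up to a 16-byte multiple exactly as
 * qlcnic_83xx_copy_bootloader() does, then allocate zeroed memory.
 * In the kernel the point of the switch is that vzalloc() does not
 * need physically contiguous pages, so a large copy no longer fails
 * under fragmentation. */
static void *bootloader_buf(size_t size, size_t *padded)
{
        if (size & 0xF)                         /* pad for 16-byte MS writes */
                size = (size + 16) & ~(size_t)0xF;
        *padded = size;
        return calloc(1, size);
}

int main(void)
{
        size_t padded;
        void *p = bootloader_buf(1000, &padded);

        if (!p)
                return 1;
        printf("allocated %zu bytes\n", padded);        /* -> 1008 */
        free(p);
        return 0;
}
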
@@ -1939,7 +1945,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
p_dev->ahw->reset.seq_index = index;
}
-static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
+void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
{
p_dev->ahw->reset.seq_index = 0;
@@ -1994,6 +2000,14 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
if (!(val & QLC_83XX_IDC_GRACEFULL_RESET))
qlcnic_dump_fw(adapter);
+
+ if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+ netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+ __func__);
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return err;
+ }
+
qlcnic_83xx_init_hw(adapter);
if (qlcnic_83xx_copy_bootloader(adapter))
@@ -2073,8 +2087,8 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
ahw->nic_mode = QLCNIC_DEFAULT_MODE;
adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
- adapter->max_sds_rings = ahw->max_rx_ques;
- adapter->max_tx_rings = ahw->max_tx_ques;
+ adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+ adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
} else {
return -EIO;
}
@@ -2198,7 +2212,6 @@ static void qlcnic_83xx_init_rings(struct qlcnic_adapter *adapter)
int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
- struct qlcnic_dcb *dcb;
int err = 0;
ahw->msix_supported = !!qlcnic_use_msi_x;
@@ -2250,8 +2263,7 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
- /* register for NIC IDC AEN Events */
- qlcnic_83xx_register_nic_idc_func(adapter, 1);
+ qlcnic_83xx_initialize_nic(adapter, 1);
/* Configure default, SR-IOV or Virtual NIC mode of operation */
err = qlcnic_83xx_configure_opmode(adapter);
@@ -2264,11 +2276,6 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
if (err)
goto disable_mbx_intr;
- dcb = adapter->dcb;
-
- if (dcb && qlcnic_dcb_attach(dcb))
- qlcnic_clear_dcb_ops(dcb);
-
/* Periodically monitor device status */
qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
return 0;
@@ -2299,7 +2306,7 @@ void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *adapter)
qlcnic_83xx_disable_vnic_mode(adapter, 1);
qlcnic_83xx_idc_detach_driver(adapter);
- qlcnic_83xx_register_nic_idc_func(adapter, 0);
+ qlcnic_83xx_initialize_nic(adapter, 0);
cancel_delayed_work_sync(&adapter->idc_aen_work);
}
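
Taken together, the qlcnic_83xx_init.c hunks centralize the "auto firmware recovery is disabled" policy in the failed-state handler: only the elected reset owner stops the hardware and collects a firmware dump, every function logs that a reboot is needed, and the ready/request-reset/restart paths now transition into that state instead of dumping inline. A compact stand-alone rendering of that decision (struct idc_ctx and the helper names are illustrative, not the driver's types):

#include <stdbool.h>
#include <stdio.h>

struct idc_ctx {
        unsigned int pci_func;          /* this function's PCI id */
        unsigned int reset_owner;       /* elected dump/reset owner */
        bool fw_recovery_disabled;      /* QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY */
};

/* Mirrors the reworked qlcnic_83xx_idc_failed_state(): only the owner
 * touches the hardware; every function reports the unrecoverable state. */
static void enter_failed_state(const struct idc_ctx *ctx)
{
        if (ctx->fw_recovery_disabled && ctx->pci_func == ctx->reset_owner)
                puts("stop hw, dump firmware");         /* owner only */
        puts("reboot required to recover the adapter");
}

int main(void)
{
        struct idc_ctx ctx = {
                .pci_func = 0, .reset_owner = 0, .fw_recovery_disabled = true,
        };

        enter_failed_state(&ctx);
        return 0;
}
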
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index 734d28602ac3..474320a5f0c1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -107,7 +107,7 @@ static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
npar = adapter->npars;
- for (i = 0; i < ahw->act_pci_func; i++, npar++) {
+ for (i = 0; i < ahw->total_nic_func; i++, npar++) {
dev_info(dev, "id:%d active:%d type:%d port:%d min_bw:%d max_bw:%d mac_addr:%pM\n",
npar->pci_func, npar->active, npar->type,
npar->phy_port, npar->min_bw, npar->max_bw,
@@ -115,7 +115,7 @@ static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
}
dev_info(dev, "Max functions = %d, active functions = %d\n",
- ahw->max_pci_func, ahw->act_pci_func);
+ ahw->max_pci_func, ahw->total_nic_func);
if (qlcnic_83xx_set_vnic_opmode(adapter))
return err;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 859cb161fc63..64dcbf33d8f0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -91,18 +91,6 @@ void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd)
cmd->rsp.arg = NULL;
}
-static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
-{
- int i;
-
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
- if (adapter->npars[i].pci_func == pci_func)
- return i;
- }
-
- return -1;
-}
-
static u32
qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
{
@@ -966,13 +954,15 @@ out_free_dma:
int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
struct qlcnic_pci_info *pci_info)
{
- int err = 0, i;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ size_t npar_size = sizeof(struct qlcnic_pci_info_le);
+ size_t pci_size = npar_size * ahw->max_vnic_func;
+ u16 nic = 0, fcoe = 0, iscsi = 0;
+ struct qlcnic_pci_info_le *npar;
struct qlcnic_cmd_args cmd;
dma_addr_t pci_info_dma_t;
- struct qlcnic_pci_info_le *npar;
void *pci_info_addr;
- size_t npar_size = sizeof(struct qlcnic_pci_info_le);
- size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
+ int err = 0, i;
pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size,
&pci_info_dma_t, GFP_KERNEL);
@@ -989,14 +979,16 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
cmd.req.arg[3] = pci_size;
err = qlcnic_issue_cmd(adapter, &cmd);
- adapter->ahw->act_pci_func = 0;
+ ahw->total_nic_func = 0;
if (err == QLCNIC_RCODE_SUCCESS) {
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
+ for (i = 0; i < ahw->max_vnic_func; i++, npar++, pci_info++) {
pci_info->id = le16_to_cpu(npar->id);
pci_info->active = le16_to_cpu(npar->active);
+ if (!pci_info->active)
+ continue;
pci_info->type = le16_to_cpu(npar->type);
- if (pci_info->type == QLCNIC_TYPE_NIC)
- adapter->ahw->act_pci_func++;
+ err = qlcnic_get_pci_func_type(adapter, pci_info->type,
+ &nic, &fcoe, &iscsi);
pci_info->default_port =
le16_to_cpu(npar->default_port);
pci_info->tx_min_bw =
@@ -1011,6 +1003,14 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
err = -EIO;
}
+ ahw->total_nic_func = nic;
+ ahw->total_pci_func = nic + fcoe + iscsi;
+ if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid function count: total nic func[%x], total pci func[%x]\n",
+ __func__, ahw->total_nic_func, ahw->total_pci_func);
+ err = -EIO;
+ }
qlcnic_free_mbx_args(&cmd);
out_free_dma:
dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
@@ -1203,7 +1203,7 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL;
esw_stats->context_id = eswitch;
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
if (adapter->npars[i].phy_port != eswitch)
continue;
@@ -1236,15 +1236,16 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
const u8 port, const u8 rx_tx)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
int err;
u32 arg1;
- struct qlcnic_cmd_args cmd;
- if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ if (ahw->op_mode != QLCNIC_MGMT_FUNC)
return -EIO;
if (func_esw == QLCNIC_STATS_PORT) {
- if (port >= QLCNIC_MAX_PCI_FUNC)
+ if (port >= ahw->max_vnic_func)
goto err_ret;
} else if (func_esw == QLCNIC_STATS_ESWITCH) {
if (port >= QLCNIC_NIU_MAX_XG_PORTS)
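
The qlcnic_ctx.c changes replace the single act_pci_func counter with per-type totals: every active function reported by GET_PCI_INFO is classified (via qlcnic_get_pci_func_type()) and the NIC/overall totals are sanity-checked before use. The classification loop reduces to something like this sketch (the enum and sample data are illustrative):

#include <stdio.h>

enum func_type { FUNC_NIC, FUNC_FCOE, FUNC_ISCSI };

int main(void)
{
        /* stand-in for the types decoded from the mailbox response */
        enum func_type funcs[] = { FUNC_NIC, FUNC_NIC, FUNC_FCOE };
        unsigned int nic = 0, fcoe = 0, iscsi = 0;
        size_t i;

        for (i = 0; i < sizeof(funcs) / sizeof(funcs[0]); i++) {
                switch (funcs[i]) {
                case FUNC_NIC:   nic++;   break;
                case FUNC_FCOE:  fcoe++;  break;
                case FUNC_ISCSI: iscsi++; break;
                }
        }

        /* same sanity check the driver now performs */
        if (nic == 0 || nic + fcoe + iscsi == 0) {
                fprintf(stderr, "Invalid function count\n");
                return 1;
        }
        printf("total nic func %u, total pci func %u\n",
               nic, nic + fcoe + iscsi);
        return 0;
}
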
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index 86bca7c14f99..77f1bce432d2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -15,7 +15,6 @@
#define QLC_DCB_GET_MAP(V) (1 << V)
-#define QLC_DCB_AEN_BIT 0x2
#define QLC_DCB_FW_VER 0x2
#define QLC_DCB_MAX_TC 0x8
#define QLC_DCB_MAX_APP 0x8
@@ -71,7 +70,6 @@ static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *);
static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
-static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *, bool);
static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
struct qlcnic_dcb_capability {
@@ -179,7 +177,6 @@ static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = {
.get_hw_capability = qlcnic_83xx_dcb_get_hw_capability,
.query_cee_param = qlcnic_83xx_dcb_query_cee_param,
.get_cee_cfg = qlcnic_83xx_dcb_get_cee_cfg,
- .register_aen = qlcnic_83xx_dcb_register_aen,
.aen_handler = qlcnic_83xx_dcb_aen_handler,
};
@@ -260,6 +257,9 @@ int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
{
struct qlcnic_dcb *dcb;
+ if (qlcnic_sriov_vf_check(adapter))
+ return 0;
+
dcb = kzalloc(sizeof(struct qlcnic_dcb), GFP_ATOMIC);
if (!dcb)
return -ENOMEM;
@@ -280,7 +280,6 @@ static void __qlcnic_dcb_free(struct qlcnic_dcb *dcb)
return;
adapter = dcb->adapter;
- qlcnic_dcb_register_aen(dcb, 0);
while (test_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
usleep_range(10000, 11000);
@@ -304,7 +303,6 @@ static void __qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
{
qlcnic_dcb_get_hw_capability(dcb);
qlcnic_dcb_get_cee_cfg(dcb);
- qlcnic_dcb_register_aen(dcb, 1);
}
static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
@@ -642,29 +640,6 @@ static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
return err;
}
-static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *dcb, bool flag)
-{
- u8 val = (flag ? QLCNIC_CMD_INIT_NIC_FUNC : QLCNIC_CMD_STOP_NIC_FUNC);
- struct qlcnic_adapter *adapter = dcb->adapter;
- struct qlcnic_cmd_args cmd;
- int err;
-
- err = qlcnic_alloc_mbx_args(&cmd, adapter, val);
- if (err)
- return err;
-
- cmd.req.arg[1] = QLC_DCB_AEN_BIT;
-
- err = qlcnic_issue_cmd(adapter, &cmd);
- if (err)
- dev_err(&adapter->pdev->dev, "Failed to %s DCBX AEN, err %d\n",
- (flag ? "register" : "unregister"), err);
-
- qlcnic_free_mbx_args(&cmd);
-
- return err;
-}
-
static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
{
u32 *val = data;
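
With the rename to qlcnic_83xx_initialize_nic(), the separate qlcnic_83xx_dcb_register_aen() round trip becomes redundant: the DCB AEN subscription travels with the INIT_NIC_FUNC mailbox command itself, so one command both initializes the function and registers for the event classes. Schematically (the flag bits below are illustrative, not the firmware's real encoding):

#include <stdio.h>

#define INIT_NIC_IDC_AEN  (1u << 0)     /* illustrative bit values */
#define INIT_NIC_DCB_AEN  (1u << 1)

static void init_nic_func(unsigned int flags)
{
        printf("INIT_NIC_FUNC arg1=0x%x\n", flags);
}

int main(void)
{
        /* one command subscribes to both event classes, replacing the
         * register/unregister pair that this hunk deletes */
        init_nic_func(INIT_NIC_IDC_AEN | INIT_NIC_DCB_AEN);
        return 0;
}
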
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
index c04ae0cdc108..3cf4a10fbe1e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -25,7 +25,6 @@ struct qlcnic_dcb_ops {
int (*get_hw_capability) (struct qlcnic_dcb *);
int (*query_cee_param) (struct qlcnic_dcb *, char *, u8);
void (*init_dcbnl_ops) (struct qlcnic_dcb *);
- int (*register_aen) (struct qlcnic_dcb *, bool);
void (*aen_handler) (struct qlcnic_dcb *, void *);
int (*get_cee_cfg) (struct qlcnic_dcb *);
void (*get_info) (struct qlcnic_dcb *);
@@ -103,13 +102,6 @@ static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
return 0;
}
-static inline void
-qlcnic_dcb_register_aen(struct qlcnic_dcb *dcb, u8 flag)
-{
- if (dcb && dcb->ops->register_aen)
- dcb->ops->register_aen(dcb, flag);
-}
-
static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg)
{
if (dcb && dcb->ops->aen_handler)
@@ -121,4 +113,10 @@ static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb)
if (dcb && dcb->ops->init_dcbnl_ops)
dcb->ops->init_dcbnl_ops(dcb);
}
+
+static inline void qlcnic_dcb_enable(struct qlcnic_dcb *dcb)
+{
+ if (dcb && qlcnic_dcb_attach(dcb))
+ qlcnic_clear_dcb_ops(dcb);
+}
#endif
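
The new qlcnic_dcb_enable() inline follows the file's existing pattern: every wrapper checks the dcb pointer before dispatching, so callers such as qlcnic_probe() need no NULL guards of their own. A stripped-down model of the pattern (types simplified):

#include <stdio.h>

struct dcb;

struct dcb_ops {
        int (*attach)(struct dcb *dcb);
};

struct dcb {
        const struct dcb_ops *ops;
};

/* Mirrors qlcnic_dcb_enable(): a NULL dcb is a silent no-op, and a
 * failed attach falls back by clearing the ops (printed here). */
static void dcb_enable(struct dcb *dcb)
{
        if (dcb && dcb->ops->attach && dcb->ops->attach(dcb))
                puts("attach failed, clearing dcb ops");
}

int main(void)
{
        dcb_enable(NULL);       /* safe, exactly like the kernel wrapper */
        return 0;
}
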
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index b36c02fafcfd..45fa6eff56c9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -221,7 +221,7 @@ static const u32 ext_diag_registers[] = {
-1
};
-#define QLCNIC_MGMT_API_VERSION 2
+#define QLCNIC_MGMT_API_VERSION 3
#define QLCNIC_ETHTOOL_REGS_VER 4
static inline int qlcnic_get_ring_regs_len(struct qlcnic_adapter *adapter)
@@ -519,6 +519,9 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
regs_buff[1] = QLCNIC_MGMT_API_VERSION;
+ if (adapter->ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+ regs_buff[2] = adapter->ahw->max_vnic_func;
+
if (qlcnic_82xx_check(adapter))
i = qlcnic_82xx_get_registers(adapter, regs_buff);
else
@@ -667,30 +670,25 @@ qlcnic_set_ringparam(struct net_device *dev,
static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter,
u8 rx_ring, u8 tx_ring)
{
+ if (rx_ring == 0 || tx_ring == 0)
+ return -EINVAL;
+
if (rx_ring != 0) {
if (rx_ring > adapter->max_sds_rings) {
- netdev_err(adapter->netdev, "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n",
+ netdev_err(adapter->netdev,
+ "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n",
rx_ring, adapter->max_sds_rings);
return -EINVAL;
}
}
if (tx_ring != 0) {
- if (qlcnic_82xx_check(adapter) &&
- (tx_ring > adapter->max_tx_rings)) {
+ if (tx_ring > adapter->max_tx_rings) {
netdev_err(adapter->netdev,
"Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n",
tx_ring, adapter->max_tx_rings);
return -EINVAL;
}
-
- if (qlcnic_83xx_check(adapter) &&
- (tx_ring > QLCNIC_SINGLE_RING)) {
- netdev_err(adapter->netdev,
- "Invalid ring count, Tx ring count %d should not be greater than %d driver Tx rings.\n",
- tx_ring, QLCNIC_SINGLE_RING);
- return -EINVAL;
- }
}
return 0;
@@ -948,6 +946,7 @@ static int qlcnic_irq_test(struct net_device *netdev)
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_cmd_args cmd;
int ret, drv_sds_rings = adapter->drv_sds_rings;
+ int drv_tx_rings = adapter->drv_tx_rings;
if (qlcnic_83xx_check(adapter))
return qlcnic_83xx_interrupt_test(netdev);
@@ -980,6 +979,7 @@ free_diag_res:
clear_diag_irq:
adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return ret;
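
qlcnic_validate_ring_count() is simplified in two ways: a zero ring count is rejected up front, and the 82xx/83xx split disappears because both families are now validated against the same per-adapter maximums (the removed branch had pinned 83xx to a single Tx ring). Reduced to its core, the check is:

#include <stdio.h>

/* Sketch of the unified check; max values are examples, the driver
 * uses adapter->max_sds_rings / adapter->max_tx_rings. */
static int validate_ring_count(unsigned int rx_ring, unsigned int tx_ring,
                               unsigned int max_sds, unsigned int max_tx)
{
        if (rx_ring == 0 || tx_ring == 0)
                return -1;                      /* -EINVAL in the driver */
        if (rx_ring > max_sds || tx_ring > max_tx)
                return -1;
        return 0;
}

int main(void)
{
        printf("%d\n", validate_ring_count(4, 4, 8, 8));        /* 0  */
        printf("%d\n", validate_ring_count(0, 4, 8, 8));        /* -1 */
        printf("%d\n", validate_ring_count(4, 16, 8, 8));       /* -1 */
        return 0;
}
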
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index d262211b03b3..34e467b239a1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -698,7 +698,6 @@ struct qlcnic_legacy_intr_set {
};
#define QLCNIC_MSIX_BASE 0x132110
-#define QLCNIC_MAX_PCI_FUNC 8
#define QLCNIC_MAX_VLAN_FILTERS 64
#define FLASH_ROM_WINDOW 0x42110030
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 6f7f60c09f07..a9a149b82375 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -455,14 +455,14 @@ int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr)
{
+ struct qlcnic_mac_vlan_list *cur;
struct list_head *head;
- struct qlcnic_mac_list_s *cur;
int err = -EINVAL;
/* Delete MAC from the existing list */
list_for_each(head, &adapter->mac_list) {
- cur = list_entry(head, struct qlcnic_mac_list_s, list);
- if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
+ cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ if (ether_addr_equal(addr, cur->mac_addr)) {
err = qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
0, QLCNIC_MAC_DEL);
if (err)
@@ -477,17 +477,18 @@ int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr)
int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan)
{
+ struct qlcnic_mac_vlan_list *cur;
struct list_head *head;
- struct qlcnic_mac_list_s *cur;
/* look up if already exists */
list_for_each(head, &adapter->mac_list) {
- cur = list_entry(head, struct qlcnic_mac_list_s, list);
- if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0)
+ cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ if (ether_addr_equal(addr, cur->mac_addr) &&
+ cur->vlan_id == vlan)
return 0;
}
- cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
+ cur = kzalloc(sizeof(*cur), GFP_ATOMIC);
if (cur == NULL)
return -ENOMEM;
@@ -499,6 +500,7 @@ int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan)
return -EIO;
}
+ cur->vlan_id = vlan;
list_add_tail(&cur->list, &adapter->mac_list);
return 0;
}
@@ -516,8 +518,7 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return;
- if (!qlcnic_sriov_vf_check(adapter))
- qlcnic_nic_add_mac(adapter, adapter->mac_addr, vlan);
+ qlcnic_nic_add_mac(adapter, adapter->mac_addr, vlan);
qlcnic_nic_add_mac(adapter, bcast_addr, vlan);
if (netdev->flags & IFF_PROMISC) {
@@ -526,15 +527,11 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
} else if ((netdev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(netdev) > ahw->max_mc_count)) {
mode = VPORT_MISS_MODE_ACCEPT_MULTI;
- } else if (!netdev_mc_empty(netdev) &&
- !qlcnic_sriov_vf_check(adapter)) {
+ } else if (!netdev_mc_empty(netdev)) {
netdev_for_each_mc_addr(ha, netdev)
qlcnic_nic_add_mac(adapter, ha->addr, vlan);
}
- if (qlcnic_sriov_vf_check(adapter))
- qlcnic_vf_add_mc_list(netdev, vlan);
-
/* configure unicast MAC address, if there is not sufficient space
* to store all the unicast addresses then enable promiscuous mode
*/
@@ -545,14 +542,12 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
qlcnic_nic_add_mac(adapter, ha->addr, vlan);
}
- if (!qlcnic_sriov_vf_check(adapter)) {
- if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
- !adapter->fdb_mac_learn) {
- qlcnic_alloc_lb_filters_mem(adapter);
- adapter->drv_mac_learn = true;
- } else {
- adapter->drv_mac_learn = false;
- }
+ if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
+ !adapter->fdb_mac_learn) {
+ qlcnic_alloc_lb_filters_mem(adapter);
+ adapter->drv_mac_learn = 1;
+ } else {
+ adapter->drv_mac_learn = 0;
}
qlcnic_nic_set_promisc(adapter, mode);
@@ -561,16 +556,17 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
void qlcnic_set_multi(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_mac_vlan_list *cur;
struct netdev_hw_addr *ha;
- struct qlcnic_mac_list_s *cur;
+ size_t temp;
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return;
if (qlcnic_sriov_vf_check(adapter)) {
if (!netdev_mc_empty(netdev)) {
netdev_for_each_mc_addr(ha, netdev) {
- cur = kzalloc(sizeof(struct qlcnic_mac_list_s),
- GFP_ATOMIC);
+ temp = sizeof(struct qlcnic_mac_vlan_list);
+ cur = kzalloc(temp, GFP_ATOMIC);
if (cur == NULL)
break;
memcpy(cur->mac_addr,
@@ -605,11 +601,11 @@ int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter)
{
- struct qlcnic_mac_list_s *cur;
struct list_head *head = &adapter->mac_list;
+ struct qlcnic_mac_vlan_list *cur;
while (!list_empty(head)) {
- cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+ cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
qlcnic_sre_macaddr_change(adapter,
cur->mac_addr, 0, QLCNIC_MAC_DEL);
list_del(&cur->list);
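
In qlcnic_hw.c the list element becomes struct qlcnic_mac_vlan_list and the duplicate test in qlcnic_nic_add_mac() now compares the (MAC, VLAN) pair, so one address can legitimately be programmed once per VLAN; ether_addr_equal() also replaces the open-coded memcmp(). The essence of the new duplicate test (memcmp() stands in for ether_addr_equal() here):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct mac_vlan {
        unsigned char mac[ETH_ALEN];
        unsigned short vlan_id;
};

/* An entry is a duplicate only when both the address and the VLAN id
 * match; a known MAC with a new VLAN id still gets its own entry. */
static bool entry_matches(const struct mac_vlan *cur,
                          const unsigned char *addr, unsigned short vlan)
{
        return memcmp(cur->mac, addr, ETH_ALEN) == 0 && cur->vlan_id == vlan;
}

int main(void)
{
        struct mac_vlan cur = { { 0x00, 0x0e, 0x1e, 0x01, 0x02, 0x03 }, 10 };
        unsigned char addr[ETH_ALEN] = { 0x00, 0x0e, 0x1e, 0x01, 0x02, 0x03 };

        printf("%d\n", entry_matches(&cur, addr, 10)); /* 1: duplicate   */
        printf("%d\n", entry_matches(&cur, addr, 20)); /* 0: new pairing */
        return 0;
}
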
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index e9c21e5d0ca9..c4262c23ed7c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -134,6 +134,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
struct qlcnic_skb_frag *buffrag;
int i, j;
+ spin_lock(&tx_ring->tx_clean_lock);
+
cmd_buf = tx_ring->cmd_buf_arr;
for (i = 0; i < tx_ring->num_desc; i++) {
buffrag = cmd_buf->frag_array;
@@ -157,6 +159,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
}
cmd_buf++;
}
+
+ spin_unlock(&tx_ring->tx_clean_lock);
}
void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
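
qlcnic_release_tx_buffers() now takes the ring's own tx_clean_lock (initialized per ring in qlcnic_alloc_tx_rings() and removed from the adapter in qlcnic_main.c below), so draining one Tx ring no longer serializes against cleaning another. A user-space model with pthreads standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

struct tx_ring {
        pthread_mutex_t tx_clean_lock;  /* per-ring, not per-adapter */
        unsigned int num_desc;
};

static void release_tx_buffers(struct tx_ring *ring)
{
        pthread_mutex_lock(&ring->tx_clean_lock);
        /* walk ring->num_desc descriptors and unmap/free them here */
        pthread_mutex_unlock(&ring->tx_clean_lock);
}

int main(void)
{
        struct tx_ring ring = { PTHREAD_MUTEX_INITIALIZER, 256 };

        release_tx_buffers(&ring);
        printf("ring drained under its own lock\n");
        return 0;
}
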
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 0149c9495347..6373f6022486 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -127,7 +127,7 @@
struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
struct qlcnic_host_rds_ring *, u16, u16);
-inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
+static inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring)
{
if (qlcnic_check_multi_tx(adapter) &&
@@ -144,13 +144,13 @@ static inline void qlcnic_disable_tx_int(struct qlcnic_adapter *adapter,
writel(1, tx_ring->crb_intr_mask);
}
-inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
+static inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring)
{
writel(0, tx_ring->crb_intr_mask);
}
-inline void qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
+static inline void qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring)
{
writel(1, tx_ring->crb_intr_mask);
@@ -202,7 +202,7 @@ static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
struct hlist_node *n;
hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
- if (!memcmp(tmp_fil->faddr, addr, ETH_ALEN) &&
+ if (ether_addr_equal(tmp_fil->faddr, addr) &&
tmp_fil->vlan_id == vlan_id)
return tmp_fil;
}
@@ -346,7 +346,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
head = &(adapter->fhash.fhead[hindex]);
hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
- if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
+ if (ether_addr_equal(tmp_fil->faddr, &src_addr) &&
tmp_fil->vlan_id == vlan_id) {
if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
qlcnic_change_filter(adapter, &src_addr,
@@ -687,17 +687,15 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
if (adapter->ahw->linkup && !linkup) {
netdev_info(netdev, "NIC Link is down\n");
adapter->ahw->linkup = 0;
- if (netif_running(netdev)) {
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
- }
+ netif_carrier_off(netdev);
} else if (!adapter->ahw->linkup && linkup) {
+ /* Do not advertise Link up if the port is in loopback mode */
+ if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode)
+ return;
+
netdev_info(netdev, "NIC Link is up\n");
adapter->ahw->linkup = 1;
- if (netif_running(netdev)) {
- netif_carrier_on(netdev);
- netif_wake_queue(netdev);
- }
+ netif_carrier_on(netdev);
}
}
@@ -784,7 +782,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
struct net_device *netdev = adapter->netdev;
struct qlcnic_skb_frag *frag;
- if (!spin_trylock(&adapter->tx_clean_lock))
+ if (!spin_trylock(&tx_ring->tx_clean_lock))
return 1;
sw_consumer = tx_ring->sw_consumer;
@@ -813,8 +811,9 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
break;
}
+ tx_ring->sw_consumer = sw_consumer;
+
if (count && netif_running(netdev)) {
- tx_ring->sw_consumer = sw_consumer;
smp_mb();
if (netif_tx_queue_stopped(tx_ring->txq) &&
netif_carrier_ok(netdev)) {
@@ -840,7 +839,8 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
*/
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
done = (sw_consumer == hw_consumer);
- spin_unlock(&adapter->tx_clean_lock);
+
+ spin_unlock(&tx_ring->tx_clean_lock);
return done;
}
@@ -1466,8 +1466,7 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (qlcnic_check_multi_tx(adapter) &&
- !adapter->ahw->diag_test &&
- (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) {
+ !adapter->ahw->diag_test) {
netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
NAPI_POLL_WEIGHT);
} else {
@@ -1540,8 +1539,7 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
if (qlcnic_check_multi_tx(adapter) &&
(adapter->flags & QLCNIC_MSIX_ENABLED) &&
- !adapter->ahw->diag_test &&
- (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) {
+ !adapter->ahw->diag_test) {
for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
napi_enable(&tx_ring->napi);
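
qlcnic_process_cmd_ring() follows suit: it trylocks the per-ring tx_clean_lock and, if the ring is already being cleaned, reports "not done" so NAPI polls again later rather than blocking; note that the consumer index is also published unconditionally now, not only while the netdev is running. Modeled with pthread_mutex_trylock(), which returns nonzero on contention and so mirrors the !spin_trylock() test:

#include <pthread.h>
#include <stdio.h>

struct tx_ring {
        pthread_mutex_t tx_clean_lock;
        unsigned int sw_consumer, hw_consumer;
};

static int process_cmd_ring(struct tx_ring *ring)
{
        int done;

        if (pthread_mutex_trylock(&ring->tx_clean_lock))
                return 1;       /* someone else is cleaning: retry later */

        /* reclaim completed descriptors, then publish the consumer
         * index unconditionally, as the hunk above now does */
        ring->sw_consumer = ring->hw_consumer;
        done = (ring->sw_consumer == ring->hw_consumer);

        pthread_mutex_unlock(&ring->tx_clean_lock);
        return done;
}

int main(void)
{
        struct tx_ring ring = { PTHREAD_MUTEX_INITIALIZER, 0, 4 };

        printf("done=%d\n", process_cmd_ring(&ring));
        return 0;
}
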
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 05c1eef8df13..eeec83a0e664 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -308,12 +308,12 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter)
{
- struct qlcnic_mac_list_s *cur;
+ struct qlcnic_mac_vlan_list *cur;
struct list_head *head;
list_for_each(head, &adapter->mac_list) {
- cur = list_entry(head, struct qlcnic_mac_list_s, list);
- if (!memcmp(adapter->mac_addr, cur->mac_addr, ETH_ALEN)) {
+ cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ if (ether_addr_equal_unaligned(adapter->mac_addr, cur->mac_addr)) {
qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
0, QLCNIC_MAC_DEL);
list_del(&cur->list);
@@ -337,7 +337,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
- if (!memcmp(adapter->mac_addr, addr->sa_data, ETH_ALEN))
+ if (ether_addr_equal_unaligned(adapter->mac_addr, addr->sa_data))
return 0;
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
@@ -646,8 +646,7 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
} else {
adapter->ahw->num_msix = num_msix;
if (qlcnic_check_multi_tx(adapter) &&
- !adapter->ahw->diag_test &&
- (adapter->drv_tx_rings > 1))
+ !adapter->ahw->diag_test)
drv_sds_rings = num_msix - drv_tx_rings;
else
drv_sds_rings = num_msix;
@@ -800,25 +799,26 @@ static void qlcnic_cleanup_pci_map(struct qlcnic_hardware_context *ahw)
static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_pci_info *pci_info;
int ret;
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
- switch (adapter->ahw->port_type) {
+ switch (ahw->port_type) {
case QLCNIC_GBE:
- adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_GBE_PORTS;
+ ahw->total_nic_func = QLCNIC_NIU_MAX_GBE_PORTS;
break;
case QLCNIC_XGBE:
- adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_XG_PORTS;
+ ahw->total_nic_func = QLCNIC_NIU_MAX_XG_PORTS;
break;
}
return 0;
}
- if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
+ if (ahw->op_mode == QLCNIC_MGMT_FUNC)
return 0;
- pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL);
if (!pci_info)
return -ENOMEM;
@@ -846,12 +846,13 @@ static bool qlcnic_port_eswitch_cfg_capability(struct qlcnic_adapter *adapter)
int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_pci_info *pci_info;
int i, id = 0, ret = 0, j = 0;
u16 act_pci_func;
u8 pfn;
- pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL);
if (!pci_info)
return -ENOMEM;
@@ -859,7 +860,7 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
if (ret)
goto err_pci_info;
- act_pci_func = adapter->ahw->act_pci_func;
+ act_pci_func = ahw->total_nic_func;
adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
act_pci_func, GFP_KERNEL);
@@ -875,10 +876,10 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
goto err_npars;
}
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ for (i = 0; i < ahw->max_vnic_func; i++) {
pfn = pci_info[i].id;
- if (pfn >= QLCNIC_MAX_PCI_FUNC) {
+ if (pfn >= ahw->max_vnic_func) {
ret = QL_STATUS_INVALID_PARAM;
goto err_eswitch;
}
@@ -1178,6 +1179,7 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
} else {
adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS;
+ adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
}
@@ -1345,7 +1347,7 @@ int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
if (adapter->need_fw_reset)
return 0;
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
if (!adapter->npars[i].eswitch_status)
continue;
@@ -1408,7 +1410,7 @@ int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
return 0;
/* Set the NPAR config data after FW reset */
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
npar = &adapter->npars[i];
pci_func = npar->pci_func;
if (!adapter->npars[i].eswitch_status)
@@ -1755,7 +1757,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (qlcnic_sriov_vf_check(adapter))
qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
smp_mb();
- spin_lock(&adapter->tx_clean_lock);
netif_carrier_off(netdev);
adapter->ahw->linkup = 0;
netif_tx_disable(netdev);
@@ -1776,7 +1777,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
for (ring = 0; ring < adapter->drv_tx_rings; ring++)
qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
- spin_unlock(&adapter->tx_clean_lock);
}
/* Usage: During suspend and firmware recovery module */
@@ -1940,7 +1940,6 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
qlcnic_detach(adapter);
adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
- adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
adapter->ahw->diag_test = test;
adapter->ahw->linkup = 0;
@@ -2037,7 +2036,7 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter)
void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
- u16 act_pci_fn = ahw->act_pci_func;
+ u16 act_pci_fn = ahw->total_nic_func;
u16 count;
ahw->max_mc_count = QLCNIC_MAX_MC_COUNT;
@@ -2172,6 +2171,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
}
memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
tx_ring->cmd_buf_arr = cmd_buf_arr;
+ spin_lock_init(&tx_ring->tx_clean_lock);
}
if (qlcnic_83xx_check(adapter) ||
@@ -2212,7 +2212,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct qlcnic_hardware_context *ahw;
int err, pci_using_dac = -1;
char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
- struct qlcnic_dcb *dcb;
if (pdev->is_virtfn)
return -ENODEV;
@@ -2290,7 +2289,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_wq;
adapter->dev_rst_time = jiffies;
- adapter->ahw->revision_id = pdev->revision;
+ ahw->revision_id = pdev->revision;
+ ahw->max_vnic_func = qlcnic_get_vnic_func_count(adapter);
if (qlcnic_mac_learn == FDB_MAC_LEARN)
adapter->fdb_mac_learn = true;
else if (qlcnic_mac_learn == DRV_MAC_LEARN)
@@ -2299,7 +2299,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
rwlock_init(&adapter->ahw->crb_lock);
mutex_init(&adapter->ahw->mem_lock);
- spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
qlcnic_register_dcb(adapter);
@@ -2335,10 +2334,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->flags |= QLCNIC_NEED_FLR;
- dcb = adapter->dcb;
-
- if (dcb && qlcnic_dcb_attach(dcb))
- qlcnic_clear_dcb_ops(dcb);
} else if (qlcnic_83xx_check(adapter)) {
qlcnic_83xx_check_vf(adapter, ent);
adapter->portnum = adapter->ahw->pci_func;
@@ -2367,6 +2362,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_hw;
}
+ qlcnic_dcb_enable(adapter->dcb);
+
if (qlcnic_read_mac_addr(adapter))
dev_warn(&pdev->dev, "failed to read mac addr\n");
@@ -2500,13 +2497,11 @@ static void qlcnic_remove(struct pci_dev *pdev)
qlcnic_cancel_idc_work(adapter);
ahw = adapter->ahw;
- qlcnic_dcb_free(adapter->dcb);
-
unregister_netdev(netdev);
qlcnic_sriov_cleanup(adapter);
if (qlcnic_83xx_check(adapter)) {
- qlcnic_83xx_register_nic_idc_func(adapter, 0);
+ qlcnic_83xx_initialize_nic(adapter, 0);
cancel_delayed_work_sync(&adapter->idc_aen_work);
qlcnic_83xx_free_mbx_intr(adapter);
qlcnic_83xx_detach_mailbox_work(adapter);
@@ -2514,6 +2509,8 @@ static void qlcnic_remove(struct pci_dev *pdev)
kfree(ahw->fw_info);
}
+ qlcnic_dcb_free(adapter->dcb);
+
qlcnic_detach(adapter);
if (adapter->npars != NULL)
@@ -2642,7 +2639,7 @@ void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
if (adapter->fhash.fmax && adapter->fhash.fhead)
return;
- act_pci_func = adapter->ahw->act_pci_func;
+ act_pci_func = adapter->ahw->total_nic_func;
spin_lock_init(&adapter->mac_learn_lock);
spin_lock_init(&adapter->rx_mac_learn_lock);
@@ -3725,12 +3722,6 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
return -EINVAL;
}
- if (ring_cnt < 2) {
- netdev_err(netdev,
- "%s rings value should not be lower than 2\n", buf);
- return -EINVAL;
- }
-
if (!is_power_of_2(ring_cnt)) {
netdev_err(netdev, "%s rings value should be a power of 2\n",
buf);
@@ -3788,8 +3779,7 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter, u8 rx_cnt, u8 tx_cnt)
}
if (qlcnic_83xx_check(adapter)) {
- /* register for NIC IDC AEN Events */
- qlcnic_83xx_register_nic_idc_func(adapter, 1);
+ qlcnic_83xx_initialize_nic(adapter, 1);
err = qlcnic_83xx_setup_mbx_intr(adapter);
qlcnic_83xx_disable_mbx_poll(adapter);
if (err) {
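
qlcnic_validate_rings() drops the old "at least 2" floor, so a single ring is now a legal configuration; the remaining constraint is that the count be a power of two (the kernel's is_power_of_2() test, open-coded below for a runnable demo):

#include <stdio.h>

static int is_power_of_2(unsigned long n)
{
        return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
        unsigned long cnt;

        /* 1 is now accepted; 3, 5, 6, 7 are still rejected */
        for (cnt = 1; cnt <= 8; cnt++)
                printf("%lu -> %s\n", cnt,
                       is_power_of_2(cnt) ? "valid" : "rejected");
        return 0;
}
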
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 0daf660e12a1..e14d58c59b01 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -126,8 +126,8 @@ struct qlcnic_vport {
u16 handle;
u16 max_tx_bw;
u16 min_tx_bw;
+ u16 pvid;
u8 vlan_mode;
- u16 vlan;
u8 qos;
bool spoofchk;
u8 mac[6];
@@ -137,6 +137,8 @@ struct qlcnic_vf_info {
u8 pci_func;
u16 rx_ctx_id;
u16 tx_ctx_id;
+ u16 *sriov_vlans;
+ int num_vlan;
unsigned long state;
struct completion ch_free_cmpl;
struct work_struct trans_work;
@@ -149,6 +151,7 @@ struct qlcnic_vf_info {
struct qlcnic_trans_list rcv_pend;
struct qlcnic_adapter *adapter;
struct qlcnic_vport *vp;
+ struct mutex vlan_list_lock; /* Lock for VLAN list */
};
struct qlcnic_async_work_list {
@@ -197,6 +200,13 @@ int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *,
int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8);
int qlcnic_sriov_vf_shutdown(struct pci_dev *);
int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
+void qlcnic_sriov_free_vlans(struct qlcnic_adapter *);
+void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
+bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *);
+void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *,
+ struct qlcnic_vf_info *, u16);
+void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *,
+ struct qlcnic_vf_info *, u16);
static inline bool qlcnic_sriov_enable_check(struct qlcnic_adapter *adapter)
{
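
The header changes replace the single vp->vlan with real bookkeeping: a PVID for port-VLAN mode, plus per-VF guest-VLAN state (the sriov_vlans array, num_vlan count, and vlan_list_lock mutex) sized at runtime from the firmware's allowed-VLAN count. A minimal sketch of that per-VF state (struct vf_info here is a simplified stand-in):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct vf_info {
        unsigned short *sriov_vlans;    /* 0 marks a free slot */
        int num_vlan;
        pthread_mutex_t vlan_list_lock; /* guards the two fields above */
};

/* mirrors qlcnic_sriov_alloc_vlans() for a single VF */
static int vf_alloc_vlans(struct vf_info *vf, int num_allowed)
{
        vf->sriov_vlans = calloc(num_allowed, sizeof(*vf->sriov_vlans));
        if (!vf->sriov_vlans)
                return -1;
        vf->num_vlan = 0;
        pthread_mutex_init(&vf->vlan_list_lock, NULL);
        return 0;
}

int main(void)
{
        struct vf_info vf;

        if (vf_alloc_vlans(&vf, 8))
                return 1;
        printf("VF may hold up to 8 guest VLANs\n");
        free(vf.sriov_vlans);
        return 0;
}
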
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 21a4b274d2e4..bf8fca7d874f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -176,6 +176,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
vf->adapter = adapter;
vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
mutex_init(&vf->send_cmd_lock);
+ mutex_init(&vf->vlan_list_lock);
INIT_LIST_HEAD(&vf->rcv_act.wait_list);
INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
spin_lock_init(&vf->rcv_act.lock);
@@ -276,6 +277,13 @@ static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+
+ if (!sriov)
+ return;
+
+ qlcnic_sriov_free_vlans(adapter);
+
if (qlcnic_sriov_pf_check(adapter))
qlcnic_sriov_pf_cleanup(adapter);
@@ -416,10 +424,15 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
return 0;
sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
+ sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
+ dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
+ sriov->num_allowed_vlans);
+
+ qlcnic_sriov_alloc_vlans(adapter);
+
if (!sriov->any_vlan)
return 0;
- sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
num_vlans = sriov->num_allowed_vlans;
sriov->allowed_vlans = kzalloc(sizeof(u16) * num_vlans, GFP_KERNEL);
if (!sriov->allowed_vlans)
@@ -473,6 +486,8 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
if (err)
return err;
+ ahw->max_mc_count = nic_info.max_rx_mcast_mac_filters;
+
err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
if (err)
return -EIO;
@@ -500,7 +515,6 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
int pci_using_dac)
{
- struct qlcnic_dcb *dcb;
int err;
INIT_LIST_HEAD(&adapter->vf_mc_list);
@@ -538,11 +552,6 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
if (err)
goto err_out_send_channel_term;
- dcb = adapter->dcb;
-
- if (dcb && qlcnic_dcb_attach(dcb))
- qlcnic_clear_dcb_ops(dcb);
-
err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
if (err)
goto err_out_send_channel_term;
@@ -1447,18 +1456,27 @@ out:
return ret;
}
-void qlcnic_vf_add_mc_list(struct net_device *netdev, u16 vlan)
+static void qlcnic_vf_add_mc_list(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_mac_list_s *cur;
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_mac_vlan_list *cur;
struct list_head *head, tmp_list;
+ struct qlcnic_vf_info *vf;
+ u16 vlan_id;
+ int i;
+
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+ vf = &adapter->ahw->sriov->vf_info[0];
INIT_LIST_HEAD(&tmp_list);
head = &adapter->vf_mc_list;
netif_addr_lock_bh(netdev);
while (!list_empty(head)) {
- cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+ cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
list_move(&cur->list, &tmp_list);
}
@@ -1466,8 +1484,28 @@ void qlcnic_vf_add_mc_list(struct net_device *netdev, u16 vlan)
while (!list_empty(&tmp_list)) {
cur = list_entry((&tmp_list)->next,
- struct qlcnic_mac_list_s, list);
- qlcnic_nic_add_mac(adapter, cur->mac_addr, vlan);
+ struct qlcnic_mac_vlan_list, list);
+ if (!qlcnic_sriov_check_any_vlan(vf)) {
+ qlcnic_nic_add_mac(adapter, bcast_addr, 0);
+ qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
+ } else {
+ mutex_lock(&vf->vlan_list_lock);
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ vlan_id = vf->sriov_vlans[i];
+ if (vlan_id) {
+ qlcnic_nic_add_mac(adapter, bcast_addr,
+ vlan_id);
+ qlcnic_nic_add_mac(adapter,
+ cur->mac_addr,
+ vlan_id);
+ }
+ }
+ mutex_unlock(&vf->vlan_list_lock);
+ if (qlcnic_84xx_check(adapter)) {
+ qlcnic_nic_add_mac(adapter, bcast_addr, 0);
+ qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
+ }
+ }
list_del(&cur->list);
kfree(cur);
}
@@ -1490,13 +1528,24 @@ void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- u16 vlan;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 mode = VPORT_MISS_MODE_DROP;
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return;
- vlan = adapter->ahw->sriov->vlan;
- __qlcnic_set_multi(netdev, vlan);
+ if (netdev->flags & IFF_PROMISC) {
+ if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ } else if ((netdev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(netdev) > ahw->max_mc_count)) {
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+ }
+
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_vf_add_mc_list(netdev);
+
+ qlcnic_nic_set_promisc(adapter, mode);
}
static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
@@ -1584,8 +1633,6 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
if (err)
goto err_out_term_channel;
- qlcnic_dcb_get_info(adapter->dcb);
-
return 0;
err_out_term_channel:
@@ -1833,18 +1880,60 @@ static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
cancel_delayed_work_sync(&adapter->fw_work);
}
-static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_sriov *sriov,
+static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+ int i, err = -EINVAL;
+
+ if (!vf->sriov_vlans)
+ return err;
+
+ mutex_lock(&vf->vlan_list_lock);
+
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (vf->sriov_vlans[i] == vlan_id) {
+ err = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&vf->vlan_list_lock);
+ return err;
+}
+
+static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf)
+{
+ int err = 0;
+
+ mutex_lock(&vf->vlan_list_lock);
+
+ if (vf->num_vlan >= sriov->num_allowed_vlans)
+ err = -EINVAL;
+
+ mutex_unlock(&vf->vlan_list_lock);
+ return err;
+}
+
+static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_adapter *adapter,
u16 vid, u8 enable)
{
- u16 vlan = sriov->vlan;
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ bool vlan_exist;
u8 allowed = 0;
int i;
+ vf = &adapter->ahw->sriov->vf_info[0];
+ vlan_exist = qlcnic_sriov_check_any_vlan(vf);
if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
return -EINVAL;
if (enable) {
- if (vlan)
+ if (qlcnic_83xx_vf_check(adapter) && vlan_exist)
+ return -EINVAL;
+
+ if (qlcnic_sriov_validate_num_vlans(sriov, vf))
return -EINVAL;
if (sriov->any_vlan) {
@@ -1857,24 +1946,54 @@ static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_sriov *sriov,
return -EINVAL;
}
} else {
- if (!vlan || vlan != vid)
+ if (!vlan_exist || qlcnic_sriov_check_vlan_id(sriov, vf, vid))
return -EINVAL;
}
return 0;
}
+static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
+ enum qlcnic_vlan_operations opcode)
+{
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_sriov *sriov;
+
+ sriov = adapter->ahw->sriov;
+
+ if (!vf->sriov_vlans)
+ return;
+
+ mutex_lock(&vf->vlan_list_lock);
+
+ switch (opcode) {
+ case QLC_VLAN_ADD:
+ qlcnic_sriov_add_vlan_id(sriov, vf, vlan_id);
+ break;
+ case QLC_VLAN_DELETE:
+ qlcnic_sriov_del_vlan_id(sriov, vf, vlan_id);
+ break;
+ default:
+ netdev_err(adapter->netdev, "Invalid VLAN operation\n");
+ }
+
+ mutex_unlock(&vf->vlan_list_lock);
+ return;
+}
+
int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
u16 vid, u8 enable)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
struct qlcnic_cmd_args cmd;
int ret;
if (vid == 0)
return 0;
- ret = qlcnic_sriov_validate_vlan_cfg(sriov, vid, enable);
+ vf = &adapter->ahw->sriov->vf_info[0];
+ ret = qlcnic_sriov_validate_vlan_cfg(adapter, vid, enable);
if (ret)
return ret;
@@ -1894,11 +2013,11 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
qlcnic_free_mac_list(adapter);
if (enable)
- sriov->vlan = vid;
+ qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
else
- sriov->vlan = 0;
+ qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);
- qlcnic_sriov_vf_set_multi(adapter->netdev);
+ qlcnic_set_multi(adapter->netdev);
}
qlcnic_free_mbx_args(&cmd);
@@ -1908,20 +2027,18 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
{
struct list_head *head = &adapter->mac_list;
- struct qlcnic_mac_list_s *cur;
- u16 vlan;
-
- vlan = adapter->ahw->sriov->vlan;
+ struct qlcnic_mac_vlan_list *cur;
while (!list_empty(head)) {
- cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
- qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
- vlan, QLCNIC_MAC_DEL);
+ cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
+ qlcnic_sre_macaddr_change(adapter, cur->mac_addr, cur->vlan_id,
+ QLCNIC_MAC_DEL);
list_del(&cur->list);
kfree(cur);
}
}
+
int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
@@ -1972,3 +2089,70 @@ int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
idc->delay);
return err;
}
+
+void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ int i;
+
+ for (i = 0; i < sriov->num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
+ sizeof(*vf->sriov_vlans), GFP_KERNEL);
+ }
+}
+
+void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ int i;
+
+ for (i = 0; i < sriov->num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ kfree(vf->sriov_vlans);
+ vf->sriov_vlans = NULL;
+ }
+}
+
+void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+ int i;
+
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (!vf->sriov_vlans[i]) {
+ vf->sriov_vlans[i] = vlan_id;
+ vf->num_vlan++;
+ return;
+ }
+ }
+}
+
+void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+ int i;
+
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (vf->sriov_vlans[i] == vlan_id) {
+ vf->sriov_vlans[i] = 0;
+ vf->num_vlan--;
+ return;
+ }
+ }
+}
+
+bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
+{
+ bool err = false;
+
+ mutex_lock(&vf->vlan_list_lock);
+
+ if (vf->num_vlan)
+ err = true;
+
+ mutex_unlock(&vf->vlan_list_lock);
+ return err;
+}
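
qlcnic_sriov_add_vlan_id()/qlcnic_sriov_del_vlan_id() implement a simple slot allocator over that array: zero means free, adds take the first free slot, deletes clear the matching one, and num_vlan tracks occupancy (callers hold vlan_list_lock). Extracted into a runnable form:

#include <stdio.h>

#define MAX_VLANS 4

static unsigned short vlans[MAX_VLANS];
static int num_vlan;

static void add_vlan(unsigned short id)
{
        int i;

        for (i = 0; i < MAX_VLANS; i++) {
                if (!vlans[i]) {        /* first free slot wins */
                        vlans[i] = id;
                        num_vlan++;
                        return;
                }
        }
}

static void del_vlan(unsigned short id)
{
        int i;

        for (i = 0; i < MAX_VLANS; i++) {
                if (vlans[i] == id) {   /* clear the matching slot */
                        vlans[i] = 0;
                        num_vlan--;
                        return;
                }
        }
}

int main(void)
{
        add_vlan(100);
        add_vlan(200);
        del_vlan(100);
        printf("active VLANs: %d\n", num_vlan);         /* 1 */
        return 0;
}
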
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 686f460b1502..d14d9a139eef 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -9,7 +9,7 @@
#include "qlcnic.h"
#include <linux/types.h>
-#define QLCNIC_SRIOV_VF_MAX_MAC 1
+#define QLCNIC_SRIOV_VF_MAX_MAC 8
#define QLC_VF_MIN_TX_RATE 100
#define QLC_VF_MAX_TX_RATE 9999
@@ -64,9 +64,10 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
struct qlcnic_resources *res = &sriov->ff_max;
- u32 temp, num_vf_macs, num_vfs, max;
+ u16 num_macs = sriov->num_allowed_vlans + 1;
int ret = -EIO, vpid, id;
struct qlcnic_vport *vp;
+ u32 num_vfs, max, temp;
vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
if (vpid < 0)
@@ -76,16 +77,25 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
max = num_vfs + 1;
info->bit_offsets = 0xffff;
info->max_tx_ques = res->num_tx_queues / max;
+
+ if (qlcnic_83xx_pf_check(adapter))
+ num_macs = 1;
+
info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
- num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC;
if (adapter->ahw->pci_func == func) {
- temp = res->num_rx_mcast_mac_filters - (num_vfs * num_vf_macs);
- info->max_rx_ucast_mac_filters = temp;
- temp = res->num_tx_mac_filters - (num_vfs * num_vf_macs);
- info->max_tx_mac_filters = temp;
info->min_tx_bw = 0;
info->max_tx_bw = MAX_BW;
+
+ temp = res->num_rx_ucast_mac_filters - num_macs * num_vfs;
+ info->max_rx_ucast_mac_filters = temp;
+ temp = res->num_tx_mac_filters - num_macs * num_vfs;
+ info->max_tx_mac_filters = temp;
+ temp = num_macs * num_vfs * QLCNIC_SRIOV_VF_MAX_MAC;
+ temp = res->num_rx_mcast_mac_filters - temp;
+ info->max_rx_mcast_mac_filters = temp;
+
+ info->max_tx_ques = res->num_tx_queues - sriov->num_vfs;
} else {
id = qlcnic_sriov_func_to_index(adapter, func);
if (id < 0)
@@ -93,8 +103,13 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
vp = sriov->vf_info[id].vp;
info->min_tx_bw = vp->min_tx_bw;
info->max_tx_bw = vp->max_tx_bw;
- info->max_rx_ucast_mac_filters = num_vf_macs;
- info->max_tx_mac_filters = num_vf_macs;
+
+ info->max_rx_ucast_mac_filters = num_macs;
+ info->max_tx_mac_filters = num_macs;
+ temp = num_macs * QLCNIC_SRIOV_VF_MAX_MAC;
+ info->max_rx_mcast_mac_filters = temp;
+
+ info->max_tx_ques = QLCNIC_SINGLE_RING;
}
info->max_rx_ip_addr = res->num_destip / max;
@@ -132,6 +147,25 @@ static void qlcnic_sriov_pf_set_ff_max_res(struct qlcnic_adapter *adapter,
ff_max->max_local_ipv6_addrs = info->max_local_ipv6_addrs;
}
+static void qlcnic_sriov_set_vf_max_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ int temp, total_fn;
+
+ temp = npar_info->max_rx_mcast_mac_filters;
+ total_fn = sriov->num_vfs + 1;
+
+ temp = temp / (QLCNIC_SRIOV_VF_MAX_MAC * total_fn);
+ sriov->num_allowed_vlans = temp - 1;
+
+ if (qlcnic_83xx_pf_check(adapter))
+ sriov->num_allowed_vlans = 1;
+
+ netdev_info(adapter->netdev, "Max Guest VLANs supported per VF = %d\n",
+ sriov->num_allowed_vlans);
+}
+
static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter,
struct qlcnic_info *npar_info)
{
@@ -165,6 +199,7 @@ static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter,
npar_info->max_local_ipv6_addrs = LSW(cmd.rsp.arg[8]);
npar_info->max_remote_ipv6_addrs = MSW(cmd.rsp.arg[8]);
+ qlcnic_sriov_set_vf_max_vlan(adapter, npar_info);
qlcnic_sriov_pf_set_ff_max_res(adapter, npar_info);
dev_info(&adapter->pdev->dev,
"\n\ttotal_pf: %d,\n"
@@ -403,6 +438,8 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
qlcnic_sriov_pf_disable(adapter);
+ qlcnic_sriov_free_vlans(adapter);
+
qlcnic_sriov_pf_cleanup(adapter);
/* After disabling SRIOV re-init the driver in default mode
@@ -511,6 +548,8 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
if (err)
goto del_flr_queue;
+ qlcnic_sriov_alloc_vlans(adapter);
+
err = qlcnic_sriov_pf_enable(adapter, num_vfs);
return err;
@@ -608,7 +647,7 @@ static int qlcnic_sriov_set_vf_acl(struct qlcnic_adapter *adapter, u8 func)
if (vp->vlan_mode == QLC_PVID_MODE) {
cmd.req.arg[2] |= BIT_6;
- cmd.req.arg[3] |= vp->vlan << 8;
+ cmd.req.arg[3] |= vp->pvid << 8;
}
err = qlcnic_issue_cmd(adapter, &cmd);
@@ -643,10 +682,13 @@ static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_vport *vp = vf->vp;
struct qlcnic_adapter *adapter;
+ struct qlcnic_sriov *sriov;
u16 func = vf->pci_func;
+ size_t size;
int err;
adapter = vf->adapter;
+ sriov = adapter->ahw->sriov;
if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) {
err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
@@ -656,8 +698,12 @@ static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
qlcnic_sriov_pf_config_vport(adapter, 0, func);
}
} else {
- if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
- vp->vlan = 0;
+ if (vp->vlan_mode == QLC_GUEST_VLAN_MODE) {
+ size = sizeof(*vf->sriov_vlans);
+ size = size * sriov->num_allowed_vlans;
+ memset(vf->sriov_vlans, 0, size);
+ }
+
err = qlcnic_sriov_pf_config_vport(adapter, 0, func);
}
@@ -679,20 +725,23 @@ err_out:
}
static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
- struct qlcnic_vport *vp,
- u16 func, u16 vlan, u8 op)
+ struct qlcnic_vf_info *vf,
+ u16 vlan, u8 op)
{
struct qlcnic_cmd_args cmd;
struct qlcnic_macvlan_mbx mv;
+ struct qlcnic_vport *vp;
u8 *addr;
int err;
u32 *buf;
int vpid;
+ vp = vf->vp;
+
if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN))
return -ENOMEM;
- vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
if (vpid < 0) {
err = -EINVAL;
goto out;
@@ -736,6 +785,35 @@ static int qlcnic_sriov_validate_create_rx_ctx(struct qlcnic_cmd_args *cmd)
return 0;
}
+static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ int opcode)
+{
+ struct qlcnic_sriov *sriov;
+ u16 vlan;
+ int i;
+
+ sriov = adapter->ahw->sriov;
+
+ mutex_lock(&vf->vlan_list_lock);
+ if (vf->num_vlan) {
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ vlan = vf->sriov_vlans[i];
+ if (vlan)
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan,
+ opcode);
+ }
+ }
+ mutex_unlock(&vf->vlan_list_lock);
+
+ if (vf->vp->vlan_mode != QLC_PVID_MODE) {
+ if (qlcnic_83xx_pf_check(adapter) &&
+ qlcnic_sriov_check_any_vlan(vf))
+ return;
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0, opcode);
+ }
+}
+
static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
struct qlcnic_cmd_args *cmd)
{
@@ -743,7 +821,6 @@ static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
struct qlcnic_adapter *adapter = vf->adapter;
struct qlcnic_rcv_mbx_out *mbx_out;
int err;
- u16 vlan;
err = qlcnic_sriov_validate_create_rx_ctx(cmd);
if (err) {
@@ -754,12 +831,10 @@ static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
cmd->req.arg[6] = vf->vp->handle;
err = qlcnic_issue_cmd(adapter, cmd);
- vlan = vf->vp->vlan;
if (!err) {
mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd->rsp.arg[1];
vf->rx_ctx_id = mbx_out->ctx_id;
- qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func,
- vlan, QLCNIC_MAC_ADD);
+ qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_ADD);
} else {
vf->rx_ctx_id = 0;
}
@@ -843,7 +918,6 @@ static int qlcnic_sriov_pf_del_rx_ctx_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_adapter *adapter = vf->adapter;
int err;
- u16 vlan;
err = qlcnic_sriov_validate_del_rx_ctx(vf, cmd);
if (err) {
@@ -851,9 +925,7 @@ static int qlcnic_sriov_pf_del_rx_ctx_cmd(struct qlcnic_bc_trans *trans,
return err;
}
- vlan = vf->vp->vlan;
- qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func,
- vlan, QLCNIC_MAC_DEL);
+ qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_DEL);
cmd->req.arg[1] |= vf->vp->handle << 16;
err = qlcnic_issue_cmd(adapter, cmd);
@@ -1120,7 +1192,7 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
cmd->req.arg[1] &= ~0x7;
new_op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
- cmd->req.arg[3] |= vp->vlan << 16;
+ cmd->req.arg[3] |= vp->pvid << 16;
cmd->req.arg[1] |= new_op;
}
@@ -1190,8 +1262,10 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_vport *vp = vf->vp;
u8 cmd_op, mode = vp->vlan_mode;
struct qlcnic_adapter *adapter;
+ struct qlcnic_sriov *sriov;
adapter = vf->adapter;
+ sriov = adapter->ahw->sriov;
cmd_op = trans->req_hdr->cmd_op;
cmd->rsp.arg[0] |= 1 << 25;
@@ -1205,10 +1279,10 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
switch (mode) {
case QLC_GUEST_VLAN_MODE:
cmd->rsp.arg[1] = mode | 1 << 8;
- cmd->rsp.arg[2] = 1 << 16;
+ cmd->rsp.arg[2] = sriov->num_allowed_vlans << 16;
break;
case QLC_PVID_MODE:
- cmd->rsp.arg[1] = mode | 1 << 8 | vp->vlan << 16;
+ cmd->rsp.arg[1] = mode | 1 << 8 | vp->pvid << 16;
break;
}
@@ -1216,24 +1290,27 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
}
static int qlcnic_sriov_pf_del_guest_vlan(struct qlcnic_adapter *adapter,
- struct qlcnic_vf_info *vf)
-
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
{
- struct qlcnic_vport *vp = vf->vp;
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ u16 vlan;
- if (!vp->vlan)
+ if (!qlcnic_sriov_check_any_vlan(vf))
return -EINVAL;
+ vlan = cmd->req.arg[1] >> 16;
if (!vf->rx_ctx_id) {
- vp->vlan = 0;
+ qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
return 0;
}
- qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- vp->vlan, QLCNIC_MAC_DEL);
- vp->vlan = 0;
- qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- 0, QLCNIC_MAC_ADD);
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_DEL);
+ qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
+
+ if (qlcnic_83xx_pf_check(adapter))
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf,
+ 0, QLCNIC_MAC_ADD);
return 0;
}
@@ -1241,32 +1318,37 @@ static int qlcnic_sriov_pf_add_guest_vlan(struct qlcnic_adapter *adapter,
struct qlcnic_vf_info *vf,
struct qlcnic_cmd_args *cmd)
{
- struct qlcnic_vport *vp = vf->vp;
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
int err = -EIO;
+ u16 vlan;
- if (vp->vlan)
+ if (qlcnic_83xx_pf_check(adapter) && qlcnic_sriov_check_any_vlan(vf))
return err;
+ vlan = cmd->req.arg[1] >> 16;
+
if (!vf->rx_ctx_id) {
- vp->vlan = cmd->req.arg[1] >> 16;
+ qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
return 0;
}
- err = qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- 0, QLCNIC_MAC_DEL);
- if (err)
- return err;
+ if (qlcnic_83xx_pf_check(adapter)) {
+ err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0,
+ QLCNIC_MAC_DEL);
+ if (err)
+ return err;
+ }
- vp->vlan = cmd->req.arg[1] >> 16;
- err = qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- vp->vlan, QLCNIC_MAC_ADD);
+ err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_ADD);
if (err) {
- qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- 0, QLCNIC_MAC_ADD);
- vp->vlan = 0;
+ if (qlcnic_83xx_pf_check(adapter))
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0,
+ QLCNIC_MAC_ADD);
+ return err;
}
+ qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
return err;
}
@@ -1289,7 +1371,7 @@ static int qlcnic_sriov_pf_cfg_guest_vlan_cmd(struct qlcnic_bc_trans *tran,
if (op)
err = qlcnic_sriov_pf_add_guest_vlan(adapter, vf, cmd);
else
- err = qlcnic_sriov_pf_del_guest_vlan(adapter, vf);
+ err = qlcnic_sriov_pf_del_guest_vlan(adapter, vf, cmd);
cmd->rsp.arg[0] |= err ? 2 << 25 : 1 << 25;
return err;
@@ -1299,8 +1381,6 @@ static const int qlcnic_pf_passthru_supp_cmds[] = {
QLCNIC_CMD_GET_STATISTICS,
QLCNIC_CMD_GET_PORT_CONFIG,
QLCNIC_CMD_GET_LINK_STATUS,
- QLCNIC_CMD_DCB_QUERY_CAP,
- QLCNIC_CMD_DCB_QUERY_PARAM,
QLCNIC_CMD_INIT_NIC_FUNC,
QLCNIC_CMD_STOP_NIC_FUNC,
};
@@ -1596,7 +1676,8 @@ void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
}
if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
- vp->vlan = 0;
+ memset(vf->sriov_vlans, 0,
+ sizeof(*vf->sriov_vlans) * sriov->num_allowed_vlans);
qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
netdev_info(dev, "FLR received for PCI func %d\n", vf->pci_func);
@@ -1766,20 +1847,22 @@ int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
return -EOPNOTSUPP;
}
+ memset(vf_info->sriov_vlans, 0,
+ sizeof(*vf_info->sriov_vlans) * sriov->num_allowed_vlans);
+
switch (vlan) {
case 4095:
- vp->vlan = 0;
vp->vlan_mode = QLC_GUEST_VLAN_MODE;
break;
case 0:
vp->vlan_mode = QLC_NO_VLAN_MODE;
- vp->vlan = 0;
vp->qos = 0;
break;
default:
vp->vlan_mode = QLC_PVID_MODE;
- vp->vlan = vlan;
+ qlcnic_sriov_add_vlan_id(sriov, vf_info, vlan);
vp->qos = qos;
+ vp->pvid = vlan;
}
netdev_info(netdev, "Setting VLAN %d, QoS %d, for VF %d\n",
@@ -1794,7 +1877,7 @@ static __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter,
switch (vp->vlan_mode) {
case QLC_PVID_MODE:
- vlan = vp->vlan;
+ vlan = vp->pvid;
break;
case QLC_GUEST_VLAN_MODE:
vlan = MAX_VLAN_ID;
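The thread running through the qlcnic_sriov_pf.c hunks above: a VF no longer carries a single vp->vlan, but a table of allowed VLAN IDs sized by sriov->num_allowed_vlans, which the FLR and ndo_set_vf_vlan paths now clear wholesale with memset(). The add/del helpers themselves are defined elsewhere in the driver and are not part of this diff; the sketch below only illustrates one plausible shape for them, and the slot convention (0 == free) is an assumption.

/* Sketch only: per-VF allowed-VLAN table, assuming vf->sriov_vlans is a
 * u16 array of sriov->num_allowed_vlans entries where 0 marks a free slot.
 */
static void sketch_add_vlan_id(struct qlcnic_sriov *sriov,
			       struct qlcnic_vf_info *vf, u16 vlan_id)
{
	int i;

	for (i = 0; i < sriov->num_allowed_vlans; i++) {
		if (!vf->sriov_vlans[i]) {
			vf->sriov_vlans[i] = vlan_id;	/* take first free slot */
			return;
		}
	}
}

static void sketch_del_vlan_id(struct qlcnic_sriov *sriov,
			       struct qlcnic_vf_info *vf, u16 vlan_id)
{
	int i;

	for (i = 0; i < sriov->num_allowed_vlans; i++) {
		if (vf->sriov_vlans[i] == vlan_id) {
			vf->sriov_vlans[i] = 0;		/* release the slot */
			return;
		}
	}
}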
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 1a9f8a400e50..b5296672c447 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -360,10 +360,28 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
return size;
}
-static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
+static u32 qlcnic_get_pci_func_count(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 count = 0;
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return ahw->total_nic_func;
+
+ if (ahw->total_pci_func <= QLC_DEFAULT_VNIC_COUNT)
+ count = QLC_DEFAULT_VNIC_COUNT;
+ else
+ count = ahw->max_vnic_func;
+
+ return count;
+}
+
+int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
+{
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
int i;
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+
+ for (i = 0; i < pci_func_count; i++) {
if (adapter->npars[i].pci_func == pci_func)
return i;
}
@@ -382,7 +400,6 @@ static int validate_pm_config(struct qlcnic_adapter *adapter,
src_pci_func = pm_cfg[i].pci_func;
dest_pci_func = pm_cfg[i].dest_npar;
src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func);
-
if (src_index < 0)
return QL_STATUS_INVALID_PARAM;
@@ -439,6 +456,8 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
for (i = 0; i < count; i++) {
pci_func = pm_cfg[i].pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return QL_STATUS_INVALID_PARAM;
id = adapter->npars[index].phy_port;
adapter->npars[index].enable_pm = !!pm_cfg[i].action;
adapter->npars[index].dest_npar = id;
@@ -455,17 +474,19 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
- int i;
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+ struct qlcnic_pm_func_cfg *pm_cfg;
+ int i, pm_cfg_size;
u8 pci_func;
- if (size != sizeof(pm_cfg))
+ pm_cfg_size = pci_func_count * sizeof(*pm_cfg);
+ if (size != pm_cfg_size)
return QL_STATUS_INVALID_PARAM;
- memset(&pm_cfg, 0,
- sizeof(struct qlcnic_pm_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+ memset(buf, 0, pm_cfg_size);
+ pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ for (i = 0; i < pci_func_count; i++) {
pci_func = adapter->npars[i].pci_func;
if (!adapter->npars[i].active)
continue;
@@ -477,26 +498,26 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
pm_cfg[pci_func].dest_npar = 0;
pm_cfg[pci_func].pci_func = i;
}
- memcpy(buf, &pm_cfg, size);
-
return size;
}
static int validate_esw_config(struct qlcnic_adapter *adapter,
struct qlcnic_esw_func_cfg *esw_cfg, int count)
{
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int i, ret;
u32 op_mode;
u8 pci_func;
- int i, ret;
if (qlcnic_82xx_check(adapter))
- op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
+ op_mode = readl(ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
else
- op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+ op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE);
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
- if (pci_func >= QLCNIC_MAX_PCI_FUNC)
+ if (pci_func >= pci_func_count)
return QL_STATUS_INVALID_PARAM;
if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
@@ -600,6 +621,8 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return QL_STATUS_INVALID_PARAM;
npar = &adapter->npars[index];
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
@@ -629,16 +652,19 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+ struct qlcnic_esw_func_cfg *esw_cfg;
+ size_t esw_cfg_size;
u8 i, pci_func;
- if (size != sizeof(esw_cfg))
+ esw_cfg_size = pci_func_count * sizeof(*esw_cfg);
+ if (size != esw_cfg_size)
return QL_STATUS_INVALID_PARAM;
- memset(&esw_cfg, 0,
- sizeof(struct qlcnic_esw_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+ memset(buf, 0, esw_cfg_size);
+ esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ for (i = 0; i < pci_func_count; i++) {
pci_func = adapter->npars[i].pci_func;
if (!adapter->npars[i].active)
continue;
@@ -650,9 +676,6 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
return QL_STATUS_INVALID_PARAM;
}
-
- memcpy(buf, &esw_cfg, size);
-
return size;
}
@@ -711,6 +734,8 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
if (ret)
return ret;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return QL_STATUS_INVALID_PARAM;
adapter->npars[index].min_bw = nic_info.min_tx_bw;
adapter->npars[index].max_bw = nic_info.max_tx_bw;
}
@@ -726,27 +751,28 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+ struct qlcnic_npar_func_cfg *np_cfg;
struct qlcnic_info nic_info;
- struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
+ size_t np_cfg_size;
int i, ret;
- if (size != sizeof(np_cfg))
+ np_cfg_size = pci_func_count * sizeof(*np_cfg);
+ if (size != np_cfg_size)
return QL_STATUS_INVALID_PARAM;
memset(&nic_info, 0, sizeof(struct qlcnic_info));
- memset(&np_cfg, 0,
- sizeof(struct qlcnic_npar_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+ memset(buf, 0, np_cfg_size);
+ np_cfg = (struct qlcnic_npar_func_cfg *)buf;
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ for (i = 0; i < pci_func_count; i++) {
if (qlcnic_is_valid_nic_func(adapter, i) < 0)
continue;
ret = qlcnic_get_nic_info(adapter, &nic_info, i);
if (ret)
return ret;
-
if (!adapter->npars[i].eswitch_status)
continue;
-
np_cfg[i].pci_func = i;
np_cfg[i].op_mode = (u8)nic_info.op_mode;
np_cfg[i].port_num = nic_info.phys_port;
@@ -756,8 +782,6 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
}
-
- memcpy(buf, &np_cfg, size);
return size;
}
@@ -769,6 +793,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
struct qlcnic_esw_statistics port_stats;
int ret;
@@ -778,7 +803,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
if (size != sizeof(struct qlcnic_esw_statistics))
return QL_STATUS_INVALID_PARAM;
- if (offset >= QLCNIC_MAX_PCI_FUNC)
+ if (offset >= pci_func_count)
return QL_STATUS_INVALID_PARAM;
memset(&port_stats, 0, size);
@@ -869,12 +894,13 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
int ret;
if (qlcnic_83xx_check(adapter))
return QLC_STATUS_UNSUPPORTED_CMD;
- if (offset >= QLCNIC_MAX_PCI_FUNC)
+ if (offset >= pci_func_count)
return QL_STATUS_INVALID_PARAM;
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
@@ -898,27 +924,32 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+ struct qlcnic_pci_func_cfg *pci_cfg;
struct qlcnic_pci_info *pci_info;
+ size_t pci_info_sz, pci_cfg_sz;
int i, ret;
- if (size != sizeof(pci_cfg))
+ pci_cfg_sz = pci_func_count * sizeof(*pci_cfg);
+ if (size != pci_cfg_sz)
return QL_STATUS_INVALID_PARAM;
- pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ pci_info_sz = pci_func_count * sizeof(*pci_info);
+ pci_info = vmalloc(pci_info_sz);
if (!pci_info)
return -ENOMEM;
+ memset(pci_info, 0, pci_info_sz);
+ memset(buf, 0, pci_cfg_sz);
+ pci_cfg = (struct qlcnic_pci_func_cfg *)buf;
+
ret = qlcnic_get_pci_info(adapter, pci_info);
if (ret) {
- kfree(pci_info);
+ vfree(pci_info);
return ret;
}
- memset(&pci_cfg, 0,
- sizeof(struct qlcnic_pci_func_cfg) * QLCNIC_MAX_PCI_FUNC);
-
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ for (i = 0; i < pci_func_count; i++) {
pci_cfg[i].pci_func = pci_info[i].id;
pci_cfg[i].func_type = pci_info[i].type;
pci_cfg[i].port_num = pci_info[i].default_port;
@@ -927,8 +958,7 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
}
- memcpy(buf, &pci_cfg, size);
- kfree(pci_info);
+ vfree(pci_info);
return size;
}
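Every read handler reworked in qlcnic_sysfs.c follows the same shape: derive the table size from the adapter's real function count instead of the QLCNIC_MAX_PCI_FUNC compile-time cap, reject reads of any other size, and build the records directly in the caller-supplied buf so the trailing memcpy() disappears. A condensed sketch of that pattern, with struct my_cfg as a hypothetical stand-in for the various *_func_cfg record types:

static ssize_t sketch_read_cfg(struct qlcnic_adapter *adapter,
			       char *buf, size_t size)
{
	u32 count = qlcnic_get_pci_func_count(adapter);
	struct my_cfg *cfg;			/* hypothetical record type */
	size_t cfg_size = count * sizeof(*cfg);
	u32 i;

	if (size != cfg_size)			/* whole table or nothing */
		return QL_STATUS_INVALID_PARAM;

	memset(buf, 0, cfg_size);
	cfg = (struct my_cfg *)buf;		/* fill records in place */

	for (i = 0; i < count; i++)
		cfg[i].pci_func = adapter->npars[i].pci_func;

	return size;				/* no final memcpy() needed */
}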
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 0c9c4e895595..ef332708e5f2 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "1.00.00.33"
+#define DRV_VERSION "1.00.00.34"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
@@ -2248,7 +2248,6 @@ int ql_mb_get_port_cfg(struct ql_adapter *qdev);
int ql_mb_set_port_cfg(struct ql_adapter *qdev);
int ql_wait_fifo_empty(struct ql_adapter *qdev);
void ql_get_dump(struct ql_adapter *qdev, void *buff);
-void ql_gen_reg_dump(struct ql_adapter *qdev, struct ql_reg_dump *mpi_coredump);
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
int ql_own_firmware(struct ql_adapter *qdev);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 6bc5db703920..829be21f97b2 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -1242,8 +1242,8 @@ static void ql_get_core_dump(struct ql_adapter *qdev)
ql_queue_fw_error(qdev);
}
-void ql_gen_reg_dump(struct ql_adapter *qdev,
- struct ql_reg_dump *mpi_coredump)
+static void ql_gen_reg_dump(struct ql_adapter *qdev,
+ struct ql_reg_dump *mpi_coredump)
{
int i, status;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 0780e039b271..8dee1beb9854 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -181,6 +181,7 @@ static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
};
#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
#define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats)
+#define QLGE_RCV_MAC_ERR_STATS 7
static int ql_update_ring_coalescing(struct ql_adapter *qdev)
{
@@ -280,6 +281,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
iter++;
}
+ /* Update receive mac error statistics */
+ iter += QLGE_RCV_MAC_ERR_STATS;
+
/*
* Get Per-priority TX pause frame counter statistics.
*/
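The iter pointer in ql_update_stats() walks the statistics structure field by field, in lock-step with the ethtool strings table; advancing it by QLGE_RCV_MAC_ERR_STATS (7) steps past the block of receive-MAC-error counters, which this loop does not populate, so the two arrays stay aligned. A toy, self-contained illustration of why the skip count must match the struct layout (all names invented for the example):

#include <stdio.h>

struct toy_stats {
	unsigned long tx_pkts;
	unsigned long rx_err[7];	/* filled by another routine */
	unsigned long tx_pause;
};

int main(void)
{
	struct toy_stats s = { .tx_pkts = 1, .tx_pause = 2 };
	unsigned long *iter = &s.tx_pkts;

	printf("tx_pkts = %lu\n", *iter++);
	iter += 7;			/* skip the rx_err block */
	printf("tx_pause = %lu\n", *iter);
	return 0;
}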
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index a245dc18d769..449f506d2e8f 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2376,14 +2376,6 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev,
netdev_features_t features)
{
int err;
- /*
- * Since there is no support for separate rx/tx vlan accel
- * enable/disable make sure tx flag is always in same state as rx.
- */
- if (features & NETIF_F_HW_VLAN_CTAG_RX)
- features |= NETIF_F_HW_VLAN_CTAG_TX;
- else
- features &= ~NETIF_F_HW_VLAN_CTAG_TX;
/* Update the behavior of vlan accel in the adapter */
err = qlge_update_hw_vlan_features(ndev, features);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index f2a2128165dd..737c1a881f78 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -678,9 +678,6 @@ static void cp_tx (struct cp_private *cp)
le32_to_cpu(txd->opts1) & 0xffff,
PCI_DMA_TODEVICE);
- bytes_compl += skb->len;
- pkts_compl++;
-
if (status & LastFrag) {
if (status & (TxError | TxFIFOUnder)) {
netif_dbg(cp, tx_err, cp->dev,
@@ -702,6 +699,8 @@ static void cp_tx (struct cp_private *cp)
netif_dbg(cp, tx_done, cp->dev,
"tx done, slot %d\n", tx_tail);
}
+ bytes_compl += skb->len;
+ pkts_compl++;
dev_kfree_skb_irq(skb);
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 799387570766..c737f0ea5de7 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -3465,6 +3465,11 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x14, 0x9065);
rtl_writephy(tp, 0x14, 0x1065);
+ /* Check ALDPS bit, disable it if enabled */
+ rtl_writephy(tp, 0x1f, 0x0a43);
+ if (rtl_readphy(tp, 0x10) & 0x0004)
+ rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004);
+
rtl_writephy(tp, 0x1f, 0x0000);
}
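rtl_w1w0_phy() is the driver's read-modify-write helper for PHY registers: it reads the register, ORs in the first mask and clears the second, so rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004) clears exactly the ALDPS bit that the preceding rtl_readphy() tested. Roughly (a sketch of the semantics, not the driver's verbatim body):

static void sketch_w1w0_phy(struct rtl8169_private *tp, int reg,
			    int set_bits, int clear_bits)
{
	int val = rtl_readphy(tp, reg);

	/* write back with set_bits forced to 1 and clear_bits forced to 0 */
	rtl_writephy(tp, reg, (val | set_bits) & ~clear_bits);
}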
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index a30c4395b232..9e757c792d84 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -13,4 +13,4 @@ config SH_ETH
Renesas SuperH Ethernet device driver.
The CPUs supported by this driver are:
- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
- R8A7740, R8A777x and R8A7790.
+ R8A7740, R8A777x and R8A779x.
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index d256ce19d4de..888410737dbd 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1,5 +1,4 @@
-/*
- * SuperH Ethernet device driver
+/* SuperH Ethernet device driver
*
* Copyright (C) 2006-2012 Nobuhiro Iwamatsu
* Copyright (C) 2008-2013 Renesas Solutions Corp.
@@ -13,9 +12,6 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -395,8 +391,8 @@ static struct sh_eth_cpu_data r8a777x_data = {
.hw_swap = 1,
};
-/* R8A7790 */
-static struct sh_eth_cpu_data r8a7790_data = {
+/* R8A7790/1 */
+static struct sh_eth_cpu_data r8a779x_data = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_r8a777x,
@@ -646,8 +642,8 @@ static struct sh_eth_cpu_data sh7763_data = {
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.tx_check = EESR_TC1 | EESR_FTC,
- .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
- EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
EESR_ECI,
.apr = 1,
@@ -732,7 +728,7 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
if (!cd->fcftr_value)
- cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
+ cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
DEFAULT_FIFO_F_D_RFD;
if (!cd->fdr_value)
@@ -849,20 +845,17 @@ static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
return x;
}
-/*
- * Program the hardware MAC address from dev->dev_addr.
- */
+/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
{
sh_eth_write(ndev,
- (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
- (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
+ (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
+ (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
sh_eth_write(ndev,
- (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
+ (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}
-/*
- * Get MAC address from SuperH MAC address register
+/* Get MAC address from SuperH MAC address register
*
* SuperH's Ethernet device doesn't have a ROM for the MAC address.
* This driver gets the MAC address set up by the bootloader (U-Boot or sh-ipl+g).
@@ -1019,8 +1012,10 @@ static void sh_eth_ring_format(struct net_device *ndev)
int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
- mdp->cur_rx = mdp->cur_tx = 0;
- mdp->dirty_rx = mdp->dirty_tx = 0;
+ mdp->cur_rx = 0;
+ mdp->cur_tx = 0;
+ mdp->dirty_rx = 0;
+ mdp->dirty_tx = 0;
memset(mdp->rx_ring, 0, rx_ringsize);
@@ -1033,7 +1028,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
if (skb == NULL)
break;
dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
- DMA_FROM_DEVICE);
+ DMA_FROM_DEVICE);
sh_eth_set_receive_align(skb);
/* RX descriptor */
@@ -1081,8 +1076,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
struct sh_eth_private *mdp = netdev_priv(ndev);
int rx_ringsize, tx_ringsize, ret = 0;
- /*
- * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
+ /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
* card needs room to do 8 byte alignment, +2 so we can reserve
* the first 2 bytes, and +16 gets room for the status word from the
* card.
@@ -1257,7 +1251,7 @@ static int sh_eth_txfree(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_txdesc *txdesc;
- int freeNum = 0;
+ int free_num = 0;
int entry = 0;
for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
@@ -1271,7 +1265,7 @@ static int sh_eth_txfree(struct net_device *ndev)
txdesc->buffer_length, DMA_TO_DEVICE);
dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
mdp->tx_skbuff[entry] = NULL;
- freeNum++;
+ free_num++;
}
txdesc->status = cpu_to_edmac(mdp, TD_TFP);
if (entry >= mdp->num_tx_ring - 1)
@@ -1280,7 +1274,7 @@ static int sh_eth_txfree(struct net_device *ndev)
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += txdesc->buffer_length;
}
- return freeNum;
+ return free_num;
}
/* Packet receive function */
@@ -1313,8 +1307,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (!(desc_status & RDFEND))
ndev->stats.rx_length_errors++;
- /*
- * In case of almost all GETHER/ETHERs, the Receive Frame State
+ /* In case of almost all GETHER/ETHERs, the Receive Frame State
* (RFS) bits in the Receive Descriptor 0 are from bit 9 to
* bit 0. However, in case of the R8A7740's GETHER, the RFS
* bits are from bit 25 to bit 16. So, the driver needs right
@@ -1374,7 +1367,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (skb == NULL)
break; /* Better luck next round. */
dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
- DMA_FROM_DEVICE);
+ DMA_FROM_DEVICE);
sh_eth_set_receive_align(skb);
skb_checksum_none_assert(skb);
@@ -1392,10 +1385,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
/* If we don't need to check status, don't. -KDU */
if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
/* fix the values for the next receiving if RDE is set */
- if (intr_status & EESR_RDE)
- mdp->cur_rx = mdp->dirty_rx =
- (sh_eth_read(ndev, RDFAR) -
- sh_eth_read(ndev, RDLAR)) >> 4;
+ if (intr_status & EESR_RDE) {
+ u32 count = (sh_eth_read(ndev, RDFAR) -
+ sh_eth_read(ndev, RDLAR)) >> 4;
+
+ mdp->cur_rx = count;
+ mdp->dirty_rx = count;
+ }
sh_eth_write(ndev, EDRRR_R, EDRRR);
}
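The shift by 4 assumes a 16-byte descriptor stride, so the byte distance between the current fetch address (RDFAR) and the ring base (RDLAR) converts directly into a descriptor index. A worked example with invented register values:

/* a 16-byte descriptor stride maps a byte offset straight to a ring index */
u32 rdfar = 0x1050;			/* current fetch address (example) */
u32 rdlar = 0x1000;			/* ring base address (example) */
u32 count = (rdfar - rdlar) >> 4;	/* 0x50 / 16 == descriptor 5 */
/* cur_rx = dirty_rx = count restarts the ring in a consistent state */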
@@ -1438,17 +1434,17 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
if (mdp->ether_link_active_low)
link_stat = ~link_stat;
}
- if (!(link_stat & PHY_ST_LINK))
+ if (!(link_stat & PHY_ST_LINK)) {
sh_eth_rcv_snd_disable(ndev);
- else {
+ } else {
/* Link Up */
sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
- ~DMAC_M_ECI, EESIPR);
- /*clear int */
+ ~DMAC_M_ECI, EESIPR);
+ /* clear int */
sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
- ECSR);
+ ECSR);
sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
- DMAC_M_ECI, EESIPR);
+ DMAC_M_ECI, EESIPR);
/* enable tx and rx */
sh_eth_rcv_snd_enable(ndev);
}
@@ -1519,9 +1515,9 @@ ignore_link:
u32 edtrr = sh_eth_read(ndev, EDTRR);
/* dmesg */
dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
- intr_status, mdp->cur_tx);
+ intr_status, mdp->cur_tx);
dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
- mdp->dirty_tx, (u32) ndev->state, edtrr);
+ mdp->dirty_tx, (u32) ndev->state, edtrr);
/* dirty buffer free */
sh_eth_txfree(ndev);
@@ -1644,7 +1640,8 @@ static void sh_eth_adjust_link(struct net_device *ndev)
}
if (!mdp->link) {
sh_eth_write(ndev,
- (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
+ sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
+ ECMR);
new_state = 1;
mdp->link = phydev->link;
if (mdp->cd->no_psr || mdp->no_ether_link)
@@ -1671,7 +1668,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
struct phy_device *phydev = NULL;
snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
- mdp->mii_bus->id , mdp->phy_id);
+ mdp->mii_bus->id, mdp->phy_id);
mdp->link = 0;
mdp->speed = 0;
@@ -1685,8 +1682,8 @@ static int sh_eth_phy_init(struct net_device *ndev)
return PTR_ERR(phydev);
}
- dev_info(&ndev->dev, "attached phy %i to driver %s\n",
- phydev->addr, phydev->drv->name);
+ dev_info(&ndev->dev, "attached PHY %d (IRQ %d) to driver %s\n",
+ phydev->addr, phydev->irq, phydev->drv->name);
mdp->phydev = phydev;
@@ -1703,15 +1700,13 @@ static int sh_eth_phy_start(struct net_device *ndev)
if (ret)
return ret;
- /* reset phy - this also wakes it from PDOWN */
- phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
phy_start(mdp->phydev);
return 0;
}
static int sh_eth_get_settings(struct net_device *ndev,
- struct ethtool_cmd *ecmd)
+ struct ethtool_cmd *ecmd)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
unsigned long flags;
@@ -1725,7 +1720,7 @@ static int sh_eth_get_settings(struct net_device *ndev,
}
static int sh_eth_set_settings(struct net_device *ndev,
- struct ethtool_cmd *ecmd)
+ struct ethtool_cmd *ecmd)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
unsigned long flags;
@@ -1801,7 +1796,7 @@ static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
}
static void sh_eth_get_ethtool_stats(struct net_device *ndev,
- struct ethtool_stats *stats, u64 *data)
+ struct ethtool_stats *stats, u64 *data)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int i = 0;
@@ -1818,7 +1813,7 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
memcpy(data, *sh_eth_gstrings_stats,
- sizeof(sh_eth_gstrings_stats));
+ sizeof(sh_eth_gstrings_stats));
break;
}
}
@@ -1953,9 +1948,10 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
netif_stop_queue(ndev);
- if (netif_msg_timer(mdp))
- dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
- " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));
+ if (netif_msg_timer(mdp)) {
+ dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x, resetting...\n",
+ ndev->name, (int)sh_eth_read(ndev, EESR));
+ }
/* tx_errors count up */
ndev->stats.tx_errors++;
@@ -2088,8 +2084,7 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
}
/* ioctl to device function */
-static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
- int cmd)
+static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct phy_device *phydev = mdp->phydev;
@@ -2209,7 +2204,7 @@ static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
sh_eth_tsu_read_entry(reg_offset, c_addr);
- if (memcmp(addr, c_addr, ETH_ALEN) == 0)
+ if (ether_addr_equal(addr, c_addr))
return i;
}
@@ -2344,8 +2339,7 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
unsigned long flags;
spin_lock_irqsave(&mdp->lock, flags);
- /*
- * Initial condition is MCT = 1, PRM = 0.
+ /* Initial condition is MCT = 1, PRM = 0.
* Depending on ndev->flags, set PRM or clear MCT
*/
ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
@@ -2411,8 +2405,7 @@ static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
mdp->vlan_num_ids++;
- /*
- * The controller has one VLAN tag HW filter. So, if the filter is
+ /* The controller has one VLAN tag HW filter. So, if the filter is
* already enabled, the driver disables it, so all VLAN tags are accepted.
*/
if (mdp->vlan_num_ids > 1) {
@@ -2528,7 +2521,7 @@ static int sh_mdio_init(struct net_device *ndev, int id,
mdp->mii_bus->name = "sh_mii";
mdp->mii_bus->parent = &ndev->dev;
snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
- mdp->pdev->name, id);
+ mdp->pdev->name, id);
/* PHY IRQ */
mdp->mii_bus->irq = devm_kzalloc(&ndev->dev,
@@ -2541,6 +2534,8 @@ static int sh_mdio_init(struct net_device *ndev, int id,
for (i = 0; i < PHY_MAX_ADDR; i++)
mdp->mii_bus->irq[i] = PHY_POLL;
+ if (pd->phy_irq > 0)
+ mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
/* register mdio bus */
ret = mdiobus_register(mdp->mii_bus);
@@ -2739,7 +2734,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* print device information */
pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
- (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
+ (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
platform_set_drvdata(pdev, ndev);
@@ -2777,8 +2772,7 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int sh_eth_runtime_nop(struct device *dev)
{
- /*
- * Runtime PM callback shared between ->runtime_suspend()
+ /* Runtime PM callback shared between ->runtime_suspend()
* and ->runtime_resume(). Simply returns success.
*
* This driver re-initializes all registers after
@@ -2807,7 +2801,8 @@ static struct platform_device_id sh_eth_id_table[] = {
{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
{ "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
- { "r8a7790-ether", (kernel_ulong_t)&r8a7790_data },
+ { "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
+ { "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
{ }
};
MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index f32c1692d310..0fe35b72a1d0 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -1,5 +1,4 @@
-/*
- * SuperH Ethernet device driver
+/* SuperH Ethernet device driver
*
* Copyright (C) 2006-2012 Nobuhiro Iwamatsu
* Copyright (C) 2008-2012 Renesas Solutions Corp.
@@ -12,9 +11,6 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -171,8 +167,7 @@ enum {
#define SH2_SH3_SKB_RX_ALIGN 2
#endif
-/*
- * Register's bits
+/* Register's bits
*/
/* EDSR : sh7734, sh7757, sh7763, and r8a7740 only */
enum EDSR_BIT {
@@ -199,7 +194,7 @@ enum DMAC_T_BIT {
EDTRR_TRNS_ETHER = 0x01,
};
-/* EDRRR*/
+/* EDRRR */
enum EDRRR_R_BIT {
EDRRR_R = 0x01,
};
@@ -422,8 +417,7 @@ enum TSU_FWSLC_BIT {
#define TSU_VTAG_ENABLE 0x80000000
#define TSU_VTAG_VID_MASK 0x00000fff
-/*
- * The sh ether Tx buffer descriptors.
+/* The sh ether Tx buffer descriptors.
* This structure should be 20 bytes.
*/
struct sh_eth_txdesc {
@@ -437,10 +431,9 @@ struct sh_eth_txdesc {
#endif
u32 addr; /* TD2 */
u32 pad1; /* padding data */
-} __attribute__((aligned(2), packed));
+} __aligned(2) __packed;
-/*
- * The sh ether Rx buffer descriptors.
+/* The sh ether Rx buffer descriptors.
* This structure should be 20 bytes.
*/
struct sh_eth_rxdesc {
@@ -454,7 +447,7 @@ struct sh_eth_rxdesc {
#endif
u32 addr; /* RD2 */
u32 pad0; /* padding data */
-} __attribute__((aligned(2), packed));
+} __aligned(2) __packed;
/* This structure is used by each CPU dependency handling. */
struct sh_eth_cpu_data {
@@ -480,16 +473,16 @@ struct sh_eth_cpu_data {
unsigned long eesr_err_check;
/* hardware features */
- unsigned long irq_flags; /* IRQ configuration flags */
- unsigned no_psr:1; /* EtherC DO NOT have PSR */
- unsigned apr:1; /* EtherC have APR */
- unsigned mpr:1; /* EtherC have MPR */
- unsigned tpauser:1; /* EtherC have TPAUSER */
- unsigned bculr:1; /* EtherC have BCULR */
- unsigned tsu:1; /* EtherC have TSU */
- unsigned hw_swap:1; /* E-DMAC have DE bit in EDMR */
- unsigned rpadir:1; /* E-DMAC have RPADIR */
- unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
+ unsigned long irq_flags; /* IRQ configuration flags */
+ unsigned no_psr:1; /* EtherC DO NOT have PSR */
+ unsigned apr:1; /* EtherC have APR */
+ unsigned mpr:1; /* EtherC have MPR */
+ unsigned tpauser:1; /* EtherC have TPAUSER */
+ unsigned bculr:1; /* EtherC have BCULR */
+ unsigned tsu:1; /* EtherC have TSU */
+ unsigned hw_swap:1; /* E-DMAC have DE bit in EDMR */
+ unsigned rpadir:1; /* E-DMAC have RPADIR */
+ unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */
unsigned hw_crc:1; /* E-DMAC have CSMR */
unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */
@@ -511,14 +504,14 @@ struct sh_eth_private {
struct sh_eth_txdesc *tx_ring;
struct sk_buff **rx_skbuff;
struct sk_buff **tx_skbuff;
- spinlock_t lock;
- u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ spinlock_t lock; /* Register access lock */
+ u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
u32 cur_tx, dirty_tx;
- u32 rx_buf_sz; /* Based on MTU+slack. */
+ u32 rx_buf_sz; /* Based on MTU+slack. */
int edmac_endian;
struct napi_struct napi;
/* MII transceiver section. */
- u32 phy_id; /* PHY ID */
+ u32 phy_id; /* PHY ID */
struct mii_bus *mii_bus; /* MDIO bus control */
struct phy_device *phydev; /* PHY device control */
int link;
@@ -526,8 +519,8 @@ struct sh_eth_private {
int msg_enable;
int speed;
int duplex;
- int port; /* for TSU */
- int vlan_num_ids; /* for VLAN tag filter */
+ int port; /* for TSU */
+ int vlan_num_ids; /* for VLAN tag filter */
unsigned no_ether_link:1;
unsigned ether_link_active_low:1;
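The __aligned(2) __packed change in the descriptor declarations above is purely a spelling change: both are kernel wrappers around the GCC attributes the old form used directly, defined in this era in include/linux/compiler-gcc.h roughly as:

#define __packed	__attribute__((packed))
#define __aligned(x)	__attribute__((aligned(x)))

/* so the descriptor declarations still mean exactly */
struct sh_eth_txdesc_equiv {
	/* ... same fields as sh_eth_txdesc ... */
} __attribute__((aligned(2), packed));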
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index c76571886011..ced5b13d937f 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -356,7 +356,7 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp
if (pkt_status & SEEQ_RSTAT_FIG) {
/* Packet is OK. */
/* We don't want to receive our own packets */
- if (memcmp(rd->skb->data + 6, dev->dev_addr, ETH_ALEN)) {
+ if (!ether_addr_equal(rd->skb->data + 6, dev->dev_addr)) {
if (len > rx_copybreak) {
skb = rd->skb;
newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 676c3c057bfb..4dfc2296600d 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -14,6 +14,7 @@
#include "mcdi_pcol.h"
#include "nic.h"
#include "workarounds.h"
+#include "selftest.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
@@ -52,31 +53,31 @@ struct efx_ef10_filter_table {
struct {
unsigned long spec; /* pointer to spec plus flag bits */
-/* BUSY flag indicates that an update is in progress. STACK_OLD is
- * used to mark and sweep stack-owned MAC filters.
+/* BUSY flag indicates that an update is in progress. AUTO_OLD is
+ * used to mark and sweep MAC filters for the device address lists.
*/
#define EFX_EF10_FILTER_FLAG_BUSY 1UL
-#define EFX_EF10_FILTER_FLAG_STACK_OLD 2UL
+#define EFX_EF10_FILTER_FLAG_AUTO_OLD 2UL
#define EFX_EF10_FILTER_FLAGS 3UL
u64 handle; /* firmware handle */
} *entry;
wait_queue_head_t waitq;
/* Shadow of net_device address lists, guarded by mac_lock */
-#define EFX_EF10_FILTER_STACK_UC_MAX 32
-#define EFX_EF10_FILTER_STACK_MC_MAX 256
+#define EFX_EF10_FILTER_DEV_UC_MAX 32
+#define EFX_EF10_FILTER_DEV_MC_MAX 256
struct {
u8 addr[ETH_ALEN];
u16 id;
- } stack_uc_list[EFX_EF10_FILTER_STACK_UC_MAX],
- stack_mc_list[EFX_EF10_FILTER_STACK_MC_MAX];
- int stack_uc_count; /* negative for PROMISC */
- int stack_mc_count; /* negative for PROMISC/ALLMULTI */
+ } dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX],
+ dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
+ int dev_uc_count; /* negative for PROMISC */
+ int dev_mc_count; /* negative for PROMISC/ALLMULTI */
};
/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200
-static void efx_ef10_rx_push_indir_table(struct efx_nic *efx);
+static void efx_ef10_rx_push_rss_config(struct efx_nic *efx);
static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);
@@ -263,6 +264,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc)
goto fail3;
+ efx_ptp_probe(efx, NULL);
+
return 0;
fail3:
@@ -277,11 +280,17 @@ fail1:
static int efx_ef10_free_vis(struct efx_nic *efx)
{
- int rc = efx_mcdi_rpc(efx, MC_CMD_FREE_VIS, NULL, 0, NULL, 0, NULL);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ size_t outlen;
+ int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
/* -EALREADY means nothing to free, so ignore */
if (rc == -EALREADY)
rc = 0;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
+ rc);
return rc;
}
@@ -465,9 +474,10 @@ static void efx_ef10_remove(struct efx_nic *efx)
struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
+ efx_ptp_remove(efx);
+
efx_mcdi_mon_remove(efx);
- /* This needs to be after efx_ptp_remove_channel() with no filters */
efx_ef10_rx_free_indir_table(efx);
if (nic_data->wc_membase)
@@ -669,7 +679,7 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
nic_data->must_restore_piobufs = false;
}
- efx_ef10_rx_push_indir_table(efx);
+ efx_ef10_rx_push_rss_config(efx);
return 0;
}
@@ -764,8 +774,8 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
- EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
- EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
+ EF10_DMA_STAT(rx_dp_hlb_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
+ EF10_DMA_STAT(rx_dp_hlb_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
};
#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \
@@ -834,8 +844,8 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
(1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \
(1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \
(1ULL << EF10_STAT_rx_dp_streaming_packets) | \
- (1ULL << EF10_STAT_rx_dp_emerg_fetch) | \
- (1ULL << EF10_STAT_rx_dp_emerg_wait))
+ (1ULL << EF10_STAT_rx_dp_hlb_fetch) | \
+ (1ULL << EF10_STAT_rx_dp_hlb_wait))
static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
{
@@ -901,6 +911,7 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
return -EAGAIN;
/* Update derived statistics */
+ efx_nic_fix_nodesc_drop_stat(efx, &stats[EF10_STAT_rx_nodesc_drops]);
stats[EF10_STAT_rx_good_bytes] =
stats[EF10_STAT_rx_bytes] -
stats[EF10_STAT_rx_bytes_minus_good_bytes];
@@ -1241,8 +1252,8 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
return;
fail:
- WARN_ON(true);
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n",
+ tx_queue->queue);
}
static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
@@ -1256,7 +1267,7 @@ static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
tx_queue->queue);
- rc = efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc && rc != -EALREADY)
@@ -1265,7 +1276,8 @@ static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
return;
fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
+ outbuf, outlen, rc);
}
static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
@@ -1408,12 +1420,12 @@ static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
}
-static void efx_ef10_rx_push_indir_table(struct efx_nic *efx)
+static void efx_ef10_rx_push_rss_config(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
- netif_dbg(efx, drv, efx->net_dev, "pushing RX indirection table\n");
+ netif_dbg(efx, drv, efx->net_dev, "pushing RSS config\n");
if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
@@ -1461,8 +1473,9 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
efx_rx_queue_index(rx_queue));
- MCDI_POPULATE_DWORD_1(inbuf, INIT_RXQ_IN_FLAGS,
- INIT_RXQ_IN_FLAG_PREFIX, 1);
+ MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
+ INIT_RXQ_IN_FLAG_PREFIX, 1,
+ INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
@@ -1481,13 +1494,8 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
outbuf, sizeof(outbuf), &outlen);
if (rc)
- goto fail;
-
- return;
-
-fail:
- WARN_ON(true);
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
+ efx_rx_queue_index(rx_queue));
}
static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
@@ -1501,7 +1509,7 @@ static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
efx_rx_queue_index(rx_queue));
- rc = efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc && rc != -EALREADY)
@@ -1510,7 +1518,8 @@ static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
return;
fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
+ outbuf, outlen, rc);
}
static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
@@ -1647,15 +1656,7 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
outbuf, sizeof(outbuf), &outlen);
- if (rc)
- goto fail;
-
/* IRQ return is ignored */
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1669,7 +1670,7 @@ static void efx_ef10_ev_fini(struct efx_channel *channel)
MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
- rc = efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc && rc != -EALREADY)
@@ -1678,7 +1679,8 @@ static void efx_ef10_ev_fini(struct efx_channel *channel)
return;
fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
+ outbuf, outlen, rc);
}
static void efx_ef10_ev_remove(struct efx_channel *channel)
@@ -1717,8 +1719,6 @@ static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
{
unsigned int rx_desc_ptr;
- WARN_ON(rx_queue->scatter_n == 0);
-
netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
"scattered RX aborted (dropping %u buffers)\n",
rx_queue->scatter_n);
@@ -1754,7 +1754,10 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
- WARN_ON(EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT));
+ if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
+ netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
+ EFX_QWORD_FMT "\n",
+ EFX_QWORD_VAL(*event));
rx_queue = efx_channel_get_rx_queue(channel);
@@ -1765,17 +1768,27 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
if (n_descs != rx_queue->scatter_n + 1) {
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
/* detect rx abort */
if (unlikely(n_descs == rx_queue->scatter_n)) {
- WARN_ON(rx_bytes != 0);
+ if (rx_queue->scatter_n == 0 || rx_bytes != 0)
+ netdev_WARN(efx->net_dev,
+ "invalid RX abort: scatter_n=%u event="
+ EFX_QWORD_FMT "\n",
+ rx_queue->scatter_n,
+ EFX_QWORD_VAL(*event));
efx_ef10_handle_rx_abort(rx_queue);
return 0;
}
- if (unlikely(rx_queue->scatter_n != 0)) {
- /* Scattered packet completions cannot be
- * merged, so something has gone wrong.
- */
+ /* Check that RX completion merging is valid, i.e.
+ * the current firmware supports it and this is a
+ * non-scattered packet.
+ */
+ if (!(nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) ||
+ rx_queue->scatter_n != 0 || rx_cont) {
efx_ef10_handle_rx_bad_lbits(
rx_queue, next_ptr_lbits,
(rx_queue->removed_count +
@@ -1901,7 +1914,7 @@ static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
* events, so efx_process_channel() won't refill the
* queue. Refill it here
*/
- efx_fast_push_rx_descriptors(&channel->rx_queue);
+ efx_fast_push_rx_descriptors(&channel->rx_queue, true);
break;
default:
netif_err(efx, hw, efx->net_dev,
@@ -2232,7 +2245,9 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, spec->dmaq_id);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
+ 0 : spec->dmaq_id);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
(spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
@@ -2257,6 +2272,8 @@ static int efx_ef10_filter_push(struct efx_nic *efx,
outbuf, sizeof(outbuf), NULL);
if (rc == 0)
*handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
+ if (rc == -ENOSPC)
+ rc = -EBUSY; /* to match efx_farch_filter_insert() */
return rc;
}
@@ -2326,10 +2343,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
EFX_EF10_FILTER_FLAG_BUSY)
break;
if (spec->priority < saved_spec->priority &&
- !(saved_spec->priority ==
- EFX_FILTER_PRI_REQUIRED &&
- saved_spec->flags &
- EFX_FILTER_FLAG_RX_STACK)) {
+ spec->priority != EFX_FILTER_PRI_AUTO) {
rc = -EPERM;
goto out_unlock;
}
@@ -2383,11 +2397,13 @@ found:
*/
saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
if (saved_spec) {
- if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
+ if (spec->priority == EFX_FILTER_PRI_AUTO &&
+ saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
/* Just make sure it won't be removed */
- saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
+ if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
+ saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
table->entry[ins_index].spec &=
- ~EFX_EF10_FILTER_FLAG_STACK_OLD;
+ ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
rc = ins_index;
goto out_unlock;
}
@@ -2427,8 +2443,11 @@ found:
if (rc == 0) {
if (replacing) {
/* Update the fields that may differ */
+ if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
+ saved_spec->flags |=
+ EFX_FILTER_FLAG_RX_OVER_AUTO;
saved_spec->priority = spec->priority;
- saved_spec->flags &= EFX_FILTER_FLAG_RX_STACK;
+ saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
saved_spec->flags |= spec->flags;
saved_spec->rss_context = spec->rss_context;
saved_spec->dmaq_id = spec->dmaq_id;
@@ -2497,13 +2516,13 @@ static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
}
/* Remove a filter.
- * If !stack_requested, remove by ID
- * If stack_requested, remove by index
+ * If !by_index, remove by ID
+ * If by_index, remove by index
* Filter ID may come from userland and must be range-checked.
*/
static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id, bool stack_requested)
+ unsigned int priority_mask,
+ u32 filter_id, bool by_index)
{
unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
struct efx_ef10_filter_table *table = efx->filter_state;
@@ -2527,26 +2546,41 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
spin_unlock_bh(&efx->filter_lock);
schedule();
}
+
spec = efx_ef10_filter_entry_spec(table, filter_idx);
- if (!spec || spec->priority > priority ||
- (!stack_requested &&
+ if (!spec ||
+ (!by_index &&
efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
filter_id / HUNT_FILTER_TBL_ROWS)) {
rc = -ENOENT;
goto out_unlock;
}
+
+ if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
+ priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
+ /* Just remove flags */
+ spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
+ table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ rc = 0;
+ goto out_unlock;
+ }
+
+ if (!(priority_mask & (1U << spec->priority))) {
+ rc = -ENOENT;
+ goto out_unlock;
+ }
+
table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
spin_unlock_bh(&efx->filter_lock);
- if (spec->flags & EFX_FILTER_FLAG_RX_STACK && !stack_requested) {
- /* Reset steering of a stack-owned filter */
+ if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
+ /* Reset to an automatic filter */
struct efx_filter_spec new_spec = *spec;
- new_spec.priority = EFX_FILTER_PRI_REQUIRED;
+ new_spec.priority = EFX_FILTER_PRI_AUTO;
new_spec.flags = (EFX_FILTER_FLAG_RX |
- EFX_FILTER_FLAG_RX_RSS |
- EFX_FILTER_FLAG_RX_STACK);
+ EFX_FILTER_FLAG_RX_RSS);
new_spec.dmaq_id = 0;
new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
rc = efx_ef10_filter_push(efx, &new_spec,
@@ -2574,6 +2608,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
}
}
+
table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
wake_up_all(&table->waitq);
out_unlock:
@@ -2586,7 +2621,8 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id)
{
- return efx_ef10_filter_remove_internal(efx, priority, filter_id, false);
+ return efx_ef10_filter_remove_internal(efx, 1U << priority,
+ filter_id, false);
}
static int efx_ef10_filter_get_safe(struct efx_nic *efx,
@@ -2612,10 +2648,24 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx,
return rc;
}
-static void efx_ef10_filter_clear_rx(struct efx_nic *efx,
+static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
enum efx_filter_priority priority)
{
- /* TODO */
+ unsigned int priority_mask;
+ unsigned int i;
+ int rc;
+
+ priority_mask = (((1U << (priority + 1)) - 1) &
+ ~(1U << EFX_FILTER_PRI_AUTO));
+
+ for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
+ rc = efx_ef10_filter_remove_internal(efx, priority_mask,
+ i, true);
+ if (rc && rc != -ENOENT)
+ return rc;
+ }
+
+ return 0;
}
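The priority_mask expression builds a bitmask of every priority up to and including the requested one, then strips the AUTO bit so a user-initiated clear never deletes the driver's automatic address filters. A standalone check of the arithmetic, assuming the enum ordering HINT=0, AUTO=1, MANUAL=2, REQUIRED=3 (the real values live in filter.h and are not shown in this diff):

#include <stdio.h>

enum { PRI_HINT, PRI_AUTO, PRI_MANUAL, PRI_REQUIRED };	/* assumed order */

int main(void)
{
	unsigned int priority = PRI_MANUAL;
	unsigned int mask = (((1U << (priority + 1)) - 1) &
			     ~(1U << PRI_AUTO));

	/* 0x7 & ~0x2 == 0x5: HINT and MANUAL pass the mask, AUTO never does */
	printf("mask = 0x%x\n", mask);
	return 0;
}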
static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
@@ -2716,8 +2766,6 @@ static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
rc = -EBUSY;
goto fail_unlock;
}
- EFX_WARN_ON_PARANOID(saved_spec->flags &
- EFX_FILTER_FLAG_RX_STACK);
if (spec->priority < saved_spec->priority) {
rc = -EPERM;
goto fail_unlock;
@@ -3027,8 +3075,11 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
table->entry[filter_idx].handle);
rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
NULL, 0, NULL);
-
- WARN_ON(rc != 0);
+ if (rc)
+ netdev_WARN(efx->net_dev,
+ "filter_idx=%#x handle=%#llx\n",
+ filter_idx,
+ table->entry[filter_idx].handle);
kfree(spec);
}
@@ -3052,15 +3103,15 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
/* Mark old filters that may need to be removed */
spin_lock_bh(&efx->filter_lock);
- n = table->stack_uc_count < 0 ? 1 : table->stack_uc_count;
+ n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count;
for (i = 0; i < n; i++) {
- filter_idx = table->stack_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
+ filter_idx = table->dev_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
}
- n = table->stack_mc_count < 0 ? 1 : table->stack_mc_count;
+ n = table->dev_mc_count < 0 ? 1 : table->dev_mc_count;
for (i = 0; i < n; i++) {
- filter_idx = table->stack_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
+ filter_idx = table->dev_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
}
spin_unlock_bh(&efx->filter_lock);
@@ -3069,28 +3120,28 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
*/
netif_addr_lock_bh(net_dev);
if (net_dev->flags & IFF_PROMISC ||
- netdev_uc_count(net_dev) >= EFX_EF10_FILTER_STACK_UC_MAX) {
- table->stack_uc_count = -1;
+ netdev_uc_count(net_dev) >= EFX_EF10_FILTER_DEV_UC_MAX) {
+ table->dev_uc_count = -1;
} else {
- table->stack_uc_count = 1 + netdev_uc_count(net_dev);
- memcpy(table->stack_uc_list[0].addr, net_dev->dev_addr,
+ table->dev_uc_count = 1 + netdev_uc_count(net_dev);
+ memcpy(table->dev_uc_list[0].addr, net_dev->dev_addr,
ETH_ALEN);
i = 1;
netdev_for_each_uc_addr(uc, net_dev) {
- memcpy(table->stack_uc_list[i].addr,
+ memcpy(table->dev_uc_list[i].addr,
uc->addr, ETH_ALEN);
i++;
}
}
if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
- netdev_mc_count(net_dev) >= EFX_EF10_FILTER_STACK_MC_MAX) {
- table->stack_mc_count = -1;
+ netdev_mc_count(net_dev) >= EFX_EF10_FILTER_DEV_MC_MAX) {
+ table->dev_mc_count = -1;
} else {
- table->stack_mc_count = 1 + netdev_mc_count(net_dev);
- eth_broadcast_addr(table->stack_mc_list[0].addr);
+ table->dev_mc_count = 1 + netdev_mc_count(net_dev);
+ eth_broadcast_addr(table->dev_mc_list[0].addr);
i = 1;
netdev_for_each_mc_addr(mc, net_dev) {
- memcpy(table->stack_mc_list[i].addr,
+ memcpy(table->dev_mc_list[i].addr,
mc->addr, ETH_ALEN);
i++;
}
@@ -3098,90 +3149,86 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
netif_addr_unlock_bh(net_dev);
/* Insert/renew unicast filters */
- if (table->stack_uc_count >= 0) {
- for (i = 0; i < table->stack_uc_count; i++) {
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
- EFX_FILTER_FLAG_RX_RSS |
- EFX_FILTER_FLAG_RX_STACK,
+ if (table->dev_uc_count >= 0) {
+ for (i = 0; i < table->dev_uc_count; i++) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
0);
efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
- table->stack_uc_list[i].addr);
+ table->dev_uc_list[i].addr);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
/* Fall back to unicast-promisc */
while (i--)
efx_ef10_filter_remove_safe(
- efx, EFX_FILTER_PRI_REQUIRED,
- table->stack_uc_list[i].id);
- table->stack_uc_count = -1;
+ efx, EFX_FILTER_PRI_AUTO,
+ table->dev_uc_list[i].id);
+ table->dev_uc_count = -1;
break;
}
- table->stack_uc_list[i].id = rc;
+ table->dev_uc_list[i].id = rc;
}
}
- if (table->stack_uc_count < 0) {
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
- EFX_FILTER_FLAG_RX_RSS |
- EFX_FILTER_FLAG_RX_STACK,
+ if (table->dev_uc_count < 0) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
0);
efx_filter_set_uc_def(&spec);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
WARN_ON(1);
- table->stack_uc_count = 0;
+ table->dev_uc_count = 0;
} else {
- table->stack_uc_list[0].id = rc;
+ table->dev_uc_list[0].id = rc;
}
}
/* Insert/renew multicast filters */
- if (table->stack_mc_count >= 0) {
- for (i = 0; i < table->stack_mc_count; i++) {
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
- EFX_FILTER_FLAG_RX_RSS |
- EFX_FILTER_FLAG_RX_STACK,
+ if (table->dev_mc_count >= 0) {
+ for (i = 0; i < table->dev_mc_count; i++) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
0);
efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
- table->stack_mc_list[i].addr);
+ table->dev_mc_list[i].addr);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
/* Fall back to multicast-promisc */
while (i--)
efx_ef10_filter_remove_safe(
- efx, EFX_FILTER_PRI_REQUIRED,
- table->stack_mc_list[i].id);
- table->stack_mc_count = -1;
+ efx, EFX_FILTER_PRI_AUTO,
+ table->dev_mc_list[i].id);
+ table->dev_mc_count = -1;
break;
}
- table->stack_mc_list[i].id = rc;
+ table->dev_mc_list[i].id = rc;
}
}
- if (table->stack_mc_count < 0) {
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
- EFX_FILTER_FLAG_RX_RSS |
- EFX_FILTER_FLAG_RX_STACK,
+ if (table->dev_mc_count < 0) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
0);
efx_filter_set_mc_def(&spec);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
WARN_ON(1);
- table->stack_mc_count = 0;
+ table->dev_mc_count = 0;
} else {
- table->stack_mc_list[0].id = rc;
+ table->dev_mc_list[0].id = rc;
}
}
/* Remove filters that weren't renewed. Since nothing else
- * changes the STACK_OLD flag or removes these filters, we
+ * changes the AUTO_OLD flag or removes these filters, we
* don't need to hold the filter_lock while scanning for
* these filters.
*/
for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
if (ACCESS_ONCE(table->entry[i].spec) &
- EFX_EF10_FILTER_FLAG_STACK_OLD) {
- if (efx_ef10_filter_remove_internal(efx,
- EFX_FILTER_PRI_REQUIRED,
- i, true) < 0)
+ EFX_EF10_FILTER_FLAG_AUTO_OLD) {
+ if (efx_ef10_filter_remove_internal(
+ efx, 1U << EFX_FILTER_PRI_AUTO,
+ i, true) < 0)
remove_failed = true;
}
}
@@ -3195,6 +3242,87 @@ static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
return efx_mcdi_set_mac(efx);
}
+static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
+ return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+/* MC BISTs follow a different poll mechanism to PHY BISTs.
+ * The BIST is done in the poll handler on the MC, and the MCDI command
+ * will block until the BIST is done.
+ */
+static int efx_ef10_poll_bist(struct efx_nic *efx)
+{
+ int rc;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
+ size_t outlen;
+ u32 result;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
+ return -EIO;
+
+ result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
+ switch (result) {
+ case MC_CMD_POLL_BIST_PASSED:
+ netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
+ return 0;
+ case MC_CMD_POLL_BIST_TIMEOUT:
+ netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
+ return -EIO;
+ case MC_CMD_POLL_BIST_FAILED:
+ netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
+ return -EIO;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "BIST returned unknown result %u", result);
+ return -EIO;
+ }
+}
+
+static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
+{
+ int rc;
+
+ netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);
+
+ rc = efx_ef10_start_bist(efx, bist_type);
+ if (rc != 0)
+ return rc;
+
+ return efx_ef10_poll_bist(efx);
+}
+
+static int
+efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+ int rc, rc2;
+
+ efx_reset_down(efx, RESET_TYPE_WORLD);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
+ NULL, 0, NULL, 0, NULL);
+ if (rc != 0)
+ goto out;
+
+ tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
+ tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;
+
+ rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
+
+out:
+ rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
+ return rc ? rc : rc2;
+}
+
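A note on the result convention used by efx_ef10_test_chip() above, consistent with the rest of the efx self-test code: 1 means the test passed, -1 that it failed, and 0 that it was not run. A minimal sketch of a hypothetical reporting helper (the function name is illustrative, not part of the patch):

	static void example_report_bists(struct efx_nic *efx,
					 const struct efx_self_tests *tests)
	{
		/* 1 == passed, -1 == failed, 0 == not run */
		netif_info(efx, drv, efx->net_dev, "memory BIST: %s\n",
			   tests->memory > 0 ? "passed" : "FAILED");
		netif_info(efx, drv, efx->net_dev, "register BIST: %s\n",
			   tests->registers > 0 ? "passed" : "FAILED");
	}

The two results surface through ethtool's offline self-test as the "core memory" and "core registers" entries added in the ethtool.c hunk below.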
#ifdef CONFIG_SFC_MTD
struct efx_ef10_nvram_type_info {
@@ -3213,6 +3341,7 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" },
{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" },
{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" },
{ NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
};
@@ -3320,6 +3449,119 @@ static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
_efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
}
+static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
+ bool temp)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);
+ int rc;
+
+ if (channel->sync_events_state == SYNC_EVENTS_REQUESTED ||
+ channel->sync_events_state == SYNC_EVENTS_VALID ||
+ (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED))
+ return 0;
+ channel->sync_events_state = SYNC_EVENTS_REQUESTED;
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE,
+ channel->channel);
+
+ rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+
+ if (rc != 0)
+ channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
+ SYNC_EVENTS_DISABLED;
+
+ return rc;
+}
+
+static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel,
+ bool temp)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN);
+ int rc;
+
+ if (channel->sync_events_state == SYNC_EVENTS_DISABLED ||
+ (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT))
+ return 0;
+ if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) {
+ channel->sync_events_state = SYNC_EVENTS_DISABLED;
+ return 0;
+ }
+ channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
+ SYNC_EVENTS_DISABLED;
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL,
+ MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE,
+ channel->channel);
+
+ rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+
+ return rc;
+}
+
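A reading of the two helpers above (inferred from the code rather than stated in the patch): SYNC_EVENTS_DISABLED means sync events are off by explicit request; SYNC_EVENTS_QUIESCENT means they are off only temporarily (temp == true, e.g. across a reset) and are eligible to be re-enabled; SYNC_EVENTS_REQUESTED means the subscription has been sent to the MC; SYNC_EVENTS_VALID means time-sync events are flowing. The temp flag thus lets the driver quiesce timestamping without forgetting whether the user asked for it.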
+static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
+ bool temp)
+{
+ int (*set)(struct efx_channel *channel, bool temp);
+ struct efx_channel *channel;
+
+ set = en ?
+ efx_ef10_rx_enable_timestamping :
+ efx_ef10_rx_disable_timestamping;
+
+ efx_for_each_channel(channel, efx) {
+ int rc = set(channel, temp);
+ if (en && rc != 0) {
+ efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
+ struct hwtstamp_config *init)
+{
+ int rc;
+
+ switch (init->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ efx_ef10_ptp_set_ts_sync_events(efx, false, false);
+ /* if TX timestamping is still requested then leave PTP on */
+ return efx_ptp_change_mode(efx,
+ init->tx_type != HWTSTAMP_TX_OFF, 0);
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ init->rx_filter = HWTSTAMP_FILTER_ALL;
+ rc = efx_ptp_change_mode(efx, true, 0);
+ if (!rc)
+ rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false);
+ if (rc)
+ efx_ptp_change_mode(efx, false, 0);
+ return rc;
+ default:
+ return -ERANGE;
+ }
+}
+
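efx_ef10_ptp_set_ts_config() is driven by the SIOCSHWTSTAMP ioctl (see the efx_ioctl() change in efx.c below). A minimal userspace sketch, assuming an interface named "eth0" and an already-open socket fd; note that, as the switch above shows, any PTP filter is upgraded to HWTSTAMP_FILTER_ALL:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) == 0) {
		/* cfg.rx_filter now reads back as HWTSTAMP_FILTER_ALL */
	}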
const struct efx_nic_type efx_hunt_a0_nic_type = {
.mem_map_size = efx_ef10_mem_map_size,
.probe = efx_ef10_probe,
@@ -3336,6 +3578,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.describe_stats = efx_ef10_describe_stats,
.update_stats = efx_ef10_update_stats,
.start_stats = efx_mcdi_mac_start_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
.set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = efx_ef10_push_irq_moderation,
@@ -3345,7 +3588,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.get_wol = efx_ef10_get_wol,
.set_wol = efx_ef10_set_wol,
.resume_wol = efx_port_dummy_op_void,
- /* TODO: test_chip */
+ .test_chip = efx_ef10_test_chip,
.test_nvram = efx_mcdi_nvram_test_all,
.mcdi_request = efx_ef10_mcdi_request,
.mcdi_poll_response = efx_ef10_mcdi_poll_response,
@@ -3360,7 +3603,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.tx_init = efx_ef10_tx_init,
.tx_remove = efx_ef10_tx_remove,
.tx_write = efx_ef10_tx_write,
- .rx_push_indir_table = efx_ef10_rx_push_indir_table,
+ .rx_push_rss_config = efx_ef10_rx_push_rss_config,
.rx_probe = efx_ef10_rx_probe,
.rx_init = efx_ef10_rx_init,
.rx_remove = efx_ef10_rx_remove,
@@ -3397,11 +3640,14 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.mtd_sync = efx_mcdi_mtd_sync,
#endif
.ptp_write_host_time = efx_ef10_ptp_write_host_time,
+ .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
+ .ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
.revision = EFX_REV_HUNT_A0,
.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
+ .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
.can_rx_scatter = true,
.always_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
@@ -3410,4 +3656,6 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
NETIF_F_RXHASH | NETIF_F_NTUPLE),
.mcdi_max_ver = 2,
.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
+ .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
+ 1 << HWTSTAMP_FILTER_ALL,
};
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 2e27837ce6a2..83d464347021 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -83,6 +83,7 @@ const char *const efx_reset_type_names[] = {
[RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
[RESET_TYPE_TX_SKIP] = "TX_SKIP",
[RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
+ [RESET_TYPE_MC_BIST] = "MC_BIST",
};
/* Reset workqueue. If any NIC has a hardware failure then a reset will be
@@ -91,6 +92,12 @@ const char *const efx_reset_type_names[] = {
*/
static struct workqueue_struct *reset_workqueue;
+/* How often and how many times to poll for a reset while waiting for a
+ * BIST started by another function to complete (up to 10 seconds in
+ * total with the values below).
+ */
+#define BIST_WAIT_DELAY_MS 100
+#define BIST_WAIT_DELAY_COUNT 100
+
/**************************************************************************
*
* Configurable values
@@ -246,7 +253,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
efx_channel_get_rx_queue(channel);
efx_rx_flush_packet(channel);
- efx_fast_push_rx_descriptors(rx_queue);
+ efx_fast_push_rx_descriptors(rx_queue, true);
}
return spent;
@@ -585,7 +592,7 @@ static void efx_start_datapath(struct efx_nic *efx)
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_padding);
rx_buf_len = (sizeof(struct efx_rx_page_state) +
- NET_IP_ALIGN + efx->rx_dma_len);
+ efx->rx_ip_align + efx->rx_dma_len);
if (rx_buf_len <= PAGE_SIZE) {
efx->rx_scatter = efx->type->always_rx_scatter;
efx->rx_buffer_order = 0;
@@ -639,12 +646,16 @@ static void efx_start_datapath(struct efx_nic *efx)
efx_for_each_channel_rx_queue(rx_queue, channel) {
efx_init_rx_queue(rx_queue);
atomic_inc(&efx->active_queues);
- efx_nic_generate_fill_event(rx_queue);
+ efx_stop_eventq(channel);
+ efx_fast_push_rx_descriptors(rx_queue, false);
+ efx_start_eventq(channel);
}
WARN_ON(channel->rx_pkt_n_frags);
}
+ efx_ptp_start_datapath(efx);
+
if (netif_device_present(efx->net_dev))
netif_tx_wake_all_queues(efx->net_dev);
}
@@ -659,6 +670,8 @@ static void efx_stop_datapath(struct efx_nic *efx)
EFX_ASSERT_RESET_SERIALISED(efx);
BUG_ON(efx->port_enabled);
+ efx_ptp_stop_datapath(efx);
+
/* Stop RX refill */
efx_for_each_channel(channel, efx) {
efx_for_each_channel_rx_queue(rx_queue, channel)
@@ -1047,18 +1060,23 @@ static void efx_start_port(struct efx_nic *efx)
mutex_lock(&efx->mac_lock);
efx->port_enabled = true;
- /* efx_mac_work() might have been scheduled after efx_stop_port(),
- * and then cancelled by efx_flush_all() */
+ /* Ensure MAC ingress/egress is enabled */
efx->type->reconfigure_mac(efx);
mutex_unlock(&efx->mac_lock);
}
-/* Prevent efx_mac_work() and efx_monitor() from working */
+/* Cancel work for MAC reconfiguration, periodic hardware monitoring
+ * and the async self-test, wait for them to finish and prevent them
+ * being scheduled again. This doesn't cover online resets, which
+ * should only be cancelled when removing the device.
+ */
static void efx_stop_port(struct efx_nic *efx)
{
netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
mutex_lock(&efx->mac_lock);
efx->port_enabled = false;
mutex_unlock(&efx->mac_lock);
@@ -1066,6 +1084,10 @@ static void efx_stop_port(struct efx_nic *efx)
/* Serialise against efx_set_multicast_list() */
netif_addr_lock_bh(efx->net_dev);
netif_addr_unlock_bh(efx->net_dev);
+
+ cancel_delayed_work_sync(&efx->monitor_work);
+ efx_selftest_async_cancel(efx);
+ cancel_work_sync(&efx->mac_work);
}
static void efx_fini_port(struct efx_nic *efx)
@@ -1095,6 +1117,77 @@ static void efx_remove_port(struct efx_nic *efx)
*
**************************************************************************/
+static LIST_HEAD(efx_primary_list);
+static LIST_HEAD(efx_unassociated_list);
+
+static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
+{
+ return left->type == right->type &&
+ left->vpd_sn && right->vpd_sn &&
+ !strcmp(left->vpd_sn, right->vpd_sn);
+}
+
+static void efx_associate(struct efx_nic *efx)
+{
+ struct efx_nic *other, *next;
+
+ if (efx->primary == efx) {
+ /* Adding primary function; look for secondaries */
+
+ netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
+ list_add_tail(&efx->node, &efx_primary_list);
+
+ list_for_each_entry_safe(other, next, &efx_unassociated_list,
+ node) {
+ if (efx_same_controller(efx, other)) {
+ list_del(&other->node);
+ netif_dbg(other, probe, other->net_dev,
+ "moving to secondary list of %s %s\n",
+ pci_name(efx->pci_dev),
+ efx->net_dev->name);
+ list_add_tail(&other->node,
+ &efx->secondary_list);
+ other->primary = efx;
+ }
+ }
+ } else {
+ /* Adding secondary function; look for primary */
+
+ list_for_each_entry(other, &efx_primary_list, node) {
+ if (efx_same_controller(efx, other)) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "adding to secondary list of %s %s\n",
+ pci_name(other->pci_dev),
+ other->net_dev->name);
+ list_add_tail(&efx->node,
+ &other->secondary_list);
+ efx->primary = other;
+ return;
+ }
+ }
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "adding to unassociated list\n");
+ list_add_tail(&efx->node, &efx_unassociated_list);
+ }
+}
+
+static void efx_dissociate(struct efx_nic *efx)
+{
+ struct efx_nic *other, *next;
+
+ list_del(&efx->node);
+ efx->primary = NULL;
+
+ list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
+ list_del(&other->node);
+ netif_dbg(other, probe, other->net_dev,
+ "moving to unassociated list\n");
+ list_add_tail(&other->node, &efx_unassociated_list);
+ other->primary = NULL;
+ }
+}
+
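A worked example under assumed hardware (two PCI functions of one dual-port controller sharing a VPD serial number): whichever function probes first with the PRIMARY attach flag joins efx_primary_list; when the other probes, efx_same_controller() matches the serial numbers and it lands on the first one's secondary_list with ->primary pointing at it. If the primary is removed first, efx_dissociate() parks the surviving secondaries back on the unassociated list with ->primary cleared.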
/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
@@ -1671,18 +1764,10 @@ static void efx_start_all(struct efx_nic *efx)
}
efx->type->start_stats(efx);
-}
-
-/* Flush all delayed work. Should only be called when no more delayed work
- * will be scheduled. This doesn't flush pending online resets (efx_reset),
- * since we're holding the rtnl_lock at this point. */
-static void efx_flush_all(struct efx_nic *efx)
-{
- /* Make sure the hardware monitor and event self-test are stopped */
- cancel_delayed_work_sync(&efx->monitor_work);
- efx_selftest_async_cancel(efx);
- /* Stop scheduled port reconfigurations */
- cancel_work_sync(&efx->mac_work);
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
}
/* Quiesce the hardware and software data path, and regular activity
@@ -1698,12 +1783,16 @@ static void efx_stop_all(struct efx_nic *efx)
if (!efx->port_enabled)
return;
+ /* update stats before we go down so we can accurately count
+ * rx_nodesc_drops
+ */
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
efx->type->stop_stats(efx);
efx_stop_port(efx);
- /* Flush efx_mac_work(), refill_workqueue, monitor_work */
- efx_flush_all(efx);
-
/* Stop the kernel transmit interface. This is only valid if
* the device is stopped or detached; otherwise the watchdog
* may fire immediately.
@@ -1847,7 +1936,9 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
struct mii_ioctl_data *data = if_mii(ifr);
if (cmd == SIOCSHWTSTAMP)
- return efx_ptp_ioctl(efx, ifr, cmd);
+ return efx_ptp_set_ts_config(efx, ifr);
+ if (cmd == SIOCGHWTSTAMP)
+ return efx_ptp_get_ts_config(efx, ifr);
/* Convert phy_id from older PRTAD/DEVAD format */
if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
@@ -2060,7 +2151,7 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
/* If disabling RX n-tuple filtering, clear existing filters */
if (net_dev->features & ~data & NETIF_F_NTUPLE)
- efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+ return efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
return 0;
}
@@ -2194,6 +2285,8 @@ static int efx_register_netdev(struct efx_nic *efx)
efx_init_tx_queue_core_txq(tx_queue);
}
+ efx_associate(efx);
+
rtnl_unlock();
rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -2207,6 +2300,7 @@ static int efx_register_netdev(struct efx_nic *efx)
fail_registered:
rtnl_lock();
+ efx_dissociate(efx);
unregister_netdevice(net_dev);
fail_locked:
efx->state = STATE_UNINIT;
@@ -2383,6 +2477,24 @@ int efx_try_recovery(struct efx_nic *efx)
return 0;
}
+static void efx_wait_for_bist_end(struct efx_nic *efx)
+{
+ int i;
+
+ for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
+ if (efx_mcdi_poll_reboot(efx))
+ goto out;
+ msleep(BIST_WAIT_DELAY_MS);
+ }
+
+ netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
+out:
+ /* Either way unset the BIST flag. If we found no reboot we probably
+ * won't recover, but we should try.
+ */
+ efx->mc_bist_for_other_fn = false;
+}
+
/* The worker thread exists so that code that cannot sleep can
* schedule a reset for later.
*/
@@ -2395,6 +2507,9 @@ static void efx_reset_work(struct work_struct *data)
pending = ACCESS_ONCE(efx->reset_pending);
method = fls(pending) - 1;
+ if (method == RESET_TYPE_MC_BIST)
+ efx_wait_for_bist_end(efx);
+
if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
method == RESET_TYPE_RECOVER_OR_ALL) &&
efx_try_recovery(efx))
@@ -2433,6 +2548,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
case RESET_TYPE_WORLD:
case RESET_TYPE_DISABLE:
case RESET_TYPE_RECOVER_OR_DISABLE:
+ case RESET_TYPE_MC_BIST:
method = type;
netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
RESET_TYPE(method));
@@ -2526,6 +2642,8 @@ static int efx_init_struct(struct efx_nic *efx,
int i;
/* Initialise common structures */
+ INIT_LIST_HEAD(&efx->node);
+ INIT_LIST_HEAD(&efx->secondary_list);
spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_MTD
INIT_LIST_HEAD(&efx->mtd_list);
@@ -2540,8 +2658,12 @@ static int efx_init_struct(struct efx_nic *efx,
efx->net_dev = net_dev;
efx->rx_prefix_size = efx->type->rx_prefix_size;
+ efx->rx_ip_align =
+ NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
efx->rx_packet_hash_offset =
efx->type->rx_hash_offset - efx->type->rx_prefix_size;
+ efx->rx_packet_ts_offset =
+ efx->type->rx_ts_offset - efx->type->rx_prefix_size;
spin_lock_init(&efx->stats_lock);
mutex_init(&efx->mac_lock);
efx->phy_op = &efx_dummy_phy_operations;
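A worked sketch of the rx_ip_align arithmetic above (the prefix sizes are illustrative): with NET_IP_ALIGN == 2 and a 14-byte RX prefix, (14 + 2) % 4 == 0, so no extra padding is needed to realign the IP header; a 16-byte prefix would give (16 + 2) % 4 == 2. Where NET_IP_ALIGN is 0, the field is simply 0.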
@@ -2582,6 +2704,8 @@ static void efx_fini_struct(struct efx_nic *efx)
for (i = 0; i < EFX_MAX_CHANNELS; i++)
kfree(efx->channel[i]);
+ kfree(efx->vpd_sn);
+
if (efx->workqueue) {
destroy_workqueue(efx->workqueue);
efx->workqueue = NULL;
@@ -2626,6 +2750,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
/* Mark the NIC as fini, then stop the interface */
rtnl_lock();
+ efx_dissociate(efx);
dev_close(efx->net_dev);
efx_disable_interrupts(efx);
rtnl_unlock();
@@ -2641,7 +2766,6 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
efx_fini_struct(efx);
- pci_set_drvdata(pci_dev, NULL);
free_netdev(efx->net_dev);
pci_disable_pcie_error_reporting(pci_dev);
@@ -2653,12 +2777,12 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
* always appear within the first 512 bytes.
*/
#define SFC_VPD_LEN 512
-static void efx_print_product_vpd(struct efx_nic *efx)
+static void efx_probe_vpd_strings(struct efx_nic *efx)
{
struct pci_dev *dev = efx->pci_dev;
char vpd_data[SFC_VPD_LEN];
ssize_t vpd_size;
- int i, j;
+ int ro_start, ro_size, i, j;
/* Get the vpd data from the device */
vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
@@ -2668,14 +2792,15 @@ static void efx_print_product_vpd(struct efx_nic *efx)
}
/* Get the Read only section */
- i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (i < 0) {
+ ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
+ if (ro_start < 0) {
netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
return;
}
- j = pci_vpd_lrdt_size(&vpd_data[i]);
- i += PCI_VPD_LRDT_TAG_SIZE;
+ ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
+ j = ro_size;
+ i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
if (i + j > vpd_size)
j = vpd_size - i;
@@ -2695,6 +2820,27 @@ static void efx_print_product_vpd(struct efx_nic *efx)
netif_info(efx, drv, efx->net_dev,
"Part Number : %.*s\n", j, &vpd_data[i]);
+
+ i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
+ j = ro_size;
+ i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
+ if (i < 0) {
+ netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
+ return;
+ }
+
+ j = pci_vpd_info_field_size(&vpd_data[i]);
+ i += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (i + j > vpd_size) {
+ netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
+ return;
+ }
+
+ efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
+ if (!efx->vpd_sn)
+ return;
+
+ snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
}
@@ -2791,7 +2937,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
netif_info(efx, probe, efx->net_dev,
"Solarflare NIC detected\n");
- efx_print_product_vpd(efx);
+ efx_probe_vpd_strings(efx);
/* Set up basic I/O (BAR mappings etc) */
rc = efx_init_io(efx);
@@ -2835,7 +2981,6 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
fail2:
efx_fini_struct(efx);
fail1:
- pci_set_drvdata(pci_dev, NULL);
WARN_ON(rc > 0);
netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
free_netdev(net_dev);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index b8235ee5d7d7..601224736b9b 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -37,7 +37,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
void efx_rx_slow_fill(unsigned long context);
void __efx_rx_packet(struct efx_channel *channel);
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
@@ -134,17 +134,6 @@ efx_filter_get_filter_safe(struct efx_nic *efx,
return efx->type->filter_get_safe(efx, priority, filter_id, spec);
}
-/**
- * efx_farch_filter_clear_rx - remove RX filters by priority
- * @efx: NIC from which to remove the filters
- * @priority: Maximum priority to remove
- */
-static inline void efx_filter_clear_rx(struct efx_nic *efx,
- enum efx_filter_priority priority)
-{
- return efx->type->filter_clear_rx(efx, priority);
-}
-
static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
enum efx_filter_priority priority)
{
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index 7fdfee019092..75ef7ef6450b 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -165,6 +165,7 @@ enum reset_type {
RESET_TYPE_DMA_ERROR,
RESET_TYPE_TX_SKIP,
RESET_TYPE_MC_FAILURE,
+ RESET_TYPE_MC_BIST,
RESET_TYPE_MAX,
};
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 1f529fa2edb1..f181522688b2 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -318,6 +318,8 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
"eventq.int", NULL);
}
+ efx_fill_test(n++, strings, data, &tests->memory,
+ "core", 0, "memory", NULL);
efx_fill_test(n++, strings, data, &tests->registers,
"core", 0, "registers", NULL);
@@ -357,7 +359,8 @@ static int efx_ethtool_get_sset_count(struct net_device *net_dev,
switch (string_set) {
case ETH_SS_STATS:
return efx->type->describe_stats(efx, NULL) +
- EFX_ETHTOOL_SW_STAT_COUNT;
+ EFX_ETHTOOL_SW_STAT_COUNT +
+ efx_ptp_describe_stats(efx, NULL);
case ETH_SS_TEST:
return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
default:
@@ -378,6 +381,8 @@ static void efx_ethtool_get_strings(struct net_device *net_dev,
for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
strlcpy(strings + i * ETH_GSTRING_LEN,
efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
+ strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
+ efx_ptp_describe_stats(efx, strings);
break;
case ETH_SS_TEST:
efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
@@ -427,8 +432,11 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
break;
}
}
+ data += EFX_ETHTOOL_SW_STAT_COUNT;
spin_unlock_bh(&efx->stats_lock);
+
+ efx_ptp_update_stats(efx, data);
}
static void efx_ethtool_self_test(struct net_device *net_dev,
@@ -1032,7 +1040,7 @@ static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table));
- efx_nic_push_rx_indir_table(efx);
+ efx->type->rx_push_rss_config(efx);
return 0;
}
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index ff5d322b9b49..18d6f761f4d0 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -469,6 +469,24 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
}
/**************************************************************************
*
+ * RSS
+ *
+ **************************************************************************
+ */
+
+static void falcon_b0_rx_push_rss_config(struct efx_nic *efx)
+{
+ efx_oword_t temp;
+
+ /* Set hash key for IPv4 */
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+
+ efx_farch_rx_push_indir_table(efx);
+}
+
+/**************************************************************************
+ *
* EEPROM/flash
*
**************************************************************************
@@ -2247,6 +2265,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
struct falcon_board *board;
int rc;
+ efx->primary = efx; /* only one usable function per controller */
+
/* Allocate storage for hardware specific data */
nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
if (!nic_data)
@@ -2482,9 +2502,7 @@ static int falcon_init_nic(struct efx_nic *efx)
falcon_init_rx_cfg(efx);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- /* Set hash key for IPv4 */
- memcpy(&temp, efx->rx_hash_key, sizeof(temp));
- efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+ falcon_b0_rx_push_rss_config(efx);
/* Set destination of both TX and RX Flush events */
EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
@@ -2593,6 +2611,14 @@ void falcon_start_nic_stats(struct efx_nic *efx)
spin_unlock_bh(&efx->stats_lock);
}
+/* We don't actually pull stats on Falcon. Wait 10ms so that
+ * they have arrived when this is called just after start_stats.
+ */
+static void falcon_pull_nic_stats(struct efx_nic *efx)
+{
+ msleep(10);
+}
+
void falcon_stop_nic_stats(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
@@ -2672,6 +2698,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
+ .pull_stats = falcon_pull_nic_stats,
.stop_stats = falcon_stop_nic_stats,
.set_id_led = falcon_set_id_led,
.push_irq_moderation = falcon_push_irq_moderation,
@@ -2692,7 +2719,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.tx_init = efx_farch_tx_init,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
- .rx_push_indir_table = efx_farch_rx_push_indir_table,
+ .rx_push_rss_config = efx_port_dummy_op_void,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
.rx_remove = efx_farch_rx_remove,
@@ -2765,6 +2792,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
.describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
+ .pull_stats = falcon_pull_nic_stats,
.stop_stats = falcon_stop_nic_stats,
.set_id_led = falcon_set_id_led,
.push_irq_moderation = falcon_push_irq_moderation,
@@ -2786,7 +2814,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
.tx_init = efx_farch_tx_init,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
- .rx_push_indir_table = efx_farch_rx_push_indir_table,
+ .rx_push_rss_config = falcon_b0_rx_push_rss_config,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
.rx_remove = efx_farch_rx_remove,
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index c0907d884d75..f72489a105ca 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -1147,7 +1147,7 @@ static void efx_farch_handle_generated_event(struct efx_channel *channel,
/* The queue must be empty, so we won't receive any rx
* events, so efx_process_channel() won't refill the
* queue. Refill it here */
- efx_fast_push_rx_descriptors(rx_queue);
+ efx_fast_push_rx_descriptors(rx_queue, true);
} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
efx_farch_handle_drain_event(channel);
} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
@@ -1618,8 +1618,7 @@ void efx_farch_rx_push_indir_table(struct efx_nic *efx)
size_t i = 0;
efx_dword_t dword;
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
- return;
+ BUG_ON(efx_nic_rev(efx) < EFX_REV_FALCON_B0);
BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
FR_BZ_RX_INDIRECTION_TBL_ROWS);
@@ -1745,8 +1744,6 @@ void efx_farch_init_common(struct efx_nic *efx)
EFX_INVERT_OWORD(temp);
efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
- efx_farch_rx_push_indir_table(efx);
-
/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
* controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
*/
@@ -2187,14 +2184,14 @@ efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
}
static void
-efx_farch_filter_init_rx_for_stack(struct efx_nic *efx,
- struct efx_farch_filter_spec *spec)
+efx_farch_filter_init_rx_auto(struct efx_nic *efx,
+ struct efx_farch_filter_spec *spec)
{
/* If there's only one channel then disable RSS for non-VF
* traffic, thereby allowing VFs to use RSS when the PF can't.
*/
- spec->priority = EFX_FILTER_PRI_REQUIRED;
- spec->flags = (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_STACK |
+ spec->priority = EFX_FILTER_PRI_AUTO;
+ spec->flags = (EFX_FILTER_FLAG_RX |
(efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) |
(efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
spec->dmaq_id = 0;
@@ -2459,20 +2456,13 @@ s32 efx_farch_filter_insert(struct efx_nic *efx,
rc = -EEXIST;
goto out;
}
- if (spec.priority < saved_spec->priority &&
- !(saved_spec->priority == EFX_FILTER_PRI_REQUIRED &&
- saved_spec->flags & EFX_FILTER_FLAG_RX_STACK)) {
+ if (spec.priority < saved_spec->priority) {
rc = -EPERM;
goto out;
}
- if (spec.flags & EFX_FILTER_FLAG_RX_STACK) {
- /* Just make sure it won't be removed */
- saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
- rc = 0;
- goto out;
- }
- /* Retain the RX_STACK flag */
- spec.flags |= saved_spec->flags & EFX_FILTER_FLAG_RX_STACK;
+ if (saved_spec->priority == EFX_FILTER_PRI_AUTO ||
+ saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO)
+ spec.flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
}
/* Insert the filter */
@@ -2553,11 +2543,11 @@ static int efx_farch_filter_remove(struct efx_nic *efx,
struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
if (!test_bit(filter_idx, table->used_bitmap) ||
- spec->priority > priority)
+ spec->priority != priority)
return -ENOENT;
- if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
- efx_farch_filter_init_rx_for_stack(efx, spec);
+ if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
+ efx_farch_filter_init_rx_auto(efx, spec);
efx_farch_filter_push_rx_config(efx);
} else {
efx_farch_filter_table_clear_entry(efx, table, filter_idx);
@@ -2640,12 +2630,15 @@ efx_farch_filter_table_clear(struct efx_nic *efx,
unsigned int filter_idx;
spin_lock_bh(&efx->filter_lock);
- for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
- efx_farch_filter_remove(efx, table, filter_idx, priority);
+ for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
+ if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO)
+ efx_farch_filter_remove(efx, table,
+ filter_idx, priority);
+ }
spin_unlock_bh(&efx->filter_lock);
}
-void efx_farch_filter_clear_rx(struct efx_nic *efx,
+int efx_farch_filter_clear_rx(struct efx_nic *efx,
enum efx_filter_priority priority)
{
efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
@@ -2654,6 +2647,7 @@ void efx_farch_filter_clear_rx(struct efx_nic *efx,
priority);
efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
priority);
+ return 0;
}
u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
@@ -2822,7 +2816,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
spec = &table->spec[i];
spec->type = EFX_FARCH_FILTER_UC_DEF + i;
- efx_farch_filter_init_rx_for_stack(efx, spec);
+ efx_farch_filter_init_rx_auto(efx, spec);
__set_bit(i, table->used_bitmap);
}
}
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 63c77a557178..3ef298d3c47e 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -59,12 +59,16 @@ enum efx_filter_match_flags {
/**
* enum efx_filter_priority - priority of a hardware filter specification
* @EFX_FILTER_PRI_HINT: Performance hint
+ * @EFX_FILTER_PRI_AUTO: Automatic filter based on device address list
+ * or hardware requirements. This may only be used by the filter
+ * implementation for each NIC type.
* @EFX_FILTER_PRI_MANUAL: Manually configured filter
* @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour (user-level
* networking and SR-IOV)
*/
enum efx_filter_priority {
EFX_FILTER_PRI_HINT = 0,
+ EFX_FILTER_PRI_AUTO,
EFX_FILTER_PRI_MANUAL,
EFX_FILTER_PRI_REQUIRED,
};
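To illustrate the new semantics (a sketch using the driver's own filter API; the address variable and RX queue are hypothetical): a MANUAL filter inserted over an address already covered by an AUTO filter is marked RX_OVER_AUTO internally, and removing it restores the AUTO filter rather than leaving a hole, per the efx_farch_filter_remove() change in farch.c above.

	struct efx_filter_spec spec;
	u8 mac_addr[ETH_ALEN];	/* filled in elsewhere */
	s32 rc;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
			   EFX_FILTER_FLAG_RX_RSS, 0 /* rxq */);
	efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, mac_addr);
	rc = efx_filter_insert_filter(efx, &spec, true);
	/* ... later: removal reinstates the automatic filter */
	if (rc >= 0)
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL, rc);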
@@ -78,19 +82,18 @@ enum efx_filter_priority {
* according to the indirection table.
* @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
* queue.
- * @EFX_FILTER_FLAG_RX_STACK: Indicates a filter inserted for the
- * network stack. The filter must have a priority of
- * %EFX_FILTER_PRI_REQUIRED. It can be steered by a replacement
- * request with priority %EFX_FILTER_PRI_MANUAL, and a removal
- * request with priority %EFX_FILTER_PRI_MANUAL will reset the
- * steering (but not remove the filter).
+ * @EFX_FILTER_FLAG_RX_OVER_AUTO: Indicates a filter that is
+ * overriding an automatic filter (priority
+ * %EFX_FILTER_PRI_AUTO). This may only be set by the filter
+ * implementation for each type. A removal request will restore
+ * the automatic filter in its place.
* @EFX_FILTER_FLAG_RX: Filter is for RX
* @EFX_FILTER_FLAG_TX: Filter is for TX
*/
enum efx_filter_flags {
EFX_FILTER_FLAG_RX_RSS = 0x01,
EFX_FILTER_FLAG_RX_SCATTER = 0x02,
- EFX_FILTER_FLAG_RX_STACK = 0x04,
+ EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04,
EFX_FILTER_FLAG_RX = 0x08,
EFX_FILTER_FLAG_TX = 0x10,
};
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 366c8e3e3784..0d5d7b5325e8 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -42,6 +42,7 @@ struct efx_mcdi_async_param {
unsigned int cmd;
size_t inlen;
size_t outlen;
+ bool quiet;
efx_mcdi_async_completer *complete;
unsigned long cookie;
/* followed by request/response buffer */
@@ -50,6 +51,7 @@ struct efx_mcdi_async_param {
static void efx_mcdi_timeout_async(unsigned long context);
static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
bool *was_attached_out);
+static bool efx_mcdi_poll_once(struct efx_nic *efx);
static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
{
@@ -100,6 +102,10 @@ int efx_mcdi_init(struct efx_nic *efx)
netif_err(efx, probe, efx->net_dev,
"Host already registered with MCPU\n");
+ if (efx->mcdi->fn_flags &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
+ efx->primary = efx;
+
return 0;
}
@@ -190,6 +196,8 @@ static int efx_mcdi_errno(unsigned int mcdi_err)
TRANSLATE_ERROR(EALREADY);
TRANSLATE_ERROR(ENOSPC);
#undef TRANSLATE_ERROR
+ case MC_CMD_ERR_ENOTSUP:
+ return -EOPNOTSUPP;
case MC_CMD_ERR_ALLOC_FAIL:
return -ENOBUFS;
case MC_CMD_ERR_MAC_EXIST:
@@ -237,6 +245,21 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
}
}
+static bool efx_mcdi_poll_once(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ rmb();
+ if (!efx->type->mcdi_poll_response(efx))
+ return false;
+
+ spin_lock_bh(&mcdi->iface_lock);
+ efx_mcdi_read_response_header(efx);
+ spin_unlock_bh(&mcdi->iface_lock);
+
+ return true;
+}
+
static int efx_mcdi_poll(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
@@ -272,18 +295,13 @@ static int efx_mcdi_poll(struct efx_nic *efx)
time = jiffies;
- rmb();
- if (efx->type->mcdi_poll_response(efx))
+ if (efx_mcdi_poll_once(efx))
break;
if (time_after(time, finish))
return -ETIMEDOUT;
}
- spin_lock_bh(&mcdi->iface_lock);
- efx_mcdi_read_response_header(efx);
- spin_unlock_bh(&mcdi->iface_lock);
-
/* Return rc=0 like wait_event_timeout() */
return 0;
}
@@ -391,8 +409,9 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
{
struct efx_nic *efx = mcdi->efx;
struct efx_mcdi_async_param *async;
- size_t hdr_len, data_len;
+ size_t hdr_len, data_len, err_len;
efx_dword_t *outbuf;
+ MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
int rc;
if (cmpxchg(&mcdi->state,
@@ -433,6 +452,13 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
outbuf = (efx_dword_t *)(async + 1);
efx->type->mcdi_read_response(efx, outbuf, hdr_len,
min(async->outlen, data_len));
+ if (!timeout && rc && !async->quiet) {
+ err_len = min(sizeof(errbuf), data_len);
+ efx->type->mcdi_read_response(efx, errbuf, hdr_len,
+ sizeof(errbuf));
+ efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf,
+ err_len, rc);
+ }
async->complete(efx, async->cookie, rc, outbuf, data_len);
kfree(async);
@@ -508,18 +534,129 @@ efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
return 0;
}
+static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual, bool quiet)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
+ int rc;
+
+ if (mcdi->mode == MCDI_MODE_POLL)
+ rc = efx_mcdi_poll(efx);
+ else
+ rc = efx_mcdi_await_completion(efx);
+
+ if (rc != 0) {
+ netif_err(efx, hw, efx->net_dev,
+ "MC command 0x%x inlen %d mode %d timed out\n",
+ cmd, (int)inlen, mcdi->mode);
+
+ if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
+ netif_err(efx, hw, efx->net_dev,
+ "MCDI request was completed without an event\n");
+ rc = 0;
+ }
+
+ /* Close the race with efx_mcdi_ev_cpl() executing just too late
+ * and completing a request we've just cancelled, by ensuring
+ * that the seqno check therein fails.
+ */
+ spin_lock_bh(&mcdi->iface_lock);
+ ++mcdi->seqno;
+ ++mcdi->credits;
+ spin_unlock_bh(&mcdi->iface_lock);
+ }
+
+ if (rc != 0) {
+ if (outlen_actual)
+ *outlen_actual = 0;
+ } else {
+ size_t hdr_len, data_len, err_len;
+
+ /* At the very least we need a memory barrier here to ensure
+ * we pick up changes from efx_mcdi_ev_cpl(). Protect against
+ * a spurious efx_mcdi_ev_cpl() running concurrently by
+ * acquiring the iface_lock. */
+ spin_lock_bh(&mcdi->iface_lock);
+ rc = mcdi->resprc;
+ hdr_len = mcdi->resp_hdr_len;
+ data_len = mcdi->resp_data_len;
+ err_len = min(sizeof(errbuf), data_len);
+ spin_unlock_bh(&mcdi->iface_lock);
+
+ BUG_ON(rc > 0);
+
+ efx->type->mcdi_read_response(efx, outbuf, hdr_len,
+ min(outlen, data_len));
+ if (outlen_actual)
+ *outlen_actual = data_len;
+
+ efx->type->mcdi_read_response(efx, errbuf, hdr_len, err_len);
+
+ if (cmd == MC_CMD_REBOOT && rc == -EIO) {
+ /* Don't reset if MC_CMD_REBOOT returns EIO */
+ } else if (rc == -EIO || rc == -EINTR) {
+ netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
+ -rc);
+ efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+ } else if (rc && !quiet) {
+ efx_mcdi_display_error(efx, cmd, inlen, errbuf, err_len,
+ rc);
+ }
+
+ if (rc == -EIO || rc == -EINTR) {
+ msleep(MCDI_STATUS_SLEEP_MS);
+ efx_mcdi_poll_reboot(efx);
+ mcdi->new_epoch = true;
+ }
+ }
+
+ efx_mcdi_release(mcdi);
+ return rc;
+}
+
+static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual, bool quiet)
+{
+ int rc;
+
+ rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
+ if (rc) {
+ if (outlen_actual)
+ *outlen_actual = 0;
+ return rc;
+ }
+ return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+ outlen_actual, quiet);
+}
+
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
const efx_dword_t *inbuf, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual)
{
- int rc;
+ return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen,
+ outlen_actual, false);
+}
- rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
- if (rc)
- return rc;
- return efx_mcdi_rpc_finish(efx, cmd, inlen,
- outbuf, outlen, outlen_actual);
+/* Normally, on receiving an error code in the MCDI response,
+ * efx_mcdi_rpc will log an error message containing (among other
+ * things) the raw error code, by means of efx_mcdi_display_error.
+ * This _quiet version suppresses that; if the caller wishes to log
+ * the error conditionally on the return code, it should call this
+ * function and is then responsible for calling efx_mcdi_display_error
+ * as needed.
+ */
+int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual)
+{
+ return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen,
+ outlen_actual, true);
}
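A minimal sketch of the calling pattern this enables (MC_CMD_SOME_OP and the "expected" -ENOENT are hypothetical): suppress the generic log line, then report only unexpected failures.

	MCDI_DECLARE_BUF(inbuf, MC_CMD_SOME_OP_IN_LEN);
	MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_SOME_OP_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SOME_OP, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);
	if (rc && rc != -ENOENT)	/* -ENOENT expected for this caller */
		efx_mcdi_display_error(efx, MC_CMD_SOME_OP, sizeof(inbuf),
				       outbuf, outlen, rc);

The efx_mcdi_read_assertion() change further down follows exactly this shape.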
int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
@@ -532,35 +669,19 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
if (rc)
return rc;
+ if (efx->mc_bist_for_other_fn)
+ return -ENETDOWN;
+
efx_mcdi_acquire_sync(mcdi);
efx_mcdi_send_request(efx, cmd, inbuf, inlen);
return 0;
}
-/**
- * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
- * @efx: NIC through which to issue the command
- * @cmd: Command type number
- * @inbuf: Command parameters
- * @inlen: Length of command parameters, in bytes
- * @outlen: Length to allocate for response buffer, in bytes
- * @complete: Function to be called on completion or cancellation.
- * @cookie: Arbitrary value to be passed to @complete.
- *
- * This function does not sleep and therefore may be called in atomic
- * context. It will fail if event queues are disabled or if MCDI
- * event completions have been disabled due to an error.
- *
- * If it succeeds, the @complete function will be called exactly once
- * in atomic context, when one of the following occurs:
- * (a) the completion event is received (in NAPI context)
- * (b) event queues are disabled (in the process that disables them)
- * (c) the request times-out (in timer context)
- */
-int
-efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
- const efx_dword_t *inbuf, size_t inlen, size_t outlen,
- efx_mcdi_async_completer *complete, unsigned long cookie)
+static int _efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie, bool quiet)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
struct efx_mcdi_async_param *async;
@@ -570,6 +691,9 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
if (rc)
return rc;
+ if (efx->mc_bist_for_other_fn)
+ return -ENETDOWN;
+
async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
GFP_ATOMIC);
if (!async)
@@ -578,6 +702,7 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
async->cmd = cmd;
async->inlen = inlen;
async->outlen = outlen;
+ async->quiet = quiet;
async->complete = complete;
async->cookie = cookie;
memcpy(async + 1, inbuf, inlen);
@@ -606,71 +731,73 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
return rc;
}
+/**
+ * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
+ * @efx: NIC through which to issue the command
+ * @cmd: Command type number
+ * @inbuf: Command parameters
+ * @inlen: Length of command parameters, in bytes
+ * @outlen: Length to allocate for response buffer, in bytes
+ * @complete: Function to be called on completion or cancellation.
+ * @cookie: Arbitrary value to be passed to @complete.
+ *
+ * This function does not sleep and therefore may be called in atomic
+ * context. It will fail if event queues are disabled or if MCDI
+ * event completions have been disabled due to an error.
+ *
+ * If it succeeds, the @complete function will be called exactly once
+ * in atomic context, when one of the following occurs:
+ * (a) the completion event is received (in NAPI context)
+ * (b) event queues are disabled (in the process that disables them)
+ * (c) the request times-out (in timer context)
+ */
+int
+efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen, size_t outlen,
+ efx_mcdi_async_completer *complete, unsigned long cookie)
+{
+ return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
+ cookie, false);
+}
+
+int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen, efx_mcdi_async_completer *complete,
+ unsigned long cookie)
+{
+ return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
+ cookie, true);
+}
+
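A hedged sketch of the asynchronous API in use (the completer and command are illustrative, not from this patch); per the kernel-doc above, the completer runs exactly once, in atomic context.

	static void example_complete(struct efx_nic *efx, unsigned long cookie,
				     int rc, efx_dword_t *outbuf,
				     size_t outlen_actual)
	{
		if (rc)
			netif_warn(efx, hw, efx->net_dev,
				   "async MCDI failed: rc=%d\n", rc);
	}

	/* ... from atomic context, e.g. NAPI poll: */
	rc = efx_mcdi_rpc_async(efx, MC_CMD_SOME_OP, inbuf, sizeof(inbuf),
				MC_CMD_SOME_OP_OUT_LEN, example_complete, 0);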
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual)
{
- struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- int rc;
-
- if (mcdi->mode == MCDI_MODE_POLL)
- rc = efx_mcdi_poll(efx);
- else
- rc = efx_mcdi_await_completion(efx);
-
- if (rc != 0) {
- /* Close the race with efx_mcdi_ev_cpl() executing just too late
- * and completing a request we've just cancelled, by ensuring
- * that the seqno check therein fails.
- */
- spin_lock_bh(&mcdi->iface_lock);
- ++mcdi->seqno;
- ++mcdi->credits;
- spin_unlock_bh(&mcdi->iface_lock);
-
- netif_err(efx, hw, efx->net_dev,
- "MC command 0x%x inlen %d mode %d timed out\n",
- cmd, (int)inlen, mcdi->mode);
- } else {
- size_t hdr_len, data_len;
-
- /* At the very least we need a memory barrier here to ensure
- * we pick up changes from efx_mcdi_ev_cpl(). Protect against
- * a spurious efx_mcdi_ev_cpl() running concurrently by
- * acquiring the iface_lock. */
- spin_lock_bh(&mcdi->iface_lock);
- rc = mcdi->resprc;
- hdr_len = mcdi->resp_hdr_len;
- data_len = mcdi->resp_data_len;
- spin_unlock_bh(&mcdi->iface_lock);
-
- BUG_ON(rc > 0);
+ return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+ outlen_actual, false);
+}
- if (rc == 0) {
- efx->type->mcdi_read_response(efx, outbuf, hdr_len,
- min(outlen, data_len));
- if (outlen_actual != NULL)
- *outlen_actual = data_len;
- } else if (cmd == MC_CMD_REBOOT && rc == -EIO)
- ; /* Don't reset if MC_CMD_REBOOT returns EIO */
- else if (rc == -EIO || rc == -EINTR) {
- netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
- -rc);
- efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
- } else
- netif_dbg(efx, hw, efx->net_dev,
- "MC command 0x%x inlen %d failed rc=%d\n",
- cmd, (int)inlen, -rc);
+int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual)
+{
+ return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+ outlen_actual, true);
+}
- if (rc == -EIO || rc == -EINTR) {
- msleep(MCDI_STATUS_SLEEP_MS);
- efx_mcdi_poll_reboot(efx);
- mcdi->new_epoch = true;
- }
- }
+void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
+ size_t inlen, efx_dword_t *outbuf,
+ size_t outlen, int rc)
+{
+ int code = 0, err_arg = 0;
- efx_mcdi_release(mcdi);
- return rc;
+ if (outlen >= MC_CMD_ERR_CODE_OFST + 4)
+ code = MCDI_DWORD(outbuf, ERR_CODE);
+ if (outlen >= MC_CMD_ERR_ARG_OFST + 4)
+ err_arg = MCDI_DWORD(outbuf, ERR_ARG);
+ netif_err(efx, hw, efx->net_dev,
+ "MC command 0x%x inlen %d failed rc=%d (raw=%d) arg=%d\n",
+ cmd, (int)inlen, rc, code, err_arg);
}
/* Switch to polled MCDI completions. This can be called in various
@@ -815,6 +942,30 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
spin_unlock(&mcdi->iface_lock);
}
+/* The MC is going down into BIST mode. Set the BIST flag to block
+ * new MCDI, cancel any outstanding MCDI and schedule a BIST-type reset
+ * (which doesn't actually execute a reset, it waits for the controlling
+ * function to reset it).
+ */
+static void efx_mcdi_ev_bist(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ spin_lock(&mcdi->iface_lock);
+ efx->mc_bist_for_other_fn = true;
+ if (efx_mcdi_complete_sync(mcdi)) {
+ if (mcdi->mode == MCDI_MODE_EVENTS) {
+ mcdi->resprc = -EIO;
+ mcdi->resp_hdr_len = 0;
+ mcdi->resp_data_len = 0;
+ ++mcdi->credits;
+ }
+ }
+ mcdi->new_epoch = true;
+ efx_schedule_reset(efx, RESET_TYPE_MC_BIST);
+ spin_unlock(&mcdi->iface_lock);
+}
+
/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
efx_qword_t *event)
@@ -848,14 +999,18 @@ void efx_mcdi_process_event(struct efx_channel *channel,
efx_mcdi_sensor_event(efx, event);
break;
case MCDI_EVENT_CODE_SCHEDERR:
- netif_info(efx, hw, efx->net_dev,
- "MC Scheduler error address=0x%x\n", data);
+ netif_dbg(efx, hw, efx->net_dev,
+ "MC Scheduler alert (0x%x)\n", data);
break;
case MCDI_EVENT_CODE_REBOOT:
case MCDI_EVENT_CODE_MC_REBOOT:
netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
efx_mcdi_ev_death(efx, -EIO);
break;
+ case MCDI_EVENT_CODE_MC_BIST:
+ netif_info(efx, hw, efx->net_dev, "MC entered BIST mode\n");
+ efx_mcdi_ev_bist(efx);
+ break;
case MCDI_EVENT_CODE_MAC_STATS_DMA:
/* MAC stats are gathered lazily. We can ignore this. */
break;
@@ -867,6 +1022,9 @@ void efx_mcdi_process_event(struct efx_channel *channel,
case MCDI_EVENT_CODE_PTP_PPS:
efx_ptp_event(efx, event);
break;
+ case MCDI_EVENT_CODE_PTP_TIME:
+ efx_time_sync_event(channel, event);
+ break;
case MCDI_EVENT_CODE_TX_FLUSH:
case MCDI_EVENT_CODE_RX_FLUSH:
/* Two flush events will be sent: one to the same event
@@ -981,13 +1139,27 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
goto fail;
}
+ if (driver_operating) {
+ if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN) {
+ efx->mcdi->fn_flags =
+ MCDI_DWORD(outbuf,
+ DRV_ATTACH_EXT_OUT_FUNC_FLAGS);
+ } else {
+ /* Synthesise flags for Siena */
+ efx->mcdi->fn_flags =
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED |
+ (efx_port_num(efx) == 0) <<
+ MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY;
+ }
+ }
+
/* We currently assume we have control of the external link
* and are completely trusted by firmware. Abort probing
* if that's not true for this function.
*/
if (driver_operating &&
- outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN &&
- (MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS) &
+ (efx->mcdi->fn_flags &
(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
@@ -1078,13 +1250,6 @@ int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1201,7 +1366,7 @@ fail1:
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
unsigned int flags, index;
const char *reason;
size_t outlen;
@@ -1216,13 +1381,17 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
retry = 2;
do {
MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
- inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
- outbuf, sizeof(outbuf), &outlen);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
+ inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
+ outbuf, sizeof(outbuf), &outlen);
} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
- if (rc)
+ if (rc) {
+ efx_mcdi_display_error(efx, MC_CMD_GET_ASSERTS,
+ MC_CMD_GET_ASSERTS_IN_LEN, outbuf,
+ outlen, rc);
return rc;
+ }
if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
return -EIO;
@@ -1300,18 +1469,11 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
}
static int efx_mcdi_reset_port(struct efx_nic *efx)
{
- int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
- if (rc)
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
- return rc;
+ return efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
}
static int efx_mcdi_reset_mc(struct efx_nic *efx)
@@ -1328,7 +1490,6 @@ static int efx_mcdi_reset_mc(struct efx_nic *efx)
return 0;
if (rc == 0)
rc = -EIO;
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1430,13 +1591,6 @@ int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1477,13 +1631,6 @@ int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
int rc;
rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1513,13 +1660,6 @@ static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1539,14 +1679,10 @@ static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
- goto fail;
+ return rc;
memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
}
static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
@@ -1566,13 +1702,6 @@ static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1590,13 +1719,6 @@ static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1611,13 +1733,6 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 656a3277c2b2..52931aebf3c3 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -75,6 +75,8 @@ struct efx_mcdi_mon {
unsigned long last_update;
struct device *device;
struct efx_mcdi_mon_attribute *attrs;
+ struct attribute_group group;
+ const struct attribute_group *groups[2];
unsigned int n_attrs;
};
@@ -92,12 +94,14 @@ struct efx_mcdi_mtd_partition {
* struct efx_mcdi_data - extra state for NICs that implement MCDI
* @iface: Interface/protocol state
* @hwmon: Hardware monitor state
+ * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
*/
struct efx_mcdi_data {
struct efx_mcdi_iface iface;
#ifdef CONFIG_SFC_MCDI_MON
struct efx_mcdi_mon hwmon;
#endif
+ u32 fn_flags;
};
#ifdef CONFIG_SFC_MCDI_MON
@@ -114,12 +118,19 @@ void efx_mcdi_fini(struct efx_nic *efx);
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf,
size_t inlen, efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
+int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual);
int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
const efx_dword_t *inbuf, size_t inlen);
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
+int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd,
+ size_t inlen, efx_dword_t *outbuf,
+ size_t outlen, size_t *outlen_actual);
typedef void efx_mcdi_async_completer(struct efx_nic *efx,
unsigned long cookie, int rc,
@@ -129,6 +140,15 @@ int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
const efx_dword_t *inbuf, size_t inlen, size_t outlen,
efx_mcdi_async_completer *complete,
unsigned long cookie);
+int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie);
+
+void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
+ size_t inlen, efx_dword_t *outbuf,
+ size_t outlen, int rc);
int efx_mcdi_poll_reboot(struct efx_nic *efx);
void efx_mcdi_mode_poll(struct efx_nic *efx);
@@ -145,6 +165,8 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
*/
#define MCDI_DECLARE_BUF(_name, _len) \
efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
+#define MCDI_DECLARE_BUF_OUT_OR_ERR(_name, _len) \
+ MCDI_DECLARE_BUF(_name, max_t(size_t, _len, 8))
#define _MCDI_PTR(_buf, _offset) \
((u8 *)(_buf) + (_offset))
#define MCDI_PTR(_buf, _field) \
@@ -299,6 +321,7 @@ int efx_mcdi_set_mac(struct efx_nic *efx);
#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
void efx_mcdi_mac_start_stats(struct efx_nic *efx);
void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
+void efx_mcdi_mac_pull_stats(struct efx_nic *efx);
bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
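
One detail worth noting in the hunk above: MCDI_DECLARE_BUF_OUT_OR_ERR() pads the declared response buffer to at least eight bytes. Judging from the macro itself, the intent is that an MCDI error response (an error code plus an argument word) always fits even when the expected success response is shorter; treat that error layout as an assumption. A sketch of the intended use with a command whose success response is empty:

	/* The success response is empty, but an error response still
	 * needs room for the code/argument pair, hence the max() above.
	 */
	MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_ENTITY_RESET_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ENTITY_RESET, NULL, 0,
				outbuf, sizeof(outbuf), &outlen);
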
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index 4cc5d95b2a5a..bc27d5b580f5 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -24,6 +24,15 @@ enum efx_hwmon_type {
EFX_HWMON_IN, /* voltage */
EFX_HWMON_CURR, /* current */
EFX_HWMON_POWER, /* power */
+ EFX_HWMON_TYPES_COUNT
+};
+
+static const char *const efx_hwmon_unit[EFX_HWMON_TYPES_COUNT] = {
+ [EFX_HWMON_TEMP] = " degC",
+ [EFX_HWMON_COOL] = " rpm", /* though nonsense for a heatsink */
+ [EFX_HWMON_IN] = " mV",
+ [EFX_HWMON_CURR] = " mA",
+ [EFX_HWMON_POWER] = " W",
};
static const struct {
@@ -33,13 +42,13 @@ static const struct {
} efx_mcdi_sensor_type[] = {
#define SENSOR(name, label, hwmon_type, port) \
[MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port }
- SENSOR(CONTROLLER_TEMP, "Controller ext. temp.", TEMP, -1),
+ SENSOR(CONTROLLER_TEMP, "Controller board temp.", TEMP, -1),
SENSOR(PHY_COMMON_TEMP, "PHY temp.", TEMP, -1),
- SENSOR(CONTROLLER_COOLING, "Controller cooling", COOL, -1),
+ SENSOR(CONTROLLER_COOLING, "Controller heat sink", COOL, -1),
SENSOR(PHY0_TEMP, "PHY temp.", TEMP, 0),
- SENSOR(PHY0_COOLING, "PHY cooling", COOL, 0),
+ SENSOR(PHY0_COOLING, "PHY heat sink", COOL, 0),
SENSOR(PHY1_TEMP, "PHY temp.", TEMP, 1),
- SENSOR(PHY1_COOLING, "PHY cooling", COOL, 1),
+ SENSOR(PHY1_COOLING, "PHY heat sink", COOL, 1),
SENSOR(IN_1V0, "1.0V supply", IN, -1),
SENSOR(IN_1V2, "1.2V supply", IN, -1),
SENSOR(IN_1V8, "1.8V supply", IN, -1),
@@ -47,36 +56,42 @@ static const struct {
SENSOR(IN_3V3, "3.3V supply", IN, -1),
SENSOR(IN_12V0, "12.0V supply", IN, -1),
SENSOR(IN_1V2A, "1.2V analogue supply", IN, -1),
- SENSOR(IN_VREF, "ref. voltage", IN, -1),
- SENSOR(OUT_VAOE, "AOE power supply", IN, -1),
- SENSOR(AOE_TEMP, "AOE temp.", TEMP, -1),
- SENSOR(PSU_AOE_TEMP, "AOE PSU temp.", TEMP, -1),
- SENSOR(PSU_TEMP, "Controller PSU temp.", TEMP, -1),
- SENSOR(FAN_0, NULL, COOL, -1),
- SENSOR(FAN_1, NULL, COOL, -1),
- SENSOR(FAN_2, NULL, COOL, -1),
- SENSOR(FAN_3, NULL, COOL, -1),
- SENSOR(FAN_4, NULL, COOL, -1),
+ SENSOR(IN_VREF, "Ref. voltage", IN, -1),
+ SENSOR(OUT_VAOE, "AOE FPGA supply", IN, -1),
+ SENSOR(AOE_TEMP, "AOE FPGA temp.", TEMP, -1),
+ SENSOR(PSU_AOE_TEMP, "AOE regulator temp.", TEMP, -1),
+ SENSOR(PSU_TEMP, "Controller regulator temp.",
+ TEMP, -1),
+ SENSOR(FAN_0, "Fan 0", COOL, -1),
+ SENSOR(FAN_1, "Fan 1", COOL, -1),
+ SENSOR(FAN_2, "Fan 2", COOL, -1),
+ SENSOR(FAN_3, "Fan 3", COOL, -1),
+ SENSOR(FAN_4, "Fan 4", COOL, -1),
SENSOR(IN_VAOE, "AOE input supply", IN, -1),
SENSOR(OUT_IAOE, "AOE output current", CURR, -1),
SENSOR(IN_IAOE, "AOE input current", CURR, -1),
SENSOR(NIC_POWER, "Board power use", POWER, -1),
SENSOR(IN_0V9, "0.9V supply", IN, -1),
- SENSOR(IN_I0V9, "0.9V input current", CURR, -1),
- SENSOR(IN_I1V2, "1.2V input current", CURR, -1),
- SENSOR(IN_0V9_ADC, "0.9V supply (at ADC)", IN, -1),
- SENSOR(CONTROLLER_2_TEMP, "Controller ext. temp. 2", TEMP, -1),
- SENSOR(VREG_INTERNAL_TEMP, "Voltage regulator temp.", TEMP, -1),
+ SENSOR(IN_I0V9, "0.9V supply current", CURR, -1),
+ SENSOR(IN_I1V2, "1.2V supply current", CURR, -1),
+ SENSOR(IN_0V9_ADC, "0.9V supply (ext. ADC)", IN, -1),
+ SENSOR(CONTROLLER_2_TEMP, "Controller board temp. 2", TEMP, -1),
+ SENSOR(VREG_INTERNAL_TEMP, "Regulator die temp.", TEMP, -1),
SENSOR(VREG_0V9_TEMP, "0.9V regulator temp.", TEMP, -1),
SENSOR(VREG_1V2_TEMP, "1.2V regulator temp.", TEMP, -1),
- SENSOR(CONTROLLER_VPTAT, "Controller int. temp. raw", IN, -1),
- SENSOR(CONTROLLER_INTERNAL_TEMP, "Controller int. temp.", TEMP, -1),
+ SENSOR(CONTROLLER_VPTAT,
+ "Controller PTAT voltage (int. ADC)", IN, -1),
+ SENSOR(CONTROLLER_INTERNAL_TEMP,
+ "Controller die temp. (int. ADC)", TEMP, -1),
SENSOR(CONTROLLER_VPTAT_EXTADC,
- "Controller int. temp. raw (at ADC)", IN, -1),
+ "Controller PTAT voltage (ext. ADC)", IN, -1),
SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC,
- "Controller int. temp. (via ADC)", TEMP, -1),
+ "Controller die temp. (ext. ADC)", TEMP, -1),
SENSOR(AMBIENT_TEMP, "Ambient temp.", TEMP, -1),
SENSOR(AIRFLOW, "Air flow raw", IN, -1),
+ SENSOR(VDD08D_VSS08D_CSR, "0.9V die (int. ADC)", IN, -1),
+ SENSOR(VDD08D_VSS08D_CSR_EXTADC, "0.9V die (ext. ADC)", IN, -1),
+ SENSOR(HOTPOINT_TEMP, "Controller board temp. (hotpoint)", TEMP, -1),
#undef SENSOR
};
@@ -91,7 +106,8 @@ static const char *const sensor_status_names[] = {
void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
{
unsigned int type, state, value;
- const char *name = NULL, *state_txt;
+ enum efx_hwmon_type hwmon_type = EFX_HWMON_UNKNOWN;
+ const char *name = NULL, *state_txt, *unit;
type = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
@@ -99,16 +115,22 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
/* Deal gracefully with the board having more sensors than we
* know about, but do not expect new sensor states. */
- if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
name = efx_mcdi_sensor_type[type].label;
+ hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+ }
if (!name)
name = "No sensor name available";
EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
state_txt = sensor_status_names[state];
+ EFX_BUG_ON_PARANOID(hwmon_type >= EFX_HWMON_TYPES_COUNT);
+ unit = efx_hwmon_unit[hwmon_type];
+ if (!unit)
+ unit = "";
netif_err(efx, hw, efx->net_dev,
- "Sensor %d (%s) reports condition '%s' for raw value %d\n",
- type, name, state_txt, value);
+ "Sensor %d (%s) reports condition '%s' for value %d%s\n",
+ type, name, state_txt, value, unit);
}
#ifdef CONFIG_SFC_MCDI_MON
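
With the unit table above in place, sensor events log with a unit appended to the raw value. An illustrative example of the new message (device name, sensor index and value invented):

	sfc 0000:01:00.0 eth0: Sensor 6 (PHY heat sink) reports condition 'warning' for value 3200 rpm
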
@@ -139,17 +161,10 @@ static int efx_mcdi_mon_update(struct efx_nic *efx)
return rc;
}
-static ssize_t efx_mcdi_mon_show_name(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%s\n", KBUILD_MODNAME);
-}
-
static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index,
efx_dword_t *entry)
{
- struct efx_nic *efx = dev_get_drvdata(dev);
+ struct efx_nic *efx = dev_get_drvdata(dev->parent);
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
int rc;
@@ -263,7 +278,7 @@ static ssize_t efx_mcdi_mon_show_label(struct device *dev,
efx_mcdi_sensor_type[mon_attr->type].label);
}
-static int
+static void
efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
ssize_t (*reader)(struct device *,
struct device_attribute *, char *),
@@ -272,7 +287,6 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
{
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs];
- int rc;
strlcpy(attr->name, name, sizeof(attr->name));
attr->index = index;
@@ -286,10 +300,7 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
attr->dev_attr.attr.name = attr->name;
attr->dev_attr.attr.mode = S_IRUGO;
attr->dev_attr.show = reader;
- rc = device_create_file(&efx->pci_dev->dev, &attr->dev_attr);
- if (rc == 0)
- ++hwmon->n_attrs;
- return rc;
+ hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr;
}
int efx_mcdi_mon_probe(struct efx_nic *efx)
@@ -338,26 +349,22 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
efx_mcdi_mon_update(efx);
/* Allocate space for the maximum possible number of
- * attributes for this set of sensors: name of the driver plus
+ * attributes for this set of sensors:
* value, min, max, crit, alarm and label for each sensor.
*/
- n_attrs = 1 + 6 * n_sensors;
+ n_attrs = 6 * n_sensors;
hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL);
if (!hwmon->attrs) {
rc = -ENOMEM;
goto fail;
}
-
- hwmon->device = hwmon_device_register(&efx->pci_dev->dev);
- if (IS_ERR(hwmon->device)) {
- rc = PTR_ERR(hwmon->device);
+ hwmon->group.attrs = kcalloc(n_attrs + 1, sizeof(struct attribute *),
+ GFP_KERNEL);
+ if (!hwmon->group.attrs) {
+ rc = -ENOMEM;
goto fail;
}
- rc = efx_mcdi_mon_add_attr(efx, "name", efx_mcdi_mon_show_name, 0, 0, 0);
- if (rc)
- goto fail;
-
for (i = 0, j = -1, type = -1; ; i++) {
enum efx_hwmon_type hwmon_type;
const char *hwmon_prefix;
@@ -372,7 +379,7 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
page = type / 32;
j = -1;
if (page == n_pages)
- return 0;
+ goto hwmon_register;
MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE,
page);
@@ -453,28 +460,22 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
if (min1 != max1) {
snprintf(name, sizeof(name), "%s%u_input",
hwmon_prefix, hwmon_index);
- rc = efx_mcdi_mon_add_attr(
+ efx_mcdi_mon_add_attr(
efx, name, efx_mcdi_mon_show_value, i, type, 0);
- if (rc)
- goto fail;
if (hwmon_type != EFX_HWMON_POWER) {
snprintf(name, sizeof(name), "%s%u_min",
hwmon_prefix, hwmon_index);
- rc = efx_mcdi_mon_add_attr(
+ efx_mcdi_mon_add_attr(
efx, name, efx_mcdi_mon_show_limit,
i, type, min1);
- if (rc)
- goto fail;
}
snprintf(name, sizeof(name), "%s%u_max",
hwmon_prefix, hwmon_index);
- rc = efx_mcdi_mon_add_attr(
+ efx_mcdi_mon_add_attr(
efx, name, efx_mcdi_mon_show_limit,
i, type, max1);
- if (rc)
- goto fail;
if (min2 != max2) {
/* Assume max2 is critical value.
@@ -482,32 +483,38 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
*/
snprintf(name, sizeof(name), "%s%u_crit",
hwmon_prefix, hwmon_index);
- rc = efx_mcdi_mon_add_attr(
+ efx_mcdi_mon_add_attr(
efx, name, efx_mcdi_mon_show_limit,
i, type, max2);
- if (rc)
- goto fail;
}
}
snprintf(name, sizeof(name), "%s%u_alarm",
hwmon_prefix, hwmon_index);
- rc = efx_mcdi_mon_add_attr(
+ efx_mcdi_mon_add_attr(
efx, name, efx_mcdi_mon_show_alarm, i, type, 0);
- if (rc)
- goto fail;
if (type < ARRAY_SIZE(efx_mcdi_sensor_type) &&
efx_mcdi_sensor_type[type].label) {
snprintf(name, sizeof(name), "%s%u_label",
hwmon_prefix, hwmon_index);
- rc = efx_mcdi_mon_add_attr(
+ efx_mcdi_mon_add_attr(
efx, name, efx_mcdi_mon_show_label, i, type, 0);
- if (rc)
- goto fail;
}
}
+hwmon_register:
+ hwmon->groups[0] = &hwmon->group;
+ hwmon->device = hwmon_device_register_with_groups(&efx->pci_dev->dev,
+ KBUILD_MODNAME, NULL,
+ hwmon->groups);
+ if (IS_ERR(hwmon->device)) {
+ rc = PTR_ERR(hwmon->device);
+ goto fail;
+ }
+
+ return 0;
+
fail:
efx_mcdi_mon_remove(efx);
return rc;
@@ -516,14 +523,11 @@ fail:
void efx_mcdi_mon_remove(struct efx_nic *efx)
{
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
- unsigned int i;
- for (i = 0; i < hwmon->n_attrs; i++)
- device_remove_file(&efx->pci_dev->dev,
- &hwmon->attrs[i].dev_attr);
- kfree(hwmon->attrs);
if (hwmon->device)
hwmon_device_unregister(hwmon->device);
+ kfree(hwmon->attrs);
+ kfree(hwmon->group.attrs);
efx_nic_free_buffer(efx, &hwmon->dma_buf);
}
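
The probe/remove rework is the stock conversion to hwmon_device_register_with_groups(): collect the attributes into a NULL-terminated group array first, register once, and let the hwmon core create every sysfs file, including the mandatory 'name' attribute (which is why the hand-rolled name attribute and efx_mcdi_mon_show_name() go away). Because the hwmon device is now a child device, drvdata is fetched via dev->parent, as seen in efx_mcdi_mon_get_entry(). A stripped-down sketch of the same pattern outside this driver (identifiers are illustrative):

	static struct attribute *demo_attrs[8];	/* filled before registering */
	static struct attribute_group demo_group = { .attrs = demo_attrs };
	static const struct attribute_group *demo_groups[] = { &demo_group, NULL };

	/* "demo" becomes the hwmon 'name' attribute automatically */
	hwmon_dev = hwmon_device_register_with_groups(parent_dev, "demo",
						      drvdata, demo_groups);
	if (IS_ERR(hwmon_dev))
		return PTR_ERR(hwmon_dev);
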
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index e0a63ddb7a6c..a707fb5ef14c 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -224,6 +224,8 @@
#define MC_CMD_ERR_MAC_EXIST 0x1009
/* Slave core not present */
#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
+/* The datapath is disabled. */
+#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
#define MC_CMD_ERR_CODE_OFST 0
@@ -390,6 +392,8 @@
* AOE_ERR_DATA)
*/
#define MCDI_EVENT_AOE_BYTEBLASTER 0x9
+/* enum: DDR ECC status update */
+#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
@@ -462,6 +466,10 @@
#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17
/* enum: the MC has detected an uncorrectable error */
#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18
+/* enum: The MC has entered offline BIST mode */
+#define MCDI_EVENT_CODE_MC_BIST 0x19
+/* enum: PTP tick event providing current NIC time */
+#define MCDI_EVENT_CODE_PTP_TIME 0x1a
/* enum: Artificial event generated by host and posted via MC for test
* purposes.
*/
@@ -481,15 +489,32 @@
#define MCDI_EVENT_TX_ERR_DATA_OFST 0
#define MCDI_EVENT_TX_ERR_DATA_LBN 0
#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
-/* Seconds field of timestamp */
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events, the seconds field of
+ * the timestamp
+ */
#define MCDI_EVENT_PTP_SECONDS_OFST 0
#define MCDI_EVENT_PTP_SECONDS_LBN 0
#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
-/* Nanoseconds field of timestamp */
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events, the major field of
+ * the timestamp
+ */
+#define MCDI_EVENT_PTP_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_MAJOR_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events, the nanoseconds field
+ * of the timestamp
+ */
#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
-/* Lowest four bytes of sourceUUID from PTP packet */
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events, the minor field of
+ * the timestamp
+ */
+#define MCDI_EVENT_PTP_MINOR_OFST 0
+#define MCDI_EVENT_PTP_MINOR_LBN 0
+#define MCDI_EVENT_PTP_MINOR_WIDTH 32
+/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from the PTP
+ * packet
+ */
#define MCDI_EVENT_PTP_UUID_OFST 0
#define MCDI_EVENT_PTP_UUID_LBN 0
#define MCDI_EVENT_PTP_UUID_WIDTH 32
@@ -505,6 +530,13 @@
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
+/* For CODE_PTP_TIME events, the major value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32
+/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
/* FCDI_EVENT structuredef */
#define FCDI_EVENT_LEN 8
@@ -545,8 +577,10 @@
#define FCDI_EVENT_CODE_TIMED_READ 0x5
/* enum: One or more PPS IN events */
#define FCDI_EVENT_CODE_PPS_IN 0x6
-/* enum: One or more PPS OUT events */
-#define FCDI_EVENT_CODE_PPS_OUT 0x7
+/* enum: Tick event from PTP clock */
+#define FCDI_EVENT_CODE_PTP_TICK 0x7
+/* enum: ECC error counters */
+#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
@@ -560,14 +594,21 @@
#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
-#define FCDI_EVENT_PPS_COUNT_OFST 0
-#define FCDI_EVENT_PPS_COUNT_LBN 0
-#define FCDI_EVENT_PPS_COUNT_WIDTH 32
-
-/* FCDI_EXTENDED_EVENT structuredef */
-#define FCDI_EXTENDED_EVENT_LENMIN 16
-#define FCDI_EXTENDED_EVENT_LENMAX 248
-#define FCDI_EXTENDED_EVENT_LEN(num) (8+8*(num))
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+
+/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
+ * to the MC. Note that this structure is overlaid over a normal FCDI event
+ * such that bits 32-63 containing event code, level, source etc. remain the
+ * same. In this case the data field of the header is defined to be the
+ * number of timestamps.
+ */
+#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16
+#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248
+#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
/* Number of timestamps following */
#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
@@ -581,14 +622,14 @@
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
/* Timestamp records comprising the event */
-#define FCDI_EXTENDED_EVENT_PPS_TIME_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_TIME_LEN 8
-#define FCDI_EXTENDED_EVENT_PPS_TIME_LO_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_TIME_HI_OFST 12
-#define FCDI_EXTENDED_EVENT_PPS_TIME_MINNUM 1
-#define FCDI_EXTENDED_EVENT_PPS_TIME_MAXNUM 30
-#define FCDI_EXTENDED_EVENT_PPS_TIME_LBN 64
-#define FCDI_EXTENDED_EVENT_PPS_TIME_WIDTH 64
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
/***********************************/
@@ -642,6 +683,10 @@
#define MC_CMD_COPYCODE_IN_LEN 16
/* Source address */
#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+/* enum: The main image should be entered via a copy of a single word from and
+ * to this address when none of the other magic behaviours are required.
+ */
+#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
/* enum: Entering the main image via a copy of a single word from and to this
* address indicates that it should not attempt to start the datapath CPUs.
* This is useful for certain soft rebooting scenarios. (Huntington only)
@@ -872,8 +917,28 @@
#define MC_CMD_PTP_OP_RST_CLK 0x14
/* enum: Enable the forwarding of PPS events to the host */
#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
+/* enum: Get the time format used by this NIC for PTP operations */
+#define MC_CMD_PTP_OP_GET_TIME_FORMAT 0x16
+/* enum: Get the clock attributes. NOTE- extended version of
+ * MC_CMD_PTP_OP_GET_TIME_FORMAT
+ */
+#define MC_CMD_PTP_OP_GET_ATTRIBUTES 0x16
+/* enum: Get corrections that should be applied to the various different
+ * timestamps
+ */
+#define MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS 0x17
+/* enum: Subscribe to receive periodic time events indicating the current NIC
+ * time
+ */
+#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE 0x18
+/* enum: Unsubscribe to stop receiving time events */
+#define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
+/* enum: PPS-based manufacturing tests. Requires PPS output to be looped to PPS
+ * input on the same NIC.
+ */
+#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
/* enum: Above this for future use. */
-#define MC_CMD_PTP_OP_MAX 0x16
+#define MC_CMD_PTP_OP_MAX 0x1b
/* MC_CMD_PTP_IN_ENABLE msgrequest */
#define MC_CMD_PTP_IN_ENABLE_LEN 16
@@ -938,8 +1003,12 @@
#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
/* Time adjustment in seconds */
#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
/* Time adjustment in nanoseconds */
#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
@@ -1005,8 +1074,12 @@
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
/* Time adjustment in seconds */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
/* Time adjustment in nanoseconds */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
@@ -1078,9 +1151,51 @@
#define MC_CMD_PTP_ENABLE_PPS 0x0
/* enum: Disable */
#define MC_CMD_PTP_DISABLE_PPS 0x1
-/* Queueid to send events back */
+/* Queue id to send events back */
#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */
+#define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */
+#define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */
+#define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Event queue to send PTP time events to */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+
+/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Unsubscribe options */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8
+/* enum: Unsubscribe a single queue */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0
+/* enum: Unsubscribe all queues */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1
+/* Event queue ID */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
+
+/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable PPS test mode, 0 to disable and return result. */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+
/* MC_CMD_PTP_OUT msgresponse */
#define MC_CMD_PTP_OUT_LEN 0
@@ -1088,15 +1203,29 @@
#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE_LEN 0
/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4
/* MC_CMD_PTP_OUT_STATUS msgresponse */
#define MC_CMD_PTP_OUT_STATUS_LEN 64
@@ -1116,21 +1245,21 @@
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
/* Number of PPS bad periods */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
-/* Minimum period of PPS pulse */
+/* Minimum period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
-/* Maximum period of PPS pulse */
+/* Maximum period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
-/* Last period of PPS pulse */
+/* Last period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
-/* Mean period of PPS pulse */
+/* Mean period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
-/* Minimum offset of PPS pulse (signed) */
+/* Minimum offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
-/* Maximum offset of PPS pulse (signed) */
+/* Maximum offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
-/* Last offset of PPS pulse (signed) */
+/* Last offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
-/* Mean offset of PPS pulse (signed) */
+/* Mean offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
@@ -1146,8 +1275,12 @@
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8
/* Host time immediately after NIC's hardware clock read */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
/* Number of nanoseconds waited after reading NIC's hardware clock */
@@ -1177,6 +1310,16 @@
#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8
/* enum: Timestamp trigger GPIO not working */
#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9
+/* enum: Insufficient PPS events to perform checks */
+#define MC_CMD_PTP_MANF_PPS_ENOUGH 0xa
+/* enum: PPS time event period not sufficiently close to 1s. */
+#define MC_CMD_PTP_MANF_PPS_PERIOD 0xb
+/* enum: PPS time event ns reading not sufficiently close to zero. */
+#define MC_CMD_PTP_MANF_PPS_NS 0xc
+/* enum: PTP peripheral registers incorrect */
+#define MC_CMD_PTP_MANF_REGISTERS 0xd
+/* enum: Failed to read time from PTP peripheral */
+#define MC_CMD_PTP_MANF_CLOCK_READ 0xe
/* Presence of external oscillator */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
@@ -1198,6 +1341,62 @@
#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1
#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252
+/* MC_CMD_PTP_OUT_GET_TIME_FORMAT msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_LEN 4
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2
+
+/* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 8
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* Minimum acceptable value for a corrected synchronization timeset. When
+ * comparing host and NIC clock times, the MC returns a set of samples that
+ * contain the host start and end time, the MC time when the host start was
+ * detected and the time the MC waited between reading the time and detecting
+ * the host end. The corrected sync window is the difference between the host
+ * end and start times minus the time that the MC waited for host end.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
+/* Uncorrected error on transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+/* Uncorrected error on receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+/* Uncorrected error on PPS input in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+
+/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
+
/***********************************/
/* MC_CMD_CSR_READ32
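
The new MAJOR/MINOR aliases overlay the old SECONDS/NANOSECONDS offsets because newer firmware may report time in one of the formats listed under GET_ATTRIBUTES. For the SECONDS_27FRACTION format the minor word counts 2^-27 s ticks, so a driver converts it to nanoseconds with a 64-bit multiply and shift; a sketch (variable names are illustrative):

	/* minor < 2^27, so minor * 10^9 < 2^57 and the u64 cannot overflow */
	u64 ns = ((u64)minor * NSEC_PER_SEC) >> 27;
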
@@ -1923,6 +2122,8 @@
#define MC_CMD_MEDIA_SFP_PLUS 0x5
/* enum: 10GBaseT. */
#define MC_CMD_MEDIA_BASE_T 0x6
+/* enum: QSFP+. */
+#define MC_CMD_MEDIA_QSFP_PLUS 0x7
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
/* enum: Native clause 22 */
#define MC_CMD_MMD_CLAUSE22 0x0
@@ -2223,6 +2424,8 @@
#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21
/* enum: KR Serdes Serial Wireside. */
#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
+/* enum: Near side of AOE Siena side port */
+#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
@@ -2286,6 +2489,10 @@
#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
/* enum: Flow control is off. */
@@ -3175,7 +3382,7 @@
#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
/* MC_CMD_SENSOR_INFO_OUT msgresponse */
-#define MC_CMD_SENSOR_INFO_OUT_LENMIN 12
+#define MC_CMD_SENSOR_INFO_OUT_LENMIN 4
#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
@@ -3269,16 +3476,18 @@
#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
/* enum: voltage between VDD08D and VSS08D at CSR (external ADC): mV */
#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
+/* enum: Hotpoint temperature: degC */
+#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
#define MC_CMD_SENSOR_ENTRY_OFST 4
#define MC_CMD_SENSOR_ENTRY_LEN 8
#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
#define MC_CMD_SENSOR_ENTRY_HI_OFST 8
-#define MC_CMD_SENSOR_ENTRY_MINNUM 1
+#define MC_CMD_SENSOR_ENTRY_MINNUM 0
#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */
-#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 12
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 4
#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
@@ -3291,7 +3500,7 @@
/* MC_CMD_SENSOR_ENTRY_LEN 8 */
/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
-/* MC_CMD_SENSOR_ENTRY_MINNUM 1 */
+/* MC_CMD_SENSOR_ENTRY_MINNUM 0 */
/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
@@ -3864,6 +4073,18 @@
#define NVRAM_PARTITION_TYPE_ID_LBN 0
#define NVRAM_PARTITION_TYPE_ID_WIDTH 16
+/* LICENSED_APP_ID structuredef */
+#define LICENSED_APP_ID_LEN 4
+#define LICENSED_APP_ID_ID_OFST 0
+/* enum: OpenOnload */
+#define LICENSED_APP_ID_ONLOAD 0x1
+/* enum: PTP timestamping */
+#define LICENSED_APP_ID_PTP 0x2
+/* enum: SolarCapture Pro */
+#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4
+#define LICENSED_APP_ID_ID_LBN 0
+#define LICENSED_APP_ID_ID_WIDTH 32
+
/***********************************/
/* MC_CMD_READ_REGS
@@ -4021,6 +4242,8 @@
#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1
#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8
#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -4179,6 +4402,9 @@
#define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16
#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */
+/* MC_CMD_PROXY_CMD_OUT msgresponse */
+#define MC_CMD_PROXY_CMD_OUT_LEN 0
+
/***********************************/
/* MC_CMD_ALLOC_BUFTBL_CHUNK
@@ -4213,7 +4439,7 @@
/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 252
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
/* ID */
@@ -4226,7 +4452,7 @@
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 30
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
@@ -6800,6 +7026,30 @@
/***********************************/
+/* MC_CMD_CAP_BLK_READ
+ * Read multiple 64-bit words from capture block memory
+ */
+#define MC_CMD_CAP_BLK_READ 0xe7
+
+/* MC_CMD_CAP_BLK_READ_IN msgrequest */
+#define MC_CMD_CAP_BLK_READ_IN_LEN 12
+#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
+#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
+#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
+
+/* MC_CMD_CAP_BLK_READ_OUT msgresponse */
+#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
+#define MC_CMD_CAP_BLK_READ_OUT_LENMAX 248
+#define MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num))
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31
+
+
+/***********************************/
/* MC_CMD_DUMP_DO
* Take a dump of the DUT state
*/
@@ -6826,6 +7076,10 @@
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+/* enum: The uart port this command was received over (if using a uart
+ * transport)
+ */
+#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
@@ -6942,39 +7196,68 @@
/***********************************/
-/* MC_CMD_START_KR_EYE_PLOT
- * Start KR Serdes Eye diagram plot on a given lane. Lane must have valid
- * signal.
- */
-#define MC_CMD_START_KR_EYE_PLOT 0xee
-
-/* MC_CMD_START_KR_EYE_PLOT_IN msgrequest */
-#define MC_CMD_START_KR_EYE_PLOT_IN_LEN 4
-#define MC_CMD_START_KR_EYE_PLOT_IN_LANE_OFST 0
-
-/* MC_CMD_START_KR_EYE_PLOT_OUT msgresponse */
-#define MC_CMD_START_KR_EYE_PLOT_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_POLL_KR_EYE_PLOT
- * Poll KR Serdes Eye diagram plot. Returns one row of BER data. The caller
- * should call this command repeatedly after starting eye plot, until no more
- * data is returned.
- */
-#define MC_CMD_POLL_KR_EYE_PLOT 0xef
-
-/* MC_CMD_POLL_KR_EYE_PLOT_IN msgrequest */
-#define MC_CMD_POLL_KR_EYE_PLOT_IN_LEN 0
-
-/* MC_CMD_POLL_KR_EYE_PLOT_OUT msgresponse */
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMIN 0
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMAX 252
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LEN(num) (0+2*(num))
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_OFST 0
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_LEN 2
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MINNUM 0
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+/* MC_CMD_UART_SEND_DATA
+ * Send checksummed[sic] block of data over the uart. Response is a placeholder
+ * should we wish to make this reliable; currently requests are fire-and-
+ * forget.
+ */
+#define MC_CMD_UART_SEND_DATA 0xee
+
+/* MC_CMD_UART_SEND_DATA_OUT msgrequest */
+#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
+#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
+#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
+/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
+#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
+/* Offset at which to write the data */
+#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
+/* Length of data */
+#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236
+
+/* MC_CMD_UART_SEND_DATA_IN msgresponse */
+#define MC_CMD_UART_SEND_DATA_IN_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UART_RECV_DATA
+ * Request checksummed[sic] block of data over the uart. Only a placeholder,
+ * subject to change and not currently implemented.
+ */
+#define MC_CMD_UART_RECV_DATA 0xef
+
+/* MC_CMD_UART_RECV_DATA_OUT msgrequest */
+#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
+/* CRC32 over OFFSET, LENGTH, RESERVED */
+#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
+/* Offset from which to read the data */
+#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
+/* Length of data */
+#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
+
+/* MC_CMD_UART_RECV_DATA_IN msgresponse */
+#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16
+#define MC_CMD_UART_RECV_DATA_IN_LENMAX 252
+#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
+/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
+#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
+#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
+#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236
/***********************************/
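
The CHECKSUM field of MC_CMD_UART_SEND_DATA covers everything after itself (OFFSET, LENGTH, RESERVED, DATA). The header does not specify which CRC32 variant the MC expects, so the sketch below assumes the plain IEEE CRC-32 as computed by the kernel's crc32_le() with an all-ones seed and final inversion; verify against the firmware before relying on it (data_len is the payload length):

	#include <linux/crc32.h>

	/* checksum the 12 header bytes after CHECKSUM plus the payload */
	u32 csum = ~crc32_le(~0u,
			     (const u8 *)inbuf +
			     MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST,
			     12 + data_len);
	MCDI_SET_DWORD(inbuf, UART_SEND_DATA_OUT_CHECKSUM, csum);
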
@@ -7026,6 +7309,15 @@
#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3
/* enum: Force KR Serdes reset / recalibration */
#define MC_CMD_KR_TUNE_IN_RECAL 0x4
+/* enum: Start KR Serdes Eye diagram plot on a given lane. Lane must have valid
+ * signal.
+ */
+#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll KR Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6
/* Align the arguments to 32 bits */
#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
@@ -7123,6 +7415,91 @@
/* MC_CMD_KR_TUNE_RXEQ_SET_OUT msgresponse */
#define MC_CMD_KR_TUNE_RXEQ_SET_OUT_LEN 0
+/* MC_CMD_KR_TUNE_TXEQ_GET_IN msgrequest */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* TXEQ Parameter */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TX Amplitude */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0
+/* enum: De-Emphasis Tap1 Magnitude (0-7) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1
+/* enum: De-Emphasis Tap1 Fine */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2
+/* enum: De-Emphasis Tap2 Magnitude (0-6) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3
+/* enum: De-Emphasis Tap2 Fine */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4
+/* enum: Pre-Emphasis Magnitude */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5
+/* enum: Pre-Emphasis Fine */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6
+/* enum: TX Slew Rate Coarse control */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7
+/* enum: TX Slew Rate Fine control */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_LBN 11
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 5
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_IN msgrequest */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMIN 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
+/* TXEQ Parameter */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_WIDTH 3
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_LBN 11
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_WIDTH 5
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_TXEQ_SET_OUT_LEN 0
+
/* MC_CMD_KR_TUNE_RECAL_IN msgrequest */
#define MC_CMD_KR_TUNE_RECAL_IN_LEN 4
/* Requested operation */
@@ -7135,6 +7512,37 @@
/* MC_CMD_KR_TUNE_RECAL_OUT msgresponse */
#define MC_CMD_KR_TUNE_RECAL_OUT_LEN 0
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_IN msgrequest */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
/***********************************/
/* MC_CMD_PCIE_TUNE
@@ -7157,6 +7565,13 @@
#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
/* enum: Override TX Driver settings */
#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
+/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */
+#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6
/* Align the arguments to 32 bits */
#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
@@ -7258,6 +7673,37 @@
#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
/***********************************/
/* MC_CMD_LICENSING
@@ -7310,5 +7756,152 @@
*/
#define MC_CMD_MC2MC_PROXY 0xf4
+/* MC_CMD_MC2MC_PROXY_IN msgrequest */
+#define MC_CMD_MC2MC_PROXY_IN_LEN 0
+
+/* MC_CMD_MC2MC_PROXY_OUT msgresponse */
+#define MC_CMD_MC2MC_PROXY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
+ * or a reboot of the MC.)
+ */
+#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
+
+/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
+/* application ID to query (LICENSED_APP_ID_xxx) */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+
+/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1
+
+
+/***********************************/
+/* MC_CMD_LICENSED_APP_OP
+ * Perform an action for an individual licensed application.
+ */
+#define MC_CMD_LICENSED_APP_OP 0xf6
+
+/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
+#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+/* enum: validate application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
+/* arguments specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61
+
+/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num))
+/* result specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+/* validation challenge */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
+/* feature expiry (time_t) */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+/* validation response */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_SNIFF_CONFIG
+ * Configure port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic delivered to the host (non-promiscuous
+ * mode) or all traffic arriving at the port (promiscuous mode) may be
+ * delivered to a specific queue, or a set of queues with RSS.
+ */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+/* enum: receive to just the specified queue */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_SNIFF_CONFIG
+ * Obtain the current port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+/* enum: receiving to just the specified queue */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+/* enum: receiving to multiple queues using RSS context */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+
#endif /* MCDI_PCOL_H */
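
A privileged function would drive MC_CMD_SET_PORT_SNIFF_CONFIG with the usual MCDI buffer macros. A hypothetical sketch enabling non-promiscuous sniffing to a single queue (this patch adds no driver caller, and rxq_handle is an illustrative name):

	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN);
	int rc;

	MCDI_POPULATE_DWORD_2(inbuf, SET_PORT_SNIFF_CONFIG_IN_FLAGS,
			      SET_PORT_SNIFF_CONFIG_IN_ENABLE, 1,
			      SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS, 0);
	MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE, rxq_handle);
	MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_MODE,
		       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE);
	/* 0xFFFFFFFF is documented above as never a valid RSS handle */
	MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT, 0xffffffff);
	rc = efx_mcdi_rpc(efx, MC_CMD_SET_PORT_SNIFF_CONFIG,
			  inbuf, sizeof(inbuf), NULL, 0, NULL);
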
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 7b6be61d549f..91d23252f8fa 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -90,13 +90,6 @@ static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -143,17 +136,13 @@ static int efx_mcdi_mdio_read(struct net_device *net_dev,
rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
- goto fail;
+ return rc;
if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
MC_CMD_MDIO_STATUS_GOOD)
return -EIO;
return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
}
static int efx_mcdi_mdio_write(struct net_device *net_dev,
@@ -174,17 +163,13 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev,
rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
- goto fail;
+ return rc;
if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
MC_CMD_MDIO_STATUS_GOOD)
return -EIO;
return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
}
static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
@@ -487,17 +472,14 @@ static bool efx_mcdi_phy_poll(struct efx_nic *efx)
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
- if (rc) {
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
+ if (rc)
efx->link_state.up = false;
- } else {
+ else
efx_mcdi_phy_decode_link(
efx, &efx->link_state,
MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
- }
return !efx_link_state_equal(&efx->link_state, &old_state);
}
@@ -531,11 +513,8 @@ static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *e
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
- if (rc) {
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
+ if (rc)
return;
- }
ecmd->lp_advertising =
mcdi_to_ethtool_cap(phy_cfg->media,
MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP));
@@ -918,21 +897,29 @@ bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), &outlength);
- if (rc) {
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
+ if (rc)
return true;
- }
return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
}
-static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
- u32 dma_len, int enable, int clear)
+enum efx_stats_action {
+ EFX_STATS_ENABLE,
+ EFX_STATS_DISABLE,
+ EFX_STATS_PULL,
+};
+
+static int efx_mcdi_mac_stats(struct efx_nic *efx,
+ enum efx_stats_action action, int clear)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
int rc;
- int period = enable ? 1000 : 0;
+ int change = action == EFX_STATS_PULL ? 0 : 1;
+ int enable = action == EFX_STATS_ENABLE ? 1 : 0;
+ int period = action == EFX_STATS_ENABLE ? 1000 : 0;
+ dma_addr_t dma_addr = efx->stats_buffer.dma_addr;
+ u32 dma_len = action != EFX_STATS_DISABLE ?
+ MC_CMD_MAC_NSTATS * sizeof(u64) : 0;
BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
@@ -940,8 +927,8 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD,
MAC_STATS_IN_DMA, !!enable,
MAC_STATS_IN_CLEAR, clear,
- MAC_STATS_IN_PERIODIC_CHANGE, 1,
- MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
+ MAC_STATS_IN_PERIODIC_CHANGE, change,
+ MAC_STATS_IN_PERIODIC_ENABLE, enable,
MAC_STATS_IN_PERIODIC_CLEAR, 0,
MAC_STATS_IN_PERIODIC_NOEVENT, 1,
MAC_STATS_IN_PERIOD_MS, period);
@@ -949,14 +936,6 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
- __func__, enable ? "enable" : "disable", rc);
return rc;
}
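With this refactoring, the requested action derives every MCDI parameter in one place: EFX_STATS_ENABLE starts periodic stats DMA with a 1000ms period and a full-length buffer, EFX_STATS_DISABLE stops it with dma_len 0, and EFX_STATS_PULL sets PERIODIC_CHANGE to 0 so the existing periodic state is left untouched while the full-length buffer is supplied for a one-shot fetch.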
@@ -966,13 +945,29 @@ void efx_mcdi_mac_start_stats(struct efx_nic *efx)
dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
- MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
+ efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0);
}
void efx_mcdi_mac_stop_stats(struct efx_nic *efx)
{
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
+ efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 0);
+}
+
+#define EFX_MAC_STATS_WAIT_US 100
+#define EFX_MAC_STATS_WAIT_ATTEMPTS 10
+
+void efx_mcdi_mac_pull_stats(struct efx_nic *efx)
+{
+ __le64 *dma_stats = efx->stats_buffer.addr;
+ int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS;
+
+ dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+ efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0);
+
+ while (dma_stats[MC_CMD_MAC_GENERATION_END] ==
+ EFX_MC_STATS_GENERATION_INVALID &&
+ attempts-- != 0)
+ udelay(EFX_MAC_STATS_WAIT_US);
}
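efx_mcdi_mac_pull_stats() gives callers a one-shot refresh: it invalidates the generation marker, requests a pull, then polls for up to EFX_MAC_STATS_WAIT_ATTEMPTS * EFX_MAC_STATS_WAIT_US (1ms total) for the MC to rewrite the marker. A hypothetical call site (illustration only, not taken from this patch, and assuming the driver's usual stats_lock serialisation) would pair it with the update_stats hook, which may be called with NULL arguments:

	/* Force a fresh snapshot before decoding the DMA buffer,
	 * instead of waiting for the next periodic update.
	 */
	efx_mcdi_mac_pull_stats(efx);
	spin_lock_bh(&efx->stats_lock);
	efx->type->update_stats(efx, NULL, NULL);
	spin_unlock_bh(&efx->stats_lock);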
int efx_mcdi_port_probe(struct efx_nic *efx)
@@ -1003,7 +998,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
efx->stats_buffer.addr,
(u64)virt_to_phys(efx->stats_buffer.addr));
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
+ efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 1);
return 0;
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index b14a717ac3e8..af2b8c59a903 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -91,6 +91,7 @@
/* Forward declare Precision Time Protocol (PTP) support structure. */
struct efx_ptp_data;
+struct hwtstamp_config;
struct efx_self_tests;
@@ -287,12 +288,9 @@ struct efx_rx_buffer {
* Used to facilitate sharing dma mappings between recycled rx buffers
* and those passed up to the kernel.
*
- * @refcnt: Number of struct efx_rx_buffer's referencing this page.
- * When refcnt falls to zero, the page is unmapped for dma
* @dma_addr: The dma address of this page.
*/
struct efx_rx_page_state {
- unsigned refcnt;
dma_addr_t dma_addr;
unsigned int __pad[0] ____cacheline_aligned;
@@ -362,10 +360,11 @@ struct efx_rx_queue {
unsigned int slow_fill_count;
};
-enum efx_rx_alloc_method {
- RX_ALLOC_METHOD_AUTO = 0,
- RX_ALLOC_METHOD_SKB = 1,
- RX_ALLOC_METHOD_PAGE = 2,
+enum efx_sync_events_state {
+ SYNC_EVENTS_DISABLED = 0,
+ SYNC_EVENTS_QUIESCENT,
+ SYNC_EVENTS_REQUESTED,
+ SYNC_EVENTS_VALID,
};
/**
@@ -407,6 +406,9 @@ enum efx_rx_alloc_method {
* by __efx_rx_packet(), if @rx_pkt_n_frags != 0
* @rx_queue: RX queue for this channel
* @tx_queue: TX queues for this channel
+ * @sync_events_state: Current state of sync events on this channel
+ * @sync_timestamp_major: Major part of the last ptp sync event
+ * @sync_timestamp_minor: Minor part of the last ptp sync event
*/
struct efx_channel {
struct efx_nic *efx;
@@ -445,6 +447,10 @@ struct efx_channel {
struct efx_rx_queue rx_queue;
struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
+
+ enum efx_sync_events_state sync_events_state;
+ u32 sync_timestamp_major;
+ u32 sync_timestamp_minor;
};
/**
@@ -520,15 +526,6 @@ enum nic_state {
STATE_RECOVERY = 3, /* device recovering from PCI error */
};
-/*
- * Alignment of the skb->head which wraps a page-allocated RX buffer
- *
- * The skb allocated to wrap an rx_buffer can have this alignment. Since
- * the data is memcpy'd from the rx_buf, it does not need to be equal to
- * NET_IP_ALIGN.
- */
-#define EFX_PAGE_SKB_ALIGN 2
-
/* Forward declaration */
struct efx_nic;
@@ -651,6 +648,13 @@ struct vfdi_status;
* struct efx_nic - an Efx NIC
* @name: Device name (net device name or bus id before net device registered)
* @pci_dev: The PCI device
+ * @node: List node for maintaining primary/secondary function lists
+ * @primary: &struct efx_nic instance for the primary function of this
+ * controller. May be the same structure, and may be %NULL if no
+ * primary function is bound. Serialised by rtnl_lock.
+ * @secondary_list: List of &struct efx_nic instances for the secondary PCI
+ * functions of the controller, if this is for the primary function.
+ * Serialised by rtnl_lock.
* @type: Controller type attributes
* @legacy_irq: IRQ number
* @workqueue: Workqueue for port reconfigures and the HW monitor.
@@ -683,6 +687,8 @@ struct vfdi_status;
* @n_channels: Number of channels in use
* @n_rx_channels: Number of channels used for RX (= number of RX queues)
* @n_tx_channels: Number of channels used for TX
+ * @rx_ip_align: RX DMA address offset to have IP header aligned in
+ * accordance with NET_IP_ALIGN
* @rx_dma_len: Current maximum RX DMA length
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
* @rx_buffer_truesize: Amortised allocation size of an RX buffer,
@@ -692,6 +698,8 @@ struct vfdi_status;
* (valid only if @rx_prefix_size != 0; always negative)
* @rx_packet_len_offset: Offset of RX packet length from start of packet data
* (valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative)
+ * @rx_packet_ts_offset: Offset of timestamp from start of packet data
+ * (valid only if channel->sync_timestamps_enabled; always negative)
* @rx_hash_key: Toeplitz hash key for RSS
* @rx_indir_table: Indirection table for RSS
* @rx_scatter: Scatter mode enabled for receives
@@ -761,6 +769,7 @@ struct vfdi_status;
* @local_lock: Mutex protecting %local_addr_list and %local_page_list.
* @peer_work: Work item to broadcast peer addresses to VMs.
* @ptp_data: PTP state data
+ * @vpd_sn: Serial number read from VPD
* @monitor_work: Hardware monitor workitem
* @biu_lock: BIU (bus interface unit) lock
* @last_irq_cpu: Last CPU to handle a possible test interrupt. This
@@ -775,6 +784,9 @@ struct efx_nic {
/* The following fields should be written very rarely */
char name[IFNAMSIZ];
+ struct list_head node;
+ struct efx_nic *primary;
+ struct list_head secondary_list;
struct pci_dev *pci_dev;
unsigned int port_num;
const struct efx_nic_type *type;
@@ -816,6 +828,7 @@ struct efx_nic {
unsigned rss_spread;
unsigned tx_channel_offset;
unsigned n_tx_channels;
+ unsigned int rx_ip_align;
unsigned int rx_dma_len;
unsigned int rx_buffer_order;
unsigned int rx_buffer_truesize;
@@ -825,6 +838,7 @@ struct efx_nic {
unsigned int rx_prefix_size;
int rx_packet_hash_offset;
int rx_packet_len_offset;
+ int rx_packet_ts_offset;
u8 rx_hash_key[40];
u32 rx_indir_table[128];
bool rx_scatter;
@@ -849,10 +863,14 @@ struct efx_nic {
struct work_struct mac_work;
bool port_enabled;
+ bool mc_bist_for_other_fn;
bool port_initialized;
struct net_device *net_dev;
struct efx_buffer stats_buffer;
+ u64 rx_nodesc_drops_total;
+ u64 rx_nodesc_drops_while_down;
+ bool rx_nodesc_drops_prev_state;
unsigned int phy_type;
const struct efx_phy_operations *phy_op;
@@ -904,6 +922,8 @@ struct efx_nic {
struct efx_ptp_data *ptp_data;
+ char *vpd_sn;
+
/* The following fields may be written more often */
struct delayed_work monitor_work ____cacheline_aligned_in_smp;
@@ -956,6 +976,7 @@ struct efx_mtd_partition {
* @update_stats: Update statistics not provided by event handling.
* Either argument may be %NULL.
* @start_stats: Start the regular fetching of statistics
+ * @pull_stats: Pull stats from the NIC and wait until they arrive.
* @stop_stats: Stop the regular fetching of statistics
* @set_id_led: Set state of identifying LED or revert to automatic function
* @push_irq_moderation: Apply interrupt moderation value
@@ -994,7 +1015,7 @@ struct efx_mtd_partition {
* @tx_init: Initialise TX queue on the NIC
* @tx_remove: Free resources for TX queue
* @tx_write: Write TX descriptors and doorbell
- * @rx_push_indir_table: Write RSS indirection table to the NIC
+ * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC
* @rx_probe: Allocate resources for RX queue
* @rx_init: Initialise RX queue on the NIC
* @rx_remove: Free resources for RX queue
@@ -1014,7 +1035,8 @@ struct efx_mtd_partition {
* @filter_insert: add or replace a filter
* @filter_remove_safe: remove a filter by ID, carefully
* @filter_get_safe: retrieve a filter by ID, carefully
- * @filter_clear_rx: remove RX filters by priority
+ * @filter_clear_rx: Remove all RX filters whose priority is less than or
+ * equal to the given priority and is not %EFX_FILTER_PRI_AUTO
* @filter_count_rx_used: Get the number of filters in use at a given priority
* @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1
* @filter_get_rx_ids: Get list of RX filters at a given priority
@@ -1034,6 +1056,12 @@ struct efx_mtd_partition {
* @mtd_sync: Wait for write-back to complete on MTD partition. This
* also notifies the driver that a writer has finished using this
* partition.
+ * @ptp_write_host_time: Send host time to MC as part of sync protocol
+ * @ptp_set_ts_sync_events: Enable or disable sync events for inline RX
+ * timestamping, possibly only temporarily for the purposes of a reset.
+ * @ptp_set_ts_config: Set hardware timestamp configuration. The flags
+ * and tx_type will already have been validated but this operation
+ * must validate and update rx_filter.
* @revision: Hardware architecture revision
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1043,6 +1071,7 @@ struct efx_mtd_partition {
* @max_dma_mask: Maximum possible DMA mask
* @rx_prefix_size: Size of RX prefix before packet data
* @rx_hash_offset: Offset of RX flow hash within prefix
+ * @rx_ts_offset: Offset of timestamp within prefix
* @rx_buffer_padding: Size of padding at end of RX packet
* @can_rx_scatter: NIC is able to scatter packets to multiple buffers
* @always_rx_scatter: NIC will always scatter packets to multiple buffers
@@ -1052,6 +1081,7 @@ struct efx_mtd_partition {
* @offload_features: net_device feature flags for protocol offload
* features implemented in hardware
* @mcdi_max_ver: Maximum MCDI version supported
+ * @hwtstamp_filters: Mask of hardware timestamp filter types supported
*/
struct efx_nic_type {
unsigned int (*mem_map_size)(struct efx_nic *efx);
@@ -1074,6 +1104,7 @@ struct efx_nic_type {
size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats);
void (*start_stats)(struct efx_nic *efx);
+ void (*pull_stats)(struct efx_nic *efx);
void (*stop_stats)(struct efx_nic *efx);
void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
void (*push_irq_moderation)(struct efx_channel *channel);
@@ -1102,7 +1133,7 @@ struct efx_nic_type {
void (*tx_init)(struct efx_tx_queue *tx_queue);
void (*tx_remove)(struct efx_tx_queue *tx_queue);
void (*tx_write)(struct efx_tx_queue *tx_queue);
- void (*rx_push_indir_table)(struct efx_nic *efx);
+ void (*rx_push_rss_config)(struct efx_nic *efx);
int (*rx_probe)(struct efx_rx_queue *rx_queue);
void (*rx_init)(struct efx_rx_queue *rx_queue);
void (*rx_remove)(struct efx_rx_queue *rx_queue);
@@ -1127,8 +1158,8 @@ struct efx_nic_type {
int (*filter_get_safe)(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id, struct efx_filter_spec *);
- void (*filter_clear_rx)(struct efx_nic *efx,
- enum efx_filter_priority priority);
+ int (*filter_clear_rx)(struct efx_nic *efx,
+ enum efx_filter_priority priority);
u32 (*filter_count_rx_used)(struct efx_nic *efx,
enum efx_filter_priority priority);
u32 (*filter_get_rx_id_limit)(struct efx_nic *efx);
@@ -1152,6 +1183,9 @@ struct efx_nic_type {
int (*mtd_sync)(struct mtd_info *mtd);
#endif
void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time);
+ int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
+ int (*ptp_set_ts_config)(struct efx_nic *efx,
+ struct hwtstamp_config *init);
int revision;
unsigned int txd_ptr_tbl_base;
@@ -1162,6 +1196,7 @@ struct efx_nic_type {
u64 max_dma_mask;
unsigned int rx_prefix_size;
unsigned int rx_hash_offset;
+ unsigned int rx_ts_offset;
unsigned int rx_buffer_padding;
bool can_rx_scatter;
bool always_rx_scatter;
@@ -1170,6 +1205,7 @@ struct efx_nic_type {
netdev_features_t offload_features;
int mcdi_max_ver;
unsigned int max_rx_ip_filters;
+ u32 hwtstamp_filters;
};
/**************************************************************************
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 9c90bf56090f..79226b19e3c4 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -519,3 +519,15 @@ void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
}
}
}
+
+void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
+{
+ /* if down, or this is the first update after coming up */
+ if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
+ efx->rx_nodesc_drops_while_down +=
+ *rx_nodesc_drops - efx->rx_nodesc_drops_total;
+ efx->rx_nodesc_drops_total = *rx_nodesc_drops;
+ efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
+ *rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
+}
+
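A worked example with made-up numbers: if 40 drops accumulated in rx_nodesc_drops_while_down while the interface was down, and the hardware counter later reads 100 with the interface up, the function records rx_nodesc_drops_total = 100 and adjusts the reported value to 100 - 40 = 60, so only drops that occurred while the interface was up are exposed to the stack.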
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 11b6112d9249..a001fae1a8d7 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -412,8 +412,8 @@ enum {
EF10_STAT_rx_dp_q_disabled_packets,
EF10_STAT_rx_dp_di_dropped_packets,
EF10_STAT_rx_dp_streaming_packets,
- EF10_STAT_rx_dp_emerg_fetch,
- EF10_STAT_rx_dp_emerg_wait,
+ EF10_STAT_rx_dp_hlb_fetch,
+ EF10_STAT_rx_dp_hlb_wait,
EF10_STAT_COUNT
};
@@ -554,12 +554,31 @@ int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
bool spoofchk);
struct ethtool_ts_info;
-void efx_ptp_probe(struct efx_nic *efx);
-int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
+int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
+void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
+void efx_ptp_remove(struct efx_nic *efx);
+int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
+int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+int efx_ptp_get_mode(struct efx_nic *efx);
+int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+ unsigned int new_mode);
int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings);
+size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats);
+void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
+void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+ struct sk_buff *skb);
+static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+ struct sk_buff *skb)
+{
+ if (channel->sync_events_state == SYNC_EVENTS_VALID)
+ __efx_rx_skb_attach_timestamp(channel, skb);
+}
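Keeping this wrapper inline means the common case costs a single state comparison; only channels whose state is SYNC_EVENTS_VALID pay for the out-of-line __efx_rx_skb_attach_timestamp() call.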
+void efx_ptp_start_datapath(struct efx_nic *efx);
+void efx_ptp_stop_datapath(struct efx_nic *efx);
extern const struct efx_nic_type falcon_a1_nic_type;
extern const struct efx_nic_type falcon_b0_nic_type;
@@ -676,8 +695,8 @@ int efx_farch_filter_remove_safe(struct efx_nic *efx,
int efx_farch_filter_get_safe(struct efx_nic *efx,
enum efx_filter_priority priority, u32 filter_id,
struct efx_filter_spec *);
-void efx_farch_filter_clear_rx(struct efx_nic *efx,
- enum efx_filter_priority priority);
+int efx_farch_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority);
u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
enum efx_filter_priority priority);
u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
@@ -745,10 +764,6 @@ int falcon_reset_xaui(struct efx_nic *efx);
void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
void efx_farch_init_common(struct efx_nic *efx);
void efx_ef10_handle_drain_event(struct efx_nic *efx);
-static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
-{
- efx->type->rx_push_indir_table(efx);
-}
void efx_farch_rx_push_indir_table(struct efx_nic *efx);
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
@@ -772,6 +787,7 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u64 *stats,
const void *dma_buf, bool accumulate);
+void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat);
#define EFX_MAX_FLUSH_TIME 5000
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 03acf57df045..7aa070813960 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -62,7 +62,7 @@
#define SYNCHRONISATION_GRANULARITY_NS 200
/* Minimum permitted length of a (corrected) synchronisation time */
-#define MIN_SYNCHRONISATION_NS 120
+#define DEFAULT_MIN_SYNCHRONISATION_NS 120
/* Maximum permitted length of a (corrected) synchronisation time */
#define MAX_SYNCHRONISATION_NS 1000
@@ -195,31 +195,35 @@ struct efx_ptp_event_rx {
/**
* struct efx_ptp_timeset - Synchronisation between host and MC
* @host_start: Host time immediately before hardware timestamp taken
- * @seconds: Hardware timestamp, seconds
- * @nanoseconds: Hardware timestamp, nanoseconds
+ * @major: Hardware timestamp, major
+ * @minor: Hardware timestamp, minor
* @host_end: Host time immediately after hardware timestamp taken
- * @waitns: Number of nanoseconds between hardware timestamp being read and
+ * @wait: Number of NIC clock ticks between hardware timestamp being read and
* host end time being seen
* @window: Difference of host_end and host_start
* @valid: Whether this timeset is valid
*/
struct efx_ptp_timeset {
u32 host_start;
- u32 seconds;
- u32 nanoseconds;
+ u32 major;
+ u32 minor;
u32 host_end;
- u32 waitns;
+ u32 wait;
u32 window; /* Derived: end - start, allowing for wrap */
};
/**
* struct efx_ptp_data - Precision Time Protocol (PTP) state
- * @channel: The PTP channel
+ * @efx: The NIC context
+ * @channel: The PTP channel (Siena only)
+ * @rx_ts_inline: Flag for whether RX timestamps are inline (else they are
+ * separate events)
* @rxq: Receive queue (awaiting timestamps)
* @txq: Transmit queue
* @evt_list: List of MC receive events awaiting packets
* @evt_free_list: List of free events
* @evt_lock: Lock for manipulating evt_list and evt_free_list
+ * @evt_overflow: Boolean indicating that event list has overflowed
* @rx_evts: Instantiated events (on evt_list and evt_free_list)
* @workwq: Work queue for processing pending PTP operations
* @work: Work task
@@ -230,46 +234,48 @@ struct efx_ptp_timeset {
* @config: Current timestamp configuration
* @enabled: PTP operation enabled
* @mode: Mode in which PTP operating (PTP version)
+ * @time_format: Time format supported by this NIC
+ * @ns_to_nic_time: Function to convert from scalar nanoseconds to NIC time
+ * @nic_to_kernel_time: Function to convert from NIC to kernel time
+ * @min_synchronisation_ns: Minimum acceptable corrected sync window
+ * @ts_corrections.tx: Required driver correction of transmit timestamps
+ * @ts_corrections.rx: Required driver correction of receive timestamps
+ * @ts_corrections.pps_out: PPS output error (information only)
+ * @ts_corrections.pps_in: Required driver correction of PPS input timestamps
* @evt_frags: Partly assembled PTP events
* @evt_frag_idx: Current fragment number
* @evt_code: Last event code
* @start: Address at which MC indicates ready for synchronisation
* @host_time_pps: Host time at last PPS
- * @last_sync_ns: Last number of nanoseconds between readings when synchronising
- * @base_sync_ns: Number of nanoseconds for last synchronisation.
- * @base_sync_valid: Whether base_sync_time is valid.
* @current_adjfreq: Current ppb adjustment.
- * @phc_clock: Pointer to registered phc device
+ * @phc_clock: Pointer to registered phc device (if primary function)
* @phc_clock_info: Registration structure for phc device
* @pps_work: pps work task for handling pps events
* @pps_workwq: pps work queue
* @nic_ts_enabled: Flag indicating if NIC generated TS events are handled
* @txbuf: Buffer for use when transmitting (PTP) packets to MC (avoids
* allocations in main data path).
- * @debug_ptp_dir: PTP debugfs directory
- * @missed_rx_sync: Number of packets received without syncrhonisation.
* @good_syncs: Number of successful synchronisations.
- * @no_time_syncs: Number of synchronisations with no good times.
- * @bad_sync_durations: Number of synchronisations with bad durations.
+ * @fast_syncs: Number of synchronisations requiring short delay
* @bad_syncs: Number of failed synchronisations.
- * @last_sync_time: Number of nanoseconds for last synchronisation.
* @sync_timeouts: Number of synchronisation timeouts
- * @fast_syncs: Number of synchronisations requiring short delay
- * @min_sync_delta: Minimum time between event and synchronisation
- * @max_sync_delta: Maximum time between event and synchronisation
- * @average_sync_delta: Average time between event and synchronisation.
- * Modified moving average.
- * @last_sync_delta: Last time between event and synchronisation
- * @mc_stats: Context value for MC statistics
+ * @no_time_syncs: Number of synchronisations with no good times.
+ * @invalid_sync_windows: Number of sync windows with bad durations.
+ * @undersize_sync_windows: Number of corrected sync windows that are too small
+ * @oversize_sync_windows: Number of corrected sync windows that are too large
+ * @rx_no_timestamp: Number of packets received without a timestamp.
* @timeset: Last set of synchronisation statistics.
*/
struct efx_ptp_data {
+ struct efx_nic *efx;
struct efx_channel *channel;
+ bool rx_ts_inline;
struct sk_buff_head rxq;
struct sk_buff_head txq;
struct list_head evt_list;
struct list_head evt_free_list;
spinlock_t evt_lock;
+ bool evt_overflow;
struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
struct workqueue_struct *workwq;
struct work_struct work;
@@ -280,14 +286,22 @@ struct efx_ptp_data {
struct hwtstamp_config config;
bool enabled;
unsigned int mode;
+ unsigned int time_format;
+ void (*ns_to_nic_time)(s64 ns, u32 *nic_major, u32 *nic_minor);
+ ktime_t (*nic_to_kernel_time)(u32 nic_major, u32 nic_minor,
+ s32 correction);
+ unsigned int min_synchronisation_ns;
+ struct {
+ s32 tx;
+ s32 rx;
+ s32 pps_out;
+ s32 pps_in;
+ } ts_corrections;
efx_qword_t evt_frags[MAX_EVENT_FRAGS];
int evt_frag_idx;
int evt_code;
struct efx_buffer start;
struct pps_event_time host_time_pps;
- unsigned last_sync_ns;
- unsigned base_sync_ns;
- bool base_sync_valid;
s64 current_adjfreq;
struct ptp_clock *phc_clock;
struct ptp_clock_info phc_clock_info;
@@ -295,6 +309,16 @@ struct efx_ptp_data {
struct workqueue_struct *pps_workwq;
bool nic_ts_enabled;
MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
+
+ unsigned int good_syncs;
+ unsigned int fast_syncs;
+ unsigned int bad_syncs;
+ unsigned int sync_timeouts;
+ unsigned int no_time_syncs;
+ unsigned int invalid_sync_windows;
+ unsigned int undersize_sync_windows;
+ unsigned int oversize_sync_windows;
+ unsigned int rx_no_timestamp;
struct efx_ptp_timeset
timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
};
@@ -307,19 +331,263 @@ static int efx_phc_settime(struct ptp_clock_info *ptp,
static int efx_phc_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *request, int on);
+#define PTP_SW_STAT(ext_name, field_name) \
+ { #ext_name, 0, offsetof(struct efx_ptp_data, field_name) }
+#define PTP_MC_STAT(ext_name, mcdi_name) \
+ { #ext_name, 32, MC_CMD_PTP_OUT_STATUS_STATS_ ## mcdi_name ## _OFST }
+static const struct efx_hw_stat_desc efx_ptp_stat_desc[] = {
+ PTP_SW_STAT(ptp_good_syncs, good_syncs),
+ PTP_SW_STAT(ptp_fast_syncs, fast_syncs),
+ PTP_SW_STAT(ptp_bad_syncs, bad_syncs),
+ PTP_SW_STAT(ptp_sync_timeouts, sync_timeouts),
+ PTP_SW_STAT(ptp_no_time_syncs, no_time_syncs),
+ PTP_SW_STAT(ptp_invalid_sync_windows, invalid_sync_windows),
+ PTP_SW_STAT(ptp_undersize_sync_windows, undersize_sync_windows),
+ PTP_SW_STAT(ptp_oversize_sync_windows, oversize_sync_windows),
+ PTP_SW_STAT(ptp_rx_no_timestamp, rx_no_timestamp),
+ PTP_MC_STAT(ptp_tx_timestamp_packets, TX),
+ PTP_MC_STAT(ptp_rx_timestamp_packets, RX),
+ PTP_MC_STAT(ptp_timestamp_packets, TS),
+ PTP_MC_STAT(ptp_filter_matches, FM),
+ PTP_MC_STAT(ptp_non_filter_matches, NFM),
+};
+#define PTP_STAT_COUNT ARRAY_SIZE(efx_ptp_stat_desc)
+static const unsigned long efx_ptp_stat_mask[] = {
+ [0 ... BITS_TO_LONGS(PTP_STAT_COUNT) - 1] = ~0UL,
+};
+
+size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings)
+{
+ if (!efx->ptp_data)
+ return 0;
+
+ return efx_nic_describe_stats(efx_ptp_stat_desc, PTP_STAT_COUNT,
+ efx_ptp_stat_mask, strings);
+}
+
+size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_STATUS_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_STATUS_LEN);
+ size_t i;
+ int rc;
+
+ if (!efx->ptp_data)
+ return 0;
+
+ /* Copy software statistics */
+ for (i = 0; i < PTP_STAT_COUNT; i++) {
+ if (efx_ptp_stat_desc[i].dma_width)
+ continue;
+ stats[i] = *(unsigned int *)((char *)efx->ptp_data +
+ efx_ptp_stat_desc[i].offset);
+ }
+
+ /* Fetch MC statistics. We *must* fill in all statistics or
+ * risk leaking kernel memory to userland, so if the MCDI
+ * request fails we pretend we got zeroes.
+ */
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_STATUS);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev,
+ "MC_CMD_PTP_OP_STATUS failed (%d)\n", rc);
+ memset(outbuf, 0, sizeof(outbuf));
+ }
+ efx_nic_update_stats(efx_ptp_stat_desc, PTP_STAT_COUNT,
+ efx_ptp_stat_mask,
+ stats, _MCDI_PTR(outbuf, 0), false);
+
+ return PTP_STAT_COUNT;
+}
+
+/* For Siena platforms, NIC time is in seconds and nanoseconds */
+static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+ struct timespec ts = ns_to_timespec(ns);
+ *nic_major = ts.tv_sec;
+ *nic_minor = ts.tv_nsec;
+}
+
+static ktime_t efx_ptp_s_ns_to_ktime_correction(u32 nic_major, u32 nic_minor,
+ s32 correction)
+{
+ ktime_t kt = ktime_set(nic_major, nic_minor);
+ if (correction >= 0)
+ kt = ktime_add_ns(kt, (u64)correction);
+ else
+ kt = ktime_sub_ns(kt, (u64)-correction);
+ return kt;
+}
+
+/* To convert from s27 format to ns we multiply then divide by a power of 2.
+ * For the conversion from ns to s27, the operation is also converted to a
+ * multiply and shift.
+ */
+#define S27_TO_NS_SHIFT (27)
+#define NS_TO_S27_MULT (((1ULL << 63) + NSEC_PER_SEC / 2) / NSEC_PER_SEC)
+#define NS_TO_S27_SHIFT (63 - S27_TO_NS_SHIFT)
+#define S27_MINOR_MAX (1 << S27_TO_NS_SHIFT)
+
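These constants encode minor = ns * 2^27 / 10^9 as a 64-bit multiply by NS_TO_S27_MULT (roughly 2^63 / 10^9) followed by a right shift of NS_TO_S27_SHIFT (36), with half-unit offsets for rounding in each direction. A standalone sketch (not part of the patch) of the conversion and its round-trip:

	#include <stdint.h>
	#include <stdio.h>

	#define S27_TO_NS_SHIFT 27
	#define NS_TO_S27_MULT (((1ULL << 63) + 500000000ULL) / 1000000000ULL)
	#define NS_TO_S27_SHIFT (63 - S27_TO_NS_SHIFT)

	int main(void)
	{
		uint64_t ns = 500000000; /* half a second */
		uint32_t minor = (uint32_t)((ns * NS_TO_S27_MULT +
					     (1ULL << (NS_TO_S27_SHIFT - 1))) >>
					    NS_TO_S27_SHIFT);
		uint64_t back = ((uint64_t)minor * 1000000000ULL +
				 (1ULL << (S27_TO_NS_SHIFT - 1))) >> S27_TO_NS_SHIFT;

		/* Expect minor = 1 << 26 (half of 2^27) and back = 500000000. */
		printf("minor=%u back=%llu\n", minor, (unsigned long long)back);
		return 0;
	}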
+/* For Huntington platforms NIC time is in seconds and fractions of a second
+ * where the minor register only uses 27 bits in units of 2^-27s.
+ */
+static void efx_ptp_ns_to_s27(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+ struct timespec ts = ns_to_timespec(ns);
+ u32 maj = ts.tv_sec;
+ u32 min = (u32)(((u64)ts.tv_nsec * NS_TO_S27_MULT +
+ (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT);
+
+ /* The conversion can result in the minor value exceeding the maximum.
+ * In this case, round up to the next second.
+ */
+ if (min >= S27_MINOR_MAX) {
+ min -= S27_MINOR_MAX;
+ maj++;
+ }
+
+ *nic_major = maj;
+ *nic_minor = min;
+}
+
+static inline ktime_t efx_ptp_s27_to_ktime(u32 nic_major, u32 nic_minor)
+{
+ u32 ns = (u32)(((u64)nic_minor * NSEC_PER_SEC +
+ (1ULL << (S27_TO_NS_SHIFT - 1))) >> S27_TO_NS_SHIFT);
+ return ktime_set(nic_major, ns);
+}
+
+static ktime_t efx_ptp_s27_to_ktime_correction(u32 nic_major, u32 nic_minor,
+ s32 correction)
+{
+ /* Apply the correction and deal with carry */
+ nic_minor += correction;
+ if ((s32)nic_minor < 0) {
+ nic_minor += S27_MINOR_MAX;
+ nic_major--;
+ } else if (nic_minor >= S27_MINOR_MAX) {
+ nic_minor -= S27_MINOR_MAX;
+ nic_major++;
+ }
+
+ return efx_ptp_s27_to_ktime(nic_major, nic_minor);
+}
+
+/* Get PTP attributes and set up time conversions */
+static int efx_ptp_get_attributes(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN);
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ int rc;
+ u32 fmt;
+ size_t out_len;
+
+ /* Get the PTP attributes. If the NIC doesn't support the operation, we
+ * use the default format for compatibility with older NICs, i.e.
+ * seconds and nanoseconds.
+ */
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_GET_ATTRIBUTES);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &out_len);
+ if (rc == 0)
+ fmt = MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT);
+ else if (rc == -EINVAL)
+ fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;
+ else
+ return rc;
+
+ if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION) {
+ ptp->ns_to_nic_time = efx_ptp_ns_to_s27;
+ ptp->nic_to_kernel_time = efx_ptp_s27_to_ktime_correction;
+ } else if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS) {
+ ptp->ns_to_nic_time = efx_ptp_ns_to_s_ns;
+ ptp->nic_to_kernel_time = efx_ptp_s_ns_to_ktime_correction;
+ } else {
+ return -ERANGE;
+ }
+
+ ptp->time_format = fmt;
+
+ /* MC_CMD_PTP_OP_GET_ATTRIBUTES is an extended version of an older
+ * operation MC_CMD_PTP_OP_GET_TIME_FORMAT that also returns a value
+ * to use for the minimum acceptable corrected synchronization window.
+ * If we have the extra information, store it. For older firmware that
+ * does not implement the extended command, use the default value.
+ */
+ if (rc == 0 && out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN)
+ ptp->min_synchronisation_ns =
+ MCDI_DWORD(outbuf,
+ PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN);
+ else
+ ptp->min_synchronisation_ns = DEFAULT_MIN_SYNCHRONISATION_NS;
+
+ return 0;
+}
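Note the fallback shape here: firmware that predates MC_CMD_PTP_OP_GET_ATTRIBUTES rejects the operation with -EINVAL, which is treated as 'use the Siena-era seconds/nanoseconds format' rather than as a fatal error, and only other error codes are propagated. efx_ptp_get_timestamp_corrections() below follows the same pattern, substituting zero corrections.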
+
+/* Get PTP timestamp corrections */
+static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN);
+ int rc;
+
+ /* Get the timestamp corrections from the NIC. If this operation is
+ * not supported (older NICs) then no correction is required.
+ */
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP,
+ MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc == 0) {
+ efx->ptp_data->ts_corrections.tx = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT);
+ efx->ptp_data->ts_corrections.rx = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE);
+ efx->ptp_data->ts_corrections.pps_out = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT);
+ efx->ptp_data->ts_corrections.pps_in = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN);
+ } else if (rc == -EINVAL) {
+ efx->ptp_data->ts_corrections.tx = 0;
+ efx->ptp_data->ts_corrections.rx = 0;
+ efx->ptp_data->ts_corrections.pps_out = 0;
+ efx->ptp_data->ts_corrections.pps_in = 0;
+ } else {
+ return rc;
+ }
+
+ return 0;
+}
+
/* Enable MCDI PTP support. */
static int efx_ptp_enable(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ int rc;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE,
- efx->ptp_data->channel->channel);
+ efx->ptp_data->channel ?
+ efx->ptp_data->channel->channel : 0);
MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode);
- return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ rc = (rc == -EALREADY) ? 0 : rc;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_PTP,
+ MC_CMD_PTP_IN_ENABLE_LEN,
+ outbuf, sizeof(outbuf), rc);
+ return rc;
}
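Switching to efx_mcdi_rpc_quiet() with an explicit efx_mcdi_display_error() keeps the expected -EALREADY result (PTP already enabled) out of the logs while still reporting genuine failures; efx_ptp_disable() below gets the same treatment.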
/* Disable MCDI PTP support.
@@ -330,11 +598,19 @@ static int efx_ptp_enable(struct efx_nic *efx)
static int efx_ptp_disable(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ int rc;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
- return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ rc = (rc == -EALREADY) ? 0 : rc;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_PTP,
+ MC_CMD_PTP_IN_DISABLE_LEN,
+ outbuf, sizeof(outbuf), rc);
+ return rc;
}
static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
@@ -402,11 +678,10 @@ static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data),
unsigned start_ns, end_ns;
timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART);
- timeset->seconds = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_SECONDS);
- timeset->nanoseconds = MCDI_DWORD(data,
- PTP_OUT_SYNCHRONIZE_NANOSECONDS);
+ timeset->major = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MAJOR);
+ timeset->minor = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MINOR);
timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND),
- timeset->waitns = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
+ timeset->wait = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
/* Ignore seconds */
start_ns = timeset->host_start & MC_NANOSECOND_MASK;
@@ -435,62 +710,73 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
MCDI_VAR_ARRAY_LEN(response_length,
PTP_OUT_SYNCHRONIZE_TIMESET);
unsigned i;
- unsigned total;
unsigned ngood = 0;
unsigned last_good = 0;
struct efx_ptp_data *ptp = efx->ptp_data;
u32 last_sec;
u32 start_sec;
struct timespec delta;
+ ktime_t mc_time;
if (number_readings == 0)
return -EAGAIN;
- /* Read the set of results and increment stats for any results that
- * appera to be erroneous.
+ /* Read the set of results and find the last good host-MC
+ * synchronization result. The MC takes its timestamp when it finishes
+ * reading the host time, so the corrected window time should be fairly
+ * constant for a given platform. Increment stats for any results that
+ * appear to be erroneous.
*/
for (i = 0; i < number_readings; i++) {
+ s32 window, corrected;
+ struct timespec wait;
+
efx_ptp_read_timeset(
MCDI_ARRAY_STRUCT_PTR(synch_buf,
PTP_OUT_SYNCHRONIZE_TIMESET, i),
&ptp->timeset[i]);
- }
- /* Find the last good host-MC synchronization result. The MC times
- * when it finishes reading the host time so the corrected window time
- * should be fairly constant for a given platform.
- */
- total = 0;
- for (i = 0; i < number_readings; i++)
- if (ptp->timeset[i].window > ptp->timeset[i].waitns) {
- unsigned win;
-
- win = ptp->timeset[i].window - ptp->timeset[i].waitns;
- if (win >= MIN_SYNCHRONISATION_NS &&
- win < MAX_SYNCHRONISATION_NS) {
- total += ptp->timeset[i].window;
- ngood++;
- last_good = i;
- }
+ wait = ktime_to_timespec(
+ ptp->nic_to_kernel_time(0, ptp->timeset[i].wait, 0));
+ window = ptp->timeset[i].window;
+ corrected = window - wait.tv_nsec;
+
+ /* We expect the uncorrected synchronization window to be at
+ * least as large as the interval between host start and end
+ * times. If it is smaller than this, it is most likely
+ * to be a consequence of the host's time being adjusted.
+ * Check that the corrected sync window is in a reasonable
+ * range. If it is out of range it is likely to be because an
+ * interrupt or other delay occurred between reading the system
+ * time and writing it to MC memory.
+ */
+ if (window < SYNCHRONISATION_GRANULARITY_NS) {
+ ++ptp->invalid_sync_windows;
+ } else if (corrected >= MAX_SYNCHRONISATION_NS) {
+ ++ptp->oversize_sync_windows;
+ } else if (corrected < ptp->min_synchronisation_ns) {
+ ++ptp->undersize_sync_windows;
+ } else {
+ ngood++;
+ last_good = i;
}
+ }
if (ngood == 0) {
netif_warn(efx, drv, efx->net_dev,
- "PTP no suitable synchronisations %dns\n",
- ptp->base_sync_ns);
+ "PTP no suitable synchronisations\n");
return -EAGAIN;
}
- /* Average minimum this synchronisation */
- ptp->last_sync_ns = DIV_ROUND_UP(total, ngood);
- if (!ptp->base_sync_valid || (ptp->last_sync_ns < ptp->base_sync_ns)) {
- ptp->base_sync_valid = true;
- ptp->base_sync_ns = ptp->last_sync_ns;
- }
+ /* Convert the NIC time into kernel time. No correction is required;
+ * this time is the output of a firmware process.
+ */
+ mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
+ ptp->timeset[last_good].minor, 0);
/* Calculate delay from actual PPS to last_time */
- delta.tv_nsec =
- ptp->timeset[last_good].nanoseconds +
+ delta = ktime_to_timespec(mc_time);
+ delta.tv_nsec +=
last_time->ts_real.tv_nsec -
(ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
@@ -551,6 +837,11 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
loops++;
}
+ if (loops <= 1)
+ ++ptp->fast_syncs;
+ if (!time_before(jiffies, timeout))
+ ++ptp->sync_timeouts;
+
if (ACCESS_ONCE(*start))
efx_ptp_send_times(efx, &last_time);
@@ -559,9 +850,20 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
MC_CMD_PTP_IN_SYNCHRONIZE_LEN,
synch_buf, sizeof(synch_buf),
&response_length);
- if (rc == 0)
+ if (rc == 0) {
rc = efx_ptp_process_times(efx, synch_buf, response_length,
&last_time);
+ if (rc == 0)
+ ++ptp->good_syncs;
+ else
+ ++ptp->no_time_syncs;
+ }
+
+ /* Increment the bad syncs counter if the synchronize fails, whatever
+ * the reason.
+ */
+ if (rc != 0)
+ ++ptp->bad_syncs;
return rc;
}
@@ -600,9 +902,10 @@ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
goto fail;
memset(&timestamps, 0, sizeof(timestamps));
- timestamps.hwtstamp = ktime_set(
- MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_SECONDS),
- MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_NANOSECONDS));
+ timestamps.hwtstamp = ptp_data->nic_to_kernel_time(
+ MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MAJOR),
+ MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MINOR),
+ ptp_data->ts_corrections.tx);
skb_tstamp_tx(skb, &timestamps);
@@ -620,6 +923,9 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
struct list_head *cursor;
struct list_head *next;
+ if (ptp->rx_ts_inline)
+ return;
+
/* Drop time-expired events */
spin_lock_bh(&ptp->evt_lock);
if (!list_empty(&ptp->evt_list)) {
@@ -635,6 +941,11 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
}
}
}
+ /* If the event overflow flag is set and the event list is now empty,
+ * clear the flag to re-enable the overflow warning message.
+ */
+ if (ptp->evt_overflow && list_empty(&ptp->evt_list))
+ ptp->evt_overflow = false;
spin_unlock_bh(&ptp->evt_lock);
}
@@ -648,6 +959,8 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
struct efx_ptp_match *match;
enum ptp_packet_state rc = PTP_PACKET_STATE_UNMATCHED;
+ WARN_ON_ONCE(ptp->rx_ts_inline);
+
spin_lock_bh(&ptp->evt_lock);
evts_waiting = !list_empty(&ptp->evt_list);
spin_unlock_bh(&ptp->evt_lock);
@@ -676,6 +989,11 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
break;
}
}
+ /* If the event overflow flag is set and the event list is now empty,
+ * clear the flag to re-enable the overflow warning message.
+ */
+ if (ptp->evt_overflow && list_empty(&ptp->evt_list))
+ ptp->evt_overflow = false;
spin_unlock_bh(&ptp->evt_lock);
return rc;
@@ -684,13 +1002,10 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
/* Process any queued receive events and corresponding packets
*
* q is returned with all the packets that are ready for delivery.
- * true is returned if at least one of those packets requires
- * synchronisation.
*/
-static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
+static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
{
struct efx_ptp_data *ptp = efx->ptp_data;
- bool rc = false;
struct sk_buff *skb;
while ((skb = skb_dequeue(&ptp->rxq))) {
@@ -701,12 +1016,10 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
__skb_queue_tail(q, skb);
} else if (efx_ptp_match_rx(efx, skb) ==
PTP_PACKET_STATE_MATCHED) {
- rc = true;
__skb_queue_tail(q, skb);
} else if (time_after(jiffies, match->expiry)) {
match->state = PTP_PACKET_STATE_TIMED_OUT;
- netif_warn(efx, rx_err, efx->net_dev,
- "PTP packet - no timestamp seen\n");
+ ++ptp->rx_no_timestamp;
__skb_queue_tail(q, skb);
} else {
/* Replace unprocessed entry and stop */
@@ -714,8 +1027,6 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
break;
}
}
-
- return rc;
}
/* Complete processing of a received packet */
@@ -726,13 +1037,27 @@ static inline void efx_ptp_process_rx(struct efx_nic *efx, struct sk_buff *skb)
local_bh_enable();
}
-static int efx_ptp_start(struct efx_nic *efx)
+static void efx_ptp_remove_multicast_filters(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+
+ if (ptp->rxfilter_installed) {
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_general);
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_event);
+ ptp->rxfilter_installed = false;
+ }
+}
+
+static int efx_ptp_insert_multicast_filters(struct efx_nic *efx)
{
struct efx_ptp_data *ptp = efx->ptp_data;
struct efx_filter_spec rxfilter;
int rc;
- ptp->reset_required = false;
+ if (!ptp->channel || ptp->rxfilter_installed)
+ return 0;
/* Must filter on both event and general ports to ensure
* that there is no packet re-ordering.
@@ -765,40 +1090,53 @@ static int efx_ptp_start(struct efx_nic *efx)
goto fail;
ptp->rxfilter_general = rc;
+ ptp->rxfilter_installed = true;
+ return 0;
+
+fail:
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_event);
+ return rc;
+}
+
+static int efx_ptp_start(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ int rc;
+
+ ptp->reset_required = false;
+
+ rc = efx_ptp_insert_multicast_filters(efx);
+ if (rc)
+ return rc;
+
rc = efx_ptp_enable(efx);
if (rc != 0)
- goto fail2;
+ goto fail;
ptp->evt_frag_idx = 0;
ptp->current_adjfreq = 0;
- ptp->rxfilter_installed = true;
return 0;
-fail2:
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_general);
fail:
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_event);
-
+ efx_ptp_remove_multicast_filters(efx);
return rc;
}
static int efx_ptp_stop(struct efx_nic *efx)
{
struct efx_ptp_data *ptp = efx->ptp_data;
- int rc = efx_ptp_disable(efx);
struct list_head *cursor;
struct list_head *next;
+ int rc;
- if (ptp->rxfilter_installed) {
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_general);
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_event);
- ptp->rxfilter_installed = false;
- }
+ if (ptp == NULL)
+ return 0;
+
+ rc = efx_ptp_disable(efx);
+
+ efx_ptp_remove_multicast_filters(efx);
/* Make sure RX packets are really delivered */
efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq);
@@ -809,16 +1147,24 @@ static int efx_ptp_stop(struct efx_nic *efx)
list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
list_move(cursor, &efx->ptp_data->evt_free_list);
}
+ ptp->evt_overflow = false;
spin_unlock_bh(&efx->ptp_data->evt_lock);
return rc;
}
+static int efx_ptp_restart(struct efx_nic *efx)
+{
+ if (efx->ptp_data && efx->ptp_data->enabled)
+ return efx_ptp_start(efx);
+ return 0;
+}
+
static void efx_ptp_pps_worker(struct work_struct *work)
{
struct efx_ptp_data *ptp =
container_of(work, struct efx_ptp_data, pps_work);
- struct efx_nic *efx = ptp->channel->efx;
+ struct efx_nic *efx = ptp->efx;
struct ptp_clock_event ptp_evt;
if (efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS))
@@ -829,13 +1175,11 @@ static void efx_ptp_pps_worker(struct work_struct *work)
ptp_clock_event(ptp->phc_clock, &ptp_evt);
}
-/* Process any pending transmissions and timestamp any received packets.
- */
static void efx_ptp_worker(struct work_struct *work)
{
struct efx_ptp_data *ptp_data =
container_of(work, struct efx_ptp_data, work);
- struct efx_nic *efx = ptp_data->channel->efx;
+ struct efx_nic *efx = ptp_data->efx;
struct sk_buff *skb;
struct sk_buff_head tempq;
@@ -848,42 +1192,50 @@ static void efx_ptp_worker(struct work_struct *work)
efx_ptp_drop_time_expired_events(efx);
__skb_queue_head_init(&tempq);
- if (efx_ptp_process_events(efx, &tempq) ||
- !skb_queue_empty(&ptp_data->txq)) {
+ efx_ptp_process_events(efx, &tempq);
- while ((skb = skb_dequeue(&ptp_data->txq)))
- efx_ptp_xmit_skb(efx, skb);
- }
+ while ((skb = skb_dequeue(&ptp_data->txq)))
+ efx_ptp_xmit_skb(efx, skb);
while ((skb = __skb_dequeue(&tempq)))
efx_ptp_process_rx(efx, skb);
}
-/* Initialise PTP channel and state.
- *
- * Setting core_index to zero causes the queue to be initialised and doesn't
- * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue.
- */
-static int efx_ptp_probe_channel(struct efx_channel *channel)
+static const struct ptp_clock_info efx_phc_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "sfc",
+ .max_adj = MAX_PPB,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 1,
+ .adjfreq = efx_phc_adjfreq,
+ .adjtime = efx_phc_adjtime,
+ .gettime = efx_phc_gettime,
+ .settime = efx_phc_settime,
+ .enable = efx_phc_enable,
+};
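Moving the ptp_clock_info initialisation into a static const template, copied in efx_ptp_probe() below, replaces the dozen per-field assignments that the old probe path performed; note that the clock name also changes from the port MAC address to a fixed "sfc".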
+
+/* Initialise PTP state. */
+int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
{
- struct efx_nic *efx = channel->efx;
struct efx_ptp_data *ptp;
int rc = 0;
unsigned int pos;
- channel->irq_moderation = 0;
- channel->rx_queue.core_index = 0;
-
ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL);
efx->ptp_data = ptp;
if (!efx->ptp_data)
return -ENOMEM;
+ ptp->efx = efx;
+ ptp->channel = channel;
+ ptp->rx_ts_inline = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
+
rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL);
if (rc != 0)
goto fail1;
- ptp->channel = channel;
skb_queue_head_init(&ptp->rxq);
skb_queue_head_init(&ptp->txq);
ptp->workwq = create_singlethread_workqueue("sfc_ptp");
@@ -901,34 +1253,34 @@ static int efx_ptp_probe_channel(struct efx_channel *channel)
spin_lock_init(&ptp->evt_lock);
for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
+ ptp->evt_overflow = false;
- ptp->phc_clock_info.owner = THIS_MODULE;
- snprintf(ptp->phc_clock_info.name,
- sizeof(ptp->phc_clock_info.name),
- "%pm", efx->net_dev->perm_addr);
- ptp->phc_clock_info.max_adj = MAX_PPB;
- ptp->phc_clock_info.n_alarm = 0;
- ptp->phc_clock_info.n_ext_ts = 0;
- ptp->phc_clock_info.n_per_out = 0;
- ptp->phc_clock_info.pps = 1;
- ptp->phc_clock_info.adjfreq = efx_phc_adjfreq;
- ptp->phc_clock_info.adjtime = efx_phc_adjtime;
- ptp->phc_clock_info.gettime = efx_phc_gettime;
- ptp->phc_clock_info.settime = efx_phc_settime;
- ptp->phc_clock_info.enable = efx_phc_enable;
-
- ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
- &efx->pci_dev->dev);
- if (IS_ERR(ptp->phc_clock)) {
- rc = PTR_ERR(ptp->phc_clock);
+ /* Get the NIC PTP attributes and set up time conversions */
+ rc = efx_ptp_get_attributes(efx);
+ if (rc < 0)
goto fail3;
- }
- INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
- ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
- if (!ptp->pps_workwq) {
- rc = -ENOMEM;
- goto fail4;
+ /* Get the timestamp corrections */
+ rc = efx_ptp_get_timestamp_corrections(efx);
+ if (rc < 0)
+ goto fail3;
+
+ if (efx->mcdi->fn_flags &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) {
+ ptp->phc_clock_info = efx_phc_clock_info;
+ ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
+ &efx->pci_dev->dev);
+ if (IS_ERR(ptp->phc_clock)) {
+ rc = PTR_ERR(ptp->phc_clock);
+ goto fail3;
+ }
+
+ INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
+ ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
+ if (!ptp->pps_workwq) {
+ rc = -ENOMEM;
+ goto fail4;
+ }
}
ptp->nic_ts_enabled = false;
@@ -949,14 +1301,27 @@ fail1:
return rc;
}
-static void efx_ptp_remove_channel(struct efx_channel *channel)
+/* Initialise PTP channel.
+ *
+ * Setting core_index to zero causes the queue to be initialised and doesn't
+ * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue.
+ */
+static int efx_ptp_probe_channel(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
+ channel->irq_moderation = 0;
+ channel->rx_queue.core_index = 0;
+
+ return efx_ptp_probe(efx, channel);
+}
+
+void efx_ptp_remove(struct efx_nic *efx)
+{
if (!efx->ptp_data)
return;
- (void)efx_ptp_disable(channel->efx);
+ (void)efx_ptp_disable(efx);
cancel_work_sync(&efx->ptp_data->work);
cancel_work_sync(&efx->ptp_data->pps_work);
@@ -964,15 +1329,22 @@ static void efx_ptp_remove_channel(struct efx_channel *channel)
skb_queue_purge(&efx->ptp_data->rxq);
skb_queue_purge(&efx->ptp_data->txq);
- ptp_clock_unregister(efx->ptp_data->phc_clock);
+ if (efx->ptp_data->phc_clock) {
+ destroy_workqueue(efx->ptp_data->pps_workwq);
+ ptp_clock_unregister(efx->ptp_data->phc_clock);
+ }
destroy_workqueue(efx->ptp_data->workwq);
- destroy_workqueue(efx->ptp_data->pps_workwq);
efx_nic_free_buffer(efx, &efx->ptp_data->start);
kfree(efx->ptp_data);
}
+static void efx_ptp_remove_channel(struct efx_channel *channel)
+{
+ efx_ptp_remove(channel->efx);
+}
+
static void efx_ptp_get_channel_name(struct efx_channel *channel,
char *buf, size_t len)
{
@@ -989,7 +1361,11 @@ bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
skb->len >= PTP_MIN_LENGTH &&
skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM &&
likely(skb->protocol == htons(ETH_P_IP)) &&
+ skb_transport_header_was_set(skb) &&
+ skb_network_header_len(skb) >= sizeof(struct iphdr) &&
ip_hdr(skb)->protocol == IPPROTO_UDP &&
+ skb_headlen(skb) >=
+ skb_transport_offset(skb) + sizeof(struct udphdr) &&
udp_hdr(skb)->dest == htons(PTP_EVENT_PORT);
}
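The added checks ensure that a full IPv4 header and a complete UDP header are actually present in the skb's linear data before ip_hdr() and udp_hdr() are dereferenced, so a malformed or truncated packet cannot cause an out-of-bounds read ahead of the destination-port comparison.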
@@ -1049,14 +1425,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
/* Does this packet require timestamping? */
if (ntohs(*(__be16 *)&skb->data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
- struct skb_shared_hwtstamps *timestamps;
-
match->state = PTP_PACKET_STATE_UNMATCHED;
- /* Clear all timestamps held: filled in later */
- timestamps = skb_hwtstamps(skb);
- memset(timestamps, 0, sizeof(*timestamps));
-
/* We expect the sequence number to be in the same position in
* the packet for PTP V1 and V2
*/
@@ -1101,12 +1471,17 @@ int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
return NETDEV_TX_OK;
}
-static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
- unsigned int new_mode)
+int efx_ptp_get_mode(struct efx_nic *efx)
+{
+ return efx->ptp_data->mode;
+}
+
+int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+ unsigned int new_mode)
{
if ((enable_wanted != efx->ptp_data->enabled) ||
(enable_wanted && (efx->ptp_data->mode != new_mode))) {
- int rc;
+ int rc = 0;
if (enable_wanted) {
/* Change of mode requires disable */
@@ -1123,7 +1498,8 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
* succeed.
*/
efx->ptp_data->mode = new_mode;
- rc = efx_ptp_start(efx);
+ if (netif_running(efx->net_dev))
+ rc = efx_ptp_start(efx);
if (rc == 0) {
rc = efx_ptp_synchronize(efx,
PTP_SYNC_ATTEMPTS * 2);
@@ -1145,8 +1521,6 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
{
- bool enable_wanted = false;
- unsigned int new_mode;
int rc;
if (init->flags)
@@ -1156,63 +1530,20 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
(init->tx_type != HWTSTAMP_TX_ON))
return -ERANGE;
- new_mode = efx->ptp_data->mode;
- /* Determine whether any PTP HW operations are required */
- switch (init->rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
- new_mode = MC_CMD_PTP_MODE_V1;
- enable_wanted = true;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- /* Although these three are accepted only IPV4 packets will be
- * timestamped
- */
- init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
- new_mode = MC_CMD_PTP_MODE_V2_ENHANCED;
- enable_wanted = true;
- break;
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- /* Non-IP + IPv6 timestamping not supported */
- return -ERANGE;
- break;
- default:
- return -ERANGE;
- }
-
- if (init->tx_type != HWTSTAMP_TX_OFF)
- enable_wanted = true;
-
- /* Old versions of the firmware do not support the improved
- * UUID filtering option (SF bug 33070). If the firmware does
- * not accept the enhanced mode, fall back to the standard PTP
- * v2 UUID filtering.
- */
- rc = efx_ptp_change_mode(efx, enable_wanted, new_mode);
- if ((rc != 0) && (new_mode == MC_CMD_PTP_MODE_V2_ENHANCED))
- rc = efx_ptp_change_mode(efx, enable_wanted, MC_CMD_PTP_MODE_V2);
- if (rc != 0)
+ rc = efx->type->ptp_set_ts_config(efx, init);
+ if (rc)
return rc;
efx->ptp_data->config = *init;
-
return 0;
}
void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
{
struct efx_ptp_data *ptp = efx->ptp_data;
+ struct efx_nic *primary = efx->primary;
+
+ ASSERT_RTNL();
if (!ptp)
return;
@@ -1220,18 +1551,14 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE);
- ts_info->phc_index = ptp_clock_index(ptp->phc_clock);
+ if (primary && primary->ptp_data && primary->ptp_data->phc_clock)
+ ts_info->phc_index =
+ ptp_clock_index(primary->ptp_data->phc_clock);
ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON;
- ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE |
- 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
- 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
- 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
- 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
- 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
- 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+ ts_info->rx_filters = ptp->efx->type->hwtstamp_filters;
}
-int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
+int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr)
{
struct hwtstamp_config config;
int rc;
@@ -1251,6 +1578,15 @@ int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
? -EFAULT : 0;
}
+int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr)
+{
+ if (!efx->ptp_data)
+ return -EOPNOTSUPP;
+
+ return copy_to_user(ifr->ifr_data, &efx->ptp_data->config,
+ sizeof(efx->ptp_data->config)) ? -EFAULT : 0;
+}
+
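The new efx_ptp_get_ts_config() backs the SIOCGHWTSTAMP ioctl, which lets an application read back the current timestamping configuration without changing it. A minimal userspace sketch of the call (interface name and error handling are illustrative, not taken from this patch):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/sockios.h>
	#include <linux/net_tstamp.h>

	static int query_hwtstamp(const char *ifname)
	{
		struct hwtstamp_config cfg;
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return -1;
		memset(&ifr, 0, sizeof(ifr));
		memset(&cfg, 0, sizeof(cfg));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;	/* driver copies its config here */
		if (ioctl(fd, SIOCGHWTSTAMP, &ifr) < 0) {
			close(fd);
			return -1;
		}
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
		close(fd);
		return 0;
	}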
static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len)
{
struct efx_ptp_data *ptp = efx->ptp_data;
@@ -1270,6 +1606,9 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
{
struct efx_ptp_event_rx *evt = NULL;
+ if (WARN_ON_ONCE(ptp->rx_ts_inline))
+ return;
+
if (ptp->evt_frag_idx != 3) {
ptp_event_failure(efx, 3);
return;
@@ -1288,15 +1627,21 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
MCDI_EVENT_SRC) << 8) |
(EFX_QWORD_FIELD(ptp->evt_frags[0],
MCDI_EVENT_SRC) << 16));
- evt->hwtimestamp = ktime_set(
+ evt->hwtimestamp = efx->ptp_data->nic_to_kernel_time(
EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA),
- EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA));
+ EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA),
+ ptp->ts_corrections.rx);
evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
list_add_tail(&evt->link, &ptp->evt_list);
queue_work(ptp->workwq, &ptp->work);
- } else {
- netif_err(efx, rx_err, efx->net_dev, "No free PTP event");
+ } else if (!ptp->evt_overflow) {
+ /* Log a warning message and set the event overflow flag.
+ * The message won't be logged again until the event queue
+ * becomes empty.
+ */
+ netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n");
+ ptp->evt_overflow = true;
}
spin_unlock_bh(&ptp->evt_lock);
}
@@ -1360,12 +1705,99 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
}
}
+void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev)
+{
+ channel->sync_timestamp_major = MCDI_EVENT_FIELD(*ev, PTP_TIME_MAJOR);
+ channel->sync_timestamp_minor =
+ MCDI_EVENT_FIELD(*ev, PTP_TIME_MINOR_26_19) << 19;
+ /* if sync events have been disabled then we want to silently ignore
+ * this event, so throw away the result.
+ */
+ (void) cmpxchg(&channel->sync_events_state, SYNC_EVENTS_REQUESTED,
+ SYNC_EVENTS_VALID);
+}
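The cmpxchg() above is the only synchronisation on sync_events_state: it promotes the state from SYNC_EVENTS_REQUESTED to SYNC_EVENTS_VALID and leaves any other value (such as a concurrent disable) alone, which is why the return value can be discarded. A minimal sketch of the pattern, with a reduced state set assumed for illustration:

	/* Sketch only; the real driver has more states than these. */
	enum sync_state { SYNC_DISABLED, SYNC_REQUESTED, SYNC_VALID };

	static void mark_sync_valid(enum sync_state *state)
	{
		/* Promote REQUESTED -> VALID. If the comparison fails,
		 * sync events were disabled in the meantime and must
		 * not be overwritten, so the old value is ignored.
		 */
		(void)cmpxchg(state, SYNC_REQUESTED, SYNC_VALID);
	}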
+
+/* make some assumptions about the time representation rather than abstract it,
+ * since we currently only support one type of inline timestamping and only on
+ * EF10.
+ */
+#define MINOR_TICKS_PER_SECOND 0x8000000
+/* Fuzz factor for sync events to be out of order with RX events */
+#define FUZZ (MINOR_TICKS_PER_SECOND / 10)
+#define EXPECTED_SYNC_EVENTS_PER_SECOND 4
+
+static inline u32 efx_rx_buf_timestamp_minor(struct efx_nic *efx, const u8 *eh)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_ts_offset));
+#else
+ const u8 *data = eh + efx->rx_packet_ts_offset;
+ return (u32)data[0] |
+ (u32)data[1] << 8 |
+ (u32)data[2] << 16 |
+ (u32)data[3] << 24;
+#endif
+}
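The open-coded byte assembly in the #else branch above is simply a little-endian unaligned 32-bit load. A sketch of the same conversion using the generic helper, assuming <asm/unaligned.h> would be acceptable in this driver:

	#include <asm/unaligned.h>

	/* get_unaligned_le32() already selects the efficient path when
	 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set.
	 */
	static inline u32 rx_timestamp_minor(const u8 *eh, int ts_offset)
	{
		return get_unaligned_le32(eh + ts_offset);
	}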
+
+void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+ struct sk_buff *skb)
+{
+ struct efx_nic *efx = channel->efx;
+ u32 pkt_timestamp_major, pkt_timestamp_minor;
+ u32 diff, carry;
+ struct skb_shared_hwtstamps *timestamps;
+
+ pkt_timestamp_minor = (efx_rx_buf_timestamp_minor(efx,
+ skb_mac_header(skb)) +
+ (u32) efx->ptp_data->ts_corrections.rx) &
+ (MINOR_TICKS_PER_SECOND - 1);
+
+ /* get the difference between the packet and sync timestamps,
+ * modulo one second
+ */
+ diff = (pkt_timestamp_minor - channel->sync_timestamp_minor) &
+ (MINOR_TICKS_PER_SECOND - 1);
+ /* do we roll over a second boundary and need to carry the one? */
+ carry = channel->sync_timestamp_minor + diff > MINOR_TICKS_PER_SECOND ?
+ 1 : 0;
+
+ if (diff <= MINOR_TICKS_PER_SECOND / EXPECTED_SYNC_EVENTS_PER_SECOND +
+ FUZZ) {
+ /* packet is ahead of the sync event by a quarter of a second or
+ * less (allowing for fuzz)
+ */
+ pkt_timestamp_major = channel->sync_timestamp_major + carry;
+ } else if (diff >= MINOR_TICKS_PER_SECOND - FUZZ) {
+ /* packet is behind the sync event but within the fuzz factor.
+ * This means the RX packet and sync event crossed as they were
+ * placed on the event queue, which can sometimes happen.
+ */
+ pkt_timestamp_major = channel->sync_timestamp_major - 1 + carry;
+ } else {
+ /* it's outside tolerance in both directions. This might
+ * indicate that we are missing sync events for some reason, so
+ * we'll call it an error rather than risk giving a bogus
+ * timestamp.
+ */
+ netif_vdbg(efx, drv, efx->net_dev,
+ "packet timestamp %x too far from sync event %x:%x\n",
+ pkt_timestamp_minor, channel->sync_timestamp_major,
+ channel->sync_timestamp_minor);
+ return;
+ }
+
+ /* attach the timestamps to the skb */
+ timestamps = skb_hwtstamps(skb);
+ timestamps->hwtstamp =
+ efx_ptp_s27_to_ktime(pkt_timestamp_major, pkt_timestamp_minor);
+}
+
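To make the reconstruction above concrete, consider a packet that lands just after a second rollover while the newest sync event was taken just before it (values illustrative, in the S27 format where MINOR_TICKS_PER_SECOND == 1 << 27):

	u32 sync_minor = 0x7ff0000;	/* sync event near the end of second N */
	u32 pkt_minor  = 0x0011000;	/* packet just after the rollover */
	u32 diff, carry;

	diff  = (pkt_minor - sync_minor) & (MINOR_TICKS_PER_SECOND - 1);
	/* diff == 0x21000, far below a quarter second */
	carry = sync_minor + diff > MINOR_TICKS_PER_SECOND ? 1 : 0;
	/* 0x7ff0000 + 0x21000 == 0x8011000 > 0x8000000, so carry == 1
	 * and pkt_timestamp_major = sync_timestamp_major + 1: the
	 * packet is correctly assigned to second N + 1.
	 */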
static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
struct efx_ptp_data *ptp_data = container_of(ptp,
struct efx_ptp_data,
phc_clock_info);
- struct efx_nic *efx = ptp_data->channel->efx;
+ struct efx_nic *efx = ptp_data->efx;
MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN);
s64 adjustment_ns;
int rc;
@@ -1389,24 +1821,26 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
if (rc != 0)
return rc;
- ptp_data->current_adjfreq = delta;
+ ptp_data->current_adjfreq = adjustment_ns;
return 0;
}
static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
+ u32 nic_major, nic_minor;
struct efx_ptp_data *ptp_data = container_of(ptp,
struct efx_ptp_data,
phc_clock_info);
- struct efx_nic *efx = ptp_data->channel->efx;
- struct timespec delta_ts = ns_to_timespec(delta);
+ struct efx_nic *efx = ptp_data->efx;
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN);
+ efx->ptp_data->ns_to_nic_time(delta, &nic_major, &nic_minor);
+
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
- MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, 0);
- MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
- MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
+ MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MAJOR, nic_major);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MINOR, nic_minor);
return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
NULL, 0, NULL);
}
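For the EF10-style S27 representation used by the inline-timestamp code earlier in this file, ns_to_nic_time() amounts to a seconds/fraction split. A sketch under that assumption (the Siena variant instead produces plain seconds and nanoseconds, and the real hooks also fold in calibration corrections):

	#include <linux/math64.h>

	static void ns_to_s27(s64 ns, u32 *nic_major, u32 *nic_minor)
	{
		s64 sec = div_s64(ns, NSEC_PER_SEC);
		s32 rem = ns - sec * NSEC_PER_SEC;

		if (rem < 0) {		/* keep the fractional part positive */
			sec--;
			rem += NSEC_PER_SEC;
		}
		*nic_major = (u32)sec;
		/* minor ticks = rem * 2^27 / 10^9 */
		*nic_minor = (u32)div_u64((u64)rem << 27, NSEC_PER_SEC);
	}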
@@ -1416,10 +1850,11 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
struct efx_ptp_data *ptp_data = container_of(ptp,
struct efx_ptp_data,
phc_clock_info);
- struct efx_nic *efx = ptp_data->channel->efx;
+ struct efx_nic *efx = ptp_data->efx;
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN);
int rc;
+ ktime_t kt;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
@@ -1429,8 +1864,10 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
if (rc != 0)
return rc;
- ts->tv_sec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_SECONDS);
- ts->tv_nsec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_NANOSECONDS);
+ kt = ptp_data->nic_to_kernel_time(
+ MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MAJOR),
+ MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MINOR), 0);
+ *ts = ktime_to_timespec(kt);
return 0;
}
@@ -1482,7 +1919,7 @@ static const struct efx_channel_type efx_ptp_channel_type = {
.keep_eventq = false,
};
-void efx_ptp_probe(struct efx_nic *efx)
+void efx_ptp_defer_probe_with_channel(struct efx_nic *efx)
{
/* Check whether PTP is implemented on this NIC. The DISABLE
* operation will succeed if and only if it is implemented.
@@ -1491,3 +1928,20 @@ void efx_ptp_probe(struct efx_nic *efx)
efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] =
&efx_ptp_channel_type;
}
+
+void efx_ptp_start_datapath(struct efx_nic *efx)
+{
+ if (efx_ptp_restart(efx))
+ netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n");
+ /* re-enable timestamping if it was previously enabled */
+ if (efx->type->ptp_set_ts_sync_events)
+ efx->type->ptp_set_ts_sync_events(efx, true, true);
+}
+
+void efx_ptp_stop_datapath(struct efx_nic *efx)
+{
+ /* temporarily disable timestamping */
+ if (efx->type->ptp_set_ts_sync_events)
+ efx->type->ptp_set_ts_sync_events(efx, false, true);
+ efx_ptp_stop(efx);
+}
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 8f09e686fc23..48588ddf81b0 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -94,7 +94,7 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,
void efx_rx_config_page_split(struct efx_nic *efx)
{
- efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN,
+ efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
EFX_RX_BUF_ALIGNMENT);
efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
@@ -149,7 +149,7 @@ static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
* 0 on success. If a single page can be used for multiple buffers,
* then the page will either be inserted fully, or not at all.
*/
-static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
+static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
struct efx_nic *efx = rx_queue->efx;
struct efx_rx_buffer *rx_buf;
@@ -163,7 +163,8 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
do {
page = efx_reuse_page(rx_queue);
if (page == NULL) {
- page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
+ page = alloc_pages(__GFP_COLD | __GFP_COMP |
+ (atomic ? GFP_ATOMIC : GFP_KERNEL),
efx->rx_buffer_order);
if (unlikely(page == NULL))
return -ENOMEM;
@@ -189,9 +190,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
do {
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
- rx_buf->dma_addr = dma_addr + NET_IP_ALIGN;
+ rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
rx_buf->page = page;
- rx_buf->page_offset = page_offset + NET_IP_ALIGN;
+ rx_buf->page_offset = page_offset + efx->rx_ip_align;
rx_buf->len = efx->rx_dma_len;
rx_buf->flags = 0;
++rx_queue->added_count;
@@ -321,7 +322,7 @@ static void efx_discard_rx_packet(struct efx_channel *channel,
* this means this function must run from the NAPI handler, or be called
* when NAPI is disabled.
*/
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
struct efx_nic *efx = rx_queue->efx;
unsigned int fill_level, batch_size;
@@ -354,7 +355,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
do {
- rc = efx_init_rx_buffers(rx_queue);
+ rc = efx_init_rx_buffers(rx_queue, atomic);
if (unlikely(rc)) {
/* Ensure that we don't leave the rx queue empty */
if (rx_queue->added_count == rx_queue->removed_count)
@@ -439,7 +440,8 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
}
if (efx->net_dev->features & NETIF_F_RXHASH)
- skb->rxhash = efx_rx_buf_hash(efx, eh);
+ skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
+ PKT_HASH_TYPE_L3);
skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
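skb_set_hash() replaces direct assignment to skb->rxhash and records the hash type alongside the value, so consumers know whether the hash can stand in for an L4 flow hash. The producer/consumer pairing, as a sketch:

	/* Producer (driver RX path): this hardware hashes L3 addresses
	 * only; PKT_HASH_TYPE_L4 would apply if ports were included.
	 */
	skb_set_hash(skb, hw_hash, PKT_HASH_TYPE_L3);

	/* Consumer side: skb_get_hash() reuses the stored value and
	 * only falls back to software flow dissection if no usable
	 * hash was recorded.
	 */
	u32 flow = skb_get_hash(skb);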
@@ -475,14 +477,18 @@ static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
struct sk_buff *skb;
/* Allocate an SKB to store the headers */
- skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
+ skb = netdev_alloc_skb(efx->net_dev,
+ efx->rx_ip_align + efx->rx_prefix_size +
+ hdr_len);
if (unlikely(skb == NULL))
return NULL;
EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
- skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
- memcpy(__skb_put(skb, hdr_len), eh, hdr_len);
+ memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
+ efx->rx_prefix_size + hdr_len);
+ skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
+ __skb_put(skb, hdr_len);
/* Append the remaining page(s) onto the frag list */
if (rx_buf->len > hdr_len) {
@@ -619,6 +625,8 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ efx_rx_skb_attach_timestamp(channel, skb);
+
if (channel->type->receive_skb)
if (channel->type->receive_skb(channel, skb))
return;
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 144bbff5a4ae..26641817a9c7 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -722,7 +722,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
return rc_reset;
}
- if ((tests->registers < 0) && !rc_test)
+ if ((tests->memory < 0 || tests->registers < 0) && !rc_test)
rc_test = -EIO;
}
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index a2f4a06ffa4e..009dbe88f3be 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -38,6 +38,7 @@ struct efx_self_tests {
int eventq_dma[EFX_MAX_CHANNELS];
int eventq_int[EFX_MAX_CHANNELS];
/* offline tests */
+ int memory;
int registers;
int phy_ext[EFX_MAX_PHY_TESTS];
struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index d034bcd124ef..23f3a6f7737a 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -118,6 +118,54 @@ out:
/**************************************************************************
*
+ * PTP
+ *
+ **************************************************************************
+ */
+
+static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
+{
+ _efx_writed(efx, cpu_to_le32(host_time),
+ FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
+}
+
+static int siena_ptp_set_ts_config(struct efx_nic *efx,
+ struct hwtstamp_config *init)
+{
+ int rc;
+
+ switch (init->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ /* if TX timestamping is still requested then leave PTP on */
+ return efx_ptp_change_mode(efx,
+ init->tx_type != HWTSTAMP_TX_OFF,
+ efx_ptp_get_mode(efx));
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ return efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V1);
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ rc = efx_ptp_change_mode(efx, true,
+ MC_CMD_PTP_MODE_V2_ENHANCED);
+ /* bug 33070 - old versions of the firmware do not support the
+ * improved UUID filtering option. Similarly, old versions of the
+ * application do not expect it to be enabled. If the firmware
+ * does not accept the enhanced mode, fall back to the standard
+ * PTP v2 UUID filtering.
+ */
+ if (rc != 0)
+ rc = efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V2);
+ return rc;
+ default:
+ return -ERANGE;
+ }
+}
+
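Note that siena_ptp_set_ts_config() may widen the requested filter (a V1 or V2 L4 SYNC/DELAY_REQ request becomes the corresponding _EVENT filter), and the ioctl path copies the adjusted config back to userspace. A short sketch of the caller-side effect (socket setup as in the SIOCGHWTSTAMP sketch earlier; interface name assumed):

	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) == 0)
		/* the driver widened the filter to ..._V2_L4_EVENT */
		printf("applied rx_filter=%d\n", cfg.rx_filter);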
+/**************************************************************************
+ *
* Device reset
*
**************************************************************************
@@ -259,7 +307,7 @@ static int siena_probe_nic(struct efx_nic *efx)
goto fail5;
efx_sriov_probe(efx);
- efx_ptp_probe(efx);
+ efx_ptp_defer_probe_with_channel(efx);
return 0;
@@ -273,6 +321,31 @@ fail1:
return rc;
}
+static void siena_rx_push_rss_config(struct efx_nic *efx)
+{
+ efx_oword_t temp;
+
+ /* Set hash key for IPv4 */
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+
+ /* Enable IPv6 RSS */
+ BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
+ 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
+ memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
+ EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
+ FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
+ memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+
+ efx_farch_rx_push_indir_table(efx);
+}
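The slicing assumed by siena_rx_push_rss_config() is that rx_hash_key holds at least 2 * 16 + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 bytes, which is exactly what the BUILD_BUG_ON checks: the first 16 bytes serve both as the IPv4 Toeplitz key and IPv6 REG1, the next 16 fill REG2, and the tail tops off REG3. A sketch of the byte accounting, with a 40-byte key and an 8-byte high word assumed for illustration:

	u8 key[40];			/* sizes assumed for illustration */
	u8 tkey[16], reg1[16], reg2[16], reg3[8];

	memcpy(tkey, key,      16);	/* FR_BZ_RX_RSS_TKEY (IPv4)   */
	memcpy(reg1, key,      16);	/* FR_CZ_RX_RSS_IPV6_REG1     */
	memcpy(reg2, key + 16, 16);	/* FR_CZ_RX_RSS_IPV6_REG2     */
	memcpy(reg3, key + 32,  8);	/* low bytes of ..._IPV6_REG3;
					 * the THASH_ENABLE flag bits
					 * live above them in the
					 * same register */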
+
/* This call performs hardware-specific global initialisation, such as
* defining the descriptor cache sizes and number of RSS channels.
* It does not set up any buffers, descriptor rings or event queues.
@@ -313,23 +386,7 @@ static int siena_init_nic(struct efx_nic *efx)
EFX_RX_USR_BUF_SIZE >> 5);
efx_writeo(efx, &temp, FR_AZ_RX_CFG);
- /* Set hash key for IPv4 */
- memcpy(&temp, efx->rx_hash_key, sizeof(temp));
- efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
-
- /* Enable IPv6 RSS */
- BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
- 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
- FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
- memcpy(&temp, efx->rx_hash_key, sizeof(temp));
- efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
- memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
- efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
- EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
- FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
- memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
- FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
- efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+ siena_rx_push_rss_config(efx);
/* Enable event logging */
rc = efx_mcdi_log_ctrl(efx, true, false, 0);
@@ -458,6 +515,8 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
return -EAGAIN;
/* Update derived statistics */
+ efx_nic_fix_nodesc_drop_stat(efx,
+ &stats[SIENA_STAT_rx_nodesc_drop_cnt]);
efx_update_diff_stat(&stats[SIENA_STAT_tx_good_bytes],
stats[SIENA_STAT_tx_bytes] -
stats[SIENA_STAT_tx_bad_bytes]);
@@ -837,19 +896,6 @@ fail:
/**************************************************************************
*
- * PTP
- *
- **************************************************************************
- */
-
-static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
-{
- _efx_writed(efx, cpu_to_le32(host_time),
- FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
-}
-
-/**************************************************************************
- *
* Revision-dependent attributes used by efx.c and nic.c
*
**************************************************************************
@@ -878,6 +924,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.describe_stats = siena_describe_nic_stats,
.update_stats = siena_update_nic_stats,
.start_stats = efx_mcdi_mac_start_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
.set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = siena_push_irq_moderation,
@@ -902,7 +949,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.tx_init = efx_farch_tx_init,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
- .rx_push_indir_table = efx_farch_rx_push_indir_table,
+ .rx_push_rss_config = siena_rx_push_rss_config,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
.rx_remove = efx_farch_rx_remove,
@@ -939,6 +986,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.mtd_sync = efx_mcdi_mtd_sync,
#endif
.ptp_write_host_time = siena_ptp_write_host_time,
+ .ptp_set_ts_config = siena_ptp_set_ts_config,
.revision = EFX_REV_SIENA_A0,
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
@@ -957,4 +1005,11 @@ const struct efx_nic_type siena_a0_nic_type = {
NETIF_F_RXHASH | NETIF_F_NTUPLE),
.mcdi_max_ver = 1,
.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
+ .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE |
+ 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
+ 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
+ 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
+ 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
+ 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
+ 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ),
};
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 975dc2d8e548..ff57a46388ee 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -576,7 +576,6 @@ err_unmap_tx:
err_out_unmap:
pci_iounmap(pci_dev, ioaddr);
err_out_cleardev:
- pci_set_drvdata(pci_dev, NULL);
pci_release_regions(pci_dev);
err_out:
free_netdev(net_dev);
@@ -2427,7 +2426,6 @@ static void sis900_remove(struct pci_dev *pci_dev)
pci_iounmap(pci_dev, sis_priv->ioaddr);
free_netdev(net_dev);
pci_release_regions(pci_dev);
- pci_set_drvdata(pci_dev, NULL);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 0f096a890059..5a4278d1f7d0 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -17,8 +17,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Arguments:
* watchdog = TX watchdog timeout
diff --git a/drivers/net/ethernet/smsc/smc911x.h b/drivers/net/ethernet/smsc/smc911x.h
index 9965da39281b..04b35f55df97 100644
--- a/drivers/net/ethernet/smsc/smc911x.h
+++ b/drivers/net/ethernet/smsc/smc911x.h
@@ -15,8 +15,7 @@
. GNU General Public License for more details.
.
. You should have received a copy of the GNU General Public License
- . along with this program; if not, write to the Free Software
- . Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ . along with this program; if not, see <http://www.gnu.org/licenses/>.
.
. Information contained in this file was obtained from the LAN9118
. manual from SMC. To get a copy, if you really want one, you can find
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 0c9b5d94154f..96f79f7c4395 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -19,8 +19,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Arguments:
* io = for the base address
@@ -82,6 +81,7 @@ static const char version[] =
#include <linux/mii.h>
#include <linux/workqueue.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -2184,6 +2184,15 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device *
}
}
+#if IS_BUILTIN(CONFIG_OF)
+static const struct of_device_id smc91x_match[] = {
+ { .compatible = "smsc,lan91c94", },
+ { .compatible = "smsc,lan91c111", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, smc91x_match);
+#endif
+
/*
* smc_init(void)
* Input parameters:
@@ -2198,6 +2207,7 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device *
static int smc_drv_probe(struct platform_device *pdev)
{
struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev);
+ const struct of_device_id *match = NULL;
struct smc_local *lp;
struct net_device *ndev;
struct resource *res, *ires;
@@ -2217,11 +2227,34 @@ static int smc_drv_probe(struct platform_device *pdev)
*/
lp = netdev_priv(ndev);
+ lp->cfg.flags = 0;
if (pd) {
memcpy(&lp->cfg, pd, sizeof(lp->cfg));
lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
- } else {
+ }
+
+#if IS_BUILTIN(CONFIG_OF)
+ match = of_match_device(of_match_ptr(smc91x_match), &pdev->dev);
+ if (match) {
+ struct device_node *np = pdev->dev.of_node;
+ u32 val;
+
+ /* Combination of IO widths supported; default to 16-bit */
+ if (!of_property_read_u32(np, "reg-io-width", &val)) {
+ if (val & 1)
+ lp->cfg.flags |= SMC91X_USE_8BIT;
+ if ((val == 0) || (val & 2))
+ lp->cfg.flags |= SMC91X_USE_16BIT;
+ if (val & 4)
+ lp->cfg.flags |= SMC91X_USE_32BIT;
+ } else {
+ lp->cfg.flags |= SMC91X_USE_16BIT;
+ }
+ }
+#endif
+
+ if (!pd && !match) {
lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0;
lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0;
lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0;
@@ -2370,15 +2403,6 @@ static int smc_drv_resume(struct device *dev)
return 0;
}
-#ifdef CONFIG_OF
-static const struct of_device_id smc91x_match[] = {
- { .compatible = "smsc,lan91c94", },
- { .compatible = "smsc,lan91c111", },
- {},
-};
-MODULE_DEVICE_TABLE(of, smc91x_match);
-#endif
-
static struct dev_pm_ops smc_drv_pm_ops = {
.suspend = smc_drv_suspend,
.resume = smc_drv_resume,
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index c9d4c872e81d..47dce918eb0f 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -18,8 +18,7 @@
. GNU General Public License for more details.
.
. You should have received a copy of the GNU General Public License
- . along with this program; if not, write to the Free Software
- . Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ . along with this program; if not, see <http://www.gnu.org/licenses/>.
.
. Information contained in this file was obtained from the LAN91C111
. manual from SMC. To get a copy, if you really want one, you can find
@@ -46,7 +45,8 @@
defined(CONFIG_MACH_LITTLETON) ||\
defined(CONFIG_MACH_ZYLONITE2) ||\
defined(CONFIG_ARCH_VIPER) ||\
- defined(CONFIG_MACH_STARGATE2)
+ defined(CONFIG_MACH_STARGATE2) ||\
+ defined(CONFIG_ARCH_VERSATILE)
#include <asm/mach-types.h>
@@ -154,6 +154,8 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
#define SMC_outl(v, a, r) writel(v, (a) + (r))
#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
+#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
+#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
#define SMC_IRQ_FLAGS (-1) /* from resource */
/* We actually can't write halfwords properly if not word aligned */
@@ -206,23 +208,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
#define RPC_LSA_DEFAULT RPC_LED_TX_RX
#define RPC_LSB_DEFAULT RPC_LED_100_10
-#elif defined(CONFIG_ARCH_VERSATILE)
-
-#define SMC_CAN_USE_8BIT 1
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 1
-#define SMC_NOWAIT 1
-
-#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_inl(a, r) readl((a) + (r))
-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
-#define SMC_outw(v, a, r) writew(v, (a) + (r))
-#define SMC_outl(v, a, r) writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
-#define SMC_IRQ_FLAGS (-1) /* from resource */
-
#elif defined(CONFIG_MN10300)
/*
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 8564f23a6796..6382b7c416f4 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
***************************************************************************
* Rewritten, heavily based on smsc911x simple driver by SMSC.
diff --git a/drivers/net/ethernet/smsc/smsc911x.h b/drivers/net/ethernet/smsc/smsc911x.h
index 9ad5e5d39a03..23953957fed8 100644
--- a/drivers/net/ethernet/smsc/smsc911x.h
+++ b/drivers/net/ethernet/smsc/smsc911x.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
***************************************************************************/
#ifndef __SMSC911X_H__
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index f433d97aa097..d3b967aff9e0 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
***************************************************************************
*/
@@ -1541,7 +1540,7 @@ static int smsc9420_resume(struct pci_dev *pdev)
pci_set_master(pdev);
- err = pci_enable_wake(pdev, 0, 0);
+ err = pci_enable_wake(pdev, PCI_D0, 0);
if (err)
netif_warn(pd, ifup, pd->dev, "pci_enable_wake failed: %d\n",
err);
diff --git a/drivers/net/ethernet/smsc/smsc9420.h b/drivers/net/ethernet/smsc/smsc9420.h
index e441402f77a2..c63c76381af6 100644
--- a/drivers/net/ethernet/smsc/smsc9420.h
+++ b/drivers/net/ethernet/smsc/smsc9420.h
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
***************************************************************************
*/
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 22f89ffdfd95..92be6b3fe547 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -108,8 +108,6 @@ struct stmmac_priv {
spinlock_t ptp_lock;
};
-extern int phyaddr;
-
int stmmac_mdio_unregister(struct net_device *ndev);
int stmmac_mdio_register(struct net_device *ndev);
void stmmac_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8a7a23a84ac5..b8e3a4ce24b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -64,7 +64,7 @@ static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
-int phyaddr = -1;
+static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");
@@ -622,17 +622,15 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
- if (netif_msg_hw(priv)) {
- if (priv->dma_cap.time_stamp) {
- pr_debug("IEEE 1588-2002 Time Stamp supported\n");
- priv->adv_ts = 0;
- }
- if (priv->dma_cap.atime_stamp && priv->extend_desc) {
- pr_debug
- ("IEEE 1588-2008 Advanced Time Stamp supported\n");
- priv->adv_ts = 1;
- }
- }
+ priv->adv_ts = 0;
+ if (priv->dma_cap.atime_stamp && priv->extend_desc)
+ priv->adv_ts = 1;
+
+ if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
+ pr_debug("IEEE 1588-2002 Time Stamp supported\n");
+
+ if (netif_msg_hw(priv) && priv->adv_ts)
+ pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
priv->hw->ptp = &stmmac_ptp;
priv->hwts_tx_en = 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 644d80ece067..37ba2e080825 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -26,9 +26,9 @@
#include <linux/pci.h>
#include "stmmac.h"
-struct plat_stmmacenet_data plat_dat;
-struct stmmac_mdio_bus_data mdio_data;
-struct stmmac_dma_cfg dma_cfg;
+static struct plat_stmmacenet_data plat_dat;
+static struct stmmac_mdio_bus_data mdio_data;
+static struct stmmac_dma_cfg dma_cfg;
static void stmmac_default_data(void)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 51c9069ef405..38bd1f4fbe33 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -219,7 +219,7 @@ static int stmmac_pltfr_resume(struct device *dev)
return stmmac_resume(ndev);
}
-int stmmac_pltfr_freeze(struct device *dev)
+static int stmmac_pltfr_freeze(struct device *dev)
{
int ret;
struct plat_stmmacenet_data *plat_dat = dev_get_platdata(dev);
@@ -233,7 +233,7 @@ int stmmac_pltfr_freeze(struct device *dev)
return ret;
}
-int stmmac_pltfr_restore(struct device *dev)
+static int stmmac_pltfr_restore(struct device *dev)
{
struct plat_stmmacenet_data *plat_dat = dev_get_platdata(dev);
struct net_device *ndev = dev_get_drvdata(dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index b8b0eeed0f92..7680581ebe12 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -56,7 +56,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
priv->hw->ptp->config_addend(priv->ioaddr, addend);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
return 0;
}
@@ -91,7 +91,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
return 0;
}
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index b4d50d74ba18..df8d383acf48 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
- * 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* This driver uses the sungem driver (c) David Miller
* (davem@redhat.com) as its basis.
diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h
index b361424d5f57..882ce168a799 100644
--- a/drivers/net/ethernet/sun/cassini.h
+++ b/drivers/net/ethernet/sun/cassini.h
@@ -15,9 +15,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
- * 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* vendor id: 0x108E (Sun Microsystems, Inc.)
* device id: 0xabba (Cassini)
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 388540fcb977..8e2266e1f260 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3493,10 +3493,12 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
rh = (struct rx_pkt_hdr1 *) skb->data;
if (np->dev->features & NETIF_F_RXHASH)
- skb->rxhash = ((u32)rh->hashval2_0 << 24 |
- (u32)rh->hashval2_1 << 16 |
- (u32)rh->hashval1_1 << 8 |
- (u32)rh->hashval1_2 << 0);
+ skb_set_hash(skb,
+ ((u32)rh->hashval2_0 << 24 |
+ (u32)rh->hashval2_1 << 16 |
+ (u32)rh->hashval1_1 << 8 |
+ (u32)rh->hashval1_2 << 0),
+ PKT_HASH_TYPE_L3);
skb_pull(skb, sizeof(*rh));
rp->rx_packets++;
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 3df56840a3b9..1c24a8f368bd 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -751,7 +751,7 @@ static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
struct vnet_mcast_entry *m;
for (m = vp->mcast_list; m; m = m->next) {
- if (!memcmp(m->addr, addr, ETH_ALEN))
+ if (ether_addr_equal(m->addr, addr))
return m;
}
return NULL;
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index dd0dd6279b4e..4f1d2549130e 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2019,7 +2019,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
| NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM
- /*| NETIF_F_FRAGLIST */
;
ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 7536a4c01293..e8bb77d25d98 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -582,7 +582,7 @@ static void cpsw_intr_disable(struct cpsw_priv *priv)
return;
}
-void cpsw_tx_handler(void *token, int len, int status)
+static void cpsw_tx_handler(void *token, int len, int status)
{
struct sk_buff *skb = token;
struct net_device *ndev = skb->dev;
@@ -599,7 +599,7 @@ void cpsw_tx_handler(void *token, int len, int status)
dev_kfree_skb_any(skb);
}
-void cpsw_rx_handler(void *token, int len, int status)
+static void cpsw_rx_handler(void *token, int len, int status)
{
struct sk_buff *skb = token;
struct sk_buff *new_skb;
@@ -740,6 +740,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
/* set speed_in input in case RMII mode is used in 100Mbps */
if (phy->speed == 100)
mac_control |= BIT(15);
+ else if (phy->speed == 10)
+ mac_control |= BIT(18); /* In Band mode */
*link = true;
} else {
@@ -1151,6 +1153,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
* receive descs
*/
cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
+
+ if (cpts_register(&priv->pdev->dev, priv->cpts,
+ priv->data.cpts_clock_mult,
+ priv->data.cpts_clock_shift))
+ dev_err(priv->dev, "error registering cpts device\n");
+
}
/* Enable Interrupt pacing if configured */
@@ -1197,6 +1205,7 @@ static int cpsw_ndo_stop(struct net_device *ndev)
netif_carrier_off(priv->ndev);
if (cpsw_common_res_usage_state(priv) <= 1) {
+ cpts_unregister(priv->cpts);
cpsw_intr_disable(priv);
cpdma_ctlr_int_ctrl(priv->dma, false);
cpdma_ctlr_stop(priv->dma);
@@ -1322,7 +1331,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
__raw_writel(ETH_P_1588, &priv->regs->ts_ltype);
}
-static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
+static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
struct cpsw_priv *priv = netdev_priv(dev);
struct cpts *cpts = priv->cpts;
@@ -1383,6 +1392,24 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
+static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct cpts *cpts = priv->cpts;
+ struct hwtstamp_config cfg;
+
+ if (priv->version != CPSW_VERSION_1 &&
+ priv->version != CPSW_VERSION_2)
+ return -EOPNOTSUPP;
+
+ cfg.flags = 0;
+ cfg.tx_type = cpts->tx_enable ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = (cpts->rx_enable ?
+ HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
#endif /*CONFIG_TI_CPTS*/
static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
@@ -1397,7 +1424,9 @@ static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
switch (cmd) {
#ifdef CONFIG_TI_CPTS
case SIOCSHWTSTAMP:
- return cpsw_hwtstamp_ioctl(dev, req);
+ return cpsw_hwtstamp_set(dev, req);
+ case SIOCGHWTSTAMP:
+ return cpsw_hwtstamp_get(dev, req);
#endif
case SIOCGMIIPHY:
data->phy_id = priv->slaves[slave_no].phy->addr;
@@ -1816,6 +1845,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
i++;
+ if (i == data->slaves)
+ break;
}
return 0;
@@ -1983,9 +2014,15 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_runtime_disable_ret;
}
priv->regs = ss_regs;
- priv->version = __raw_readl(&priv->regs->id_ver);
priv->host_port = HOST_PORT_NUM;
+ /* Need to enable clocks with runtime PM API to access module
+ * registers.
+ */
+ pm_runtime_get_sync(&pdev->dev);
+ priv->version = readl(&priv->regs->id_ver);
+ pm_runtime_put_sync(&pdev->dev);
+
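Reading id_ver requires the peripheral clocks to be running, hence the pm_runtime_get_sync()/pm_runtime_put_sync() bracket around the single readl(). The same bracket applies to any probe-time register access done before the interface is brought up; as a sketch:

	/* Sketch: base and REG_OFFSET are illustrative names. */
	pm_runtime_get_sync(&pdev->dev);	/* clocks on */
	val = readl(base + REG_OFFSET);
	pm_runtime_put_sync(&pdev->dev);	/* clocks may gate again */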
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
priv->wr_regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->wr_regs)) {
@@ -2091,7 +2128,7 @@ static int cpsw_probe(struct platform_device *pdev)
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
for (i = res->start; i <= res->end; i++) {
if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0,
- dev_name(priv->dev), priv)) {
+ dev_name(&pdev->dev), priv)) {
dev_err(priv->dev, "error attaching irq\n");
goto clean_ale_ret;
}
@@ -2120,8 +2157,8 @@ static int cpsw_probe(struct platform_device *pdev)
data->cpts_clock_mult, data->cpts_clock_shift))
dev_err(priv->dev, "error registering cpts device\n");
- cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
- ss_res->start, ndev->irq);
+ cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n",
+ &ss_res->start, ndev->irq);
if (priv->data.dual_emac) {
ret = cpsw_probe_dual_emac(pdev, priv);
@@ -2155,8 +2192,6 @@ static int cpsw_remove(struct platform_device *pdev)
unregister_netdev(cpsw_get_slave_ndev(priv, 1));
unregister_netdev(ndev);
- cpts_unregister(priv->cpts);
-
cpsw_ale_destroy(priv->ale);
cpdma_chan_destroy(priv->txch);
cpdma_chan_destroy(priv->rxch);
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 7fa60d6092ed..63e981975059 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -163,7 +163,7 @@ int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
if (cpsw_ale_get_vlan_id(ale_entry) != vid)
continue;
cpsw_ale_get_addr(ale_entry, entry_addr);
- if (memcmp(entry_addr, addr, 6) == 0)
+ if (ether_addr_equal(entry_addr, addr))
return idx;
}
return -ENOENT;
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 90a79462c869..364d0c7952c0 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -81,7 +81,7 @@ struct cpdma_desc {
};
struct cpdma_desc_pool {
- u32 phys;
+ phys_addr_t phys;
u32 hw_addr;
void __iomem *iomap; /* ioremap map */
void *cpumap; /* dma_alloc map */
@@ -219,8 +219,7 @@ static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
{
if (!desc)
return 0;
- return pool->hw_addr + (__force dma_addr_t)desc -
- (__force dma_addr_t)pool->iomap;
+ return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}
static inline struct cpdma_desc __iomem *
@@ -972,7 +971,7 @@ struct cpdma_control_info {
#define ACCESS_RW (ACCESS_RO | ACCESS_WO)
};
-struct cpdma_control_info controls[] = {
+static struct cpdma_control_info controls[] = {
[CPDMA_CMD_IDLE] = {CPDMA_DMACONTROL, 3, 1, ACCESS_WO},
[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL, 4, 1, ACCESS_RW},
[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL, 2, 1, ACCESS_RW},
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 41ba974bf37c..cd9b164a0434 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -61,6 +61,7 @@
#include <linux/davinci_emac.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
@@ -1752,10 +1753,14 @@ static const struct net_device_ops emac_netdev_ops = {
#endif
};
+static const struct of_device_id davinci_emac_of_match[];
+
static struct emac_platform_data *
davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
{
struct device_node *np;
+ const struct of_device_id *match;
+ const struct emac_platform_data *auxdata;
struct emac_platform_data *pdata = NULL;
const u8 *mac_addr;
@@ -1793,7 +1798,20 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
if (!priv->phy_node)
- pdata->phy_id = "";
+ pdata->phy_id = NULL;
+
+ auxdata = pdev->dev.platform_data;
+ if (auxdata) {
+ pdata->interrupt_enable = auxdata->interrupt_enable;
+ pdata->interrupt_disable = auxdata->interrupt_disable;
+ }
+
+ match = of_match_device(davinci_emac_of_match, &pdev->dev);
+ if (match && match->data) {
+ auxdata = match->data;
+ pdata->version = auxdata->version;
+ pdata->hw_ram_addr = auxdata->hw_ram_addr;
+ }
pdev->dev.platform_data = pdata;
@@ -2020,8 +2038,14 @@ static const struct dev_pm_ops davinci_emac_pm_ops = {
};
#if IS_ENABLED(CONFIG_OF)
+static const struct emac_platform_data am3517_emac_data = {
+ .version = EMAC_VERSION_2,
+ .hw_ram_addr = 0x01e20000,
+};
+
static const struct of_device_id davinci_emac_of_match[] = {
{.compatible = "ti,davinci-dm6467-emac", },
+ {.compatible = "ti,am3517-emac", .data = &am3517_emac_data, },
{},
};
MODULE_DEVICE_TABLE(of, davinci_emac_of_match);
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig
index 4083ba8839e1..f59a6c265331 100644
--- a/drivers/net/ethernet/tile/Kconfig
+++ b/drivers/net/ethernet/tile/Kconfig
@@ -9,20 +9,10 @@ config TILE_NET
select CRC32
select TILE_GXIO_MPIPE if TILEGX
select HIGH_RES_TIMERS if TILEGX
+ select PTP_1588_CLOCK if TILEGX
---help---
This is a standard Linux network device driver for the
on-chip Tilera Gigabit Ethernet and XAUI interfaces.
To compile this driver as a module, choose M here: the module
will be called tile_net.
-
-config PTP_1588_CLOCK_TILEGX
- tristate "Tilera TILE-Gx mPIPE as PTP clock"
- select PTP_1588_CLOCK
- depends on TILE_NET
- depends on TILEGX
- ---help---
- This driver adds support for using the mPIPE as a PTP
- clock. This clock is only useful if your PTP programs are
- getting hardware time stamps on the PTP Ethernet packets
- using the SO_TIMESTAMPING API.
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 628b736e5ae7..570495be77f3 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -187,10 +187,8 @@ struct tile_net_priv {
int echannel;
/* mPIPE instance, 0 or 1. */
int instance;
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
/* The timestamp config. */
struct hwtstamp_config stamp_cfg;
-#endif
};
static struct mpipe_data {
@@ -229,14 +227,12 @@ static struct mpipe_data {
int first_bucket;
int num_buckets;
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
/* PTP-specific data. */
struct ptp_clock *ptp_clock;
struct ptp_clock_info caps;
/* Lock for ptp accessors. */
struct mutex ptp_lock;
-#endif
} mpipe_data[NR_MPIPE_MAX] = {
[0 ... (NR_MPIPE_MAX - 1)] {
@@ -451,20 +447,17 @@ static void tile_net_provide_needed_buffers(void)
static void tile_rx_timestamp(struct tile_net_priv *priv, struct sk_buff *skb,
gxio_mpipe_idesc_t *idesc)
{
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
if (unlikely(priv->stamp_cfg.rx_filter != HWTSTAMP_FILTER_NONE)) {
struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
memset(shhwtstamps, 0, sizeof(*shhwtstamps));
shhwtstamps->hwtstamp = ktime_set(idesc->time_stamp_sec,
idesc->time_stamp_ns);
}
-#endif
}
/* Get TX timestamp, and store it in the skb. */
static void tile_tx_timestamp(struct sk_buff *skb, int instance)
{
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
struct skb_shared_info *shtx = skb_shinfo(skb);
if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
struct mpipe_data *md = &mpipe_data[instance];
@@ -477,14 +470,11 @@ static void tile_tx_timestamp(struct sk_buff *skb, int instance)
shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
skb_tstamp_tx(skb, &shhwtstamps);
}
-#endif
}
/* Use ioctl() to enable or disable TX or RX timestamping. */
-static int tile_hwtstamp_ioctl(struct net_device *dev, struct ifreq *rq,
- int cmd)
+static int tile_hwtstamp_set(struct net_device *dev, struct ifreq *rq)
{
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
struct hwtstamp_config config;
struct tile_net_priv *priv = netdev_priv(dev);
@@ -530,9 +520,17 @@ static int tile_hwtstamp_ioctl(struct net_device *dev, struct ifreq *rq,
priv->stamp_cfg = config;
return 0;
-#else
- return -EOPNOTSUPP;
-#endif
+}
+
+static int tile_hwtstamp_get(struct net_device *dev, struct ifreq *rq)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ if (copy_to_user(rq->ifr_data, &priv->stamp_cfg,
+ sizeof(priv->stamp_cfg)))
+ return -EFAULT;
+
+ return 0;
}
static inline bool filter_packet(struct net_device *dev, void *buf)
@@ -814,8 +812,6 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
return HRTIMER_NORESTART;
}
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
-
/* PTP clock operations. */
static int ptp_mpipe_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
@@ -882,12 +878,9 @@ static struct ptp_clock_info ptp_mpipe_caps = {
.enable = ptp_mpipe_enable,
};
-#endif /* CONFIG_PTP_1588_CLOCK_TILEGX */
-
/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
{
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
struct timespec ts;
getnstimeofday(&ts);
@@ -899,16 +892,13 @@ static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
if (IS_ERR(md->ptp_clock))
netdev_err(dev, "ptp_clock_register failed %ld\n",
PTR_ERR(md->ptp_clock));
-#endif
}
/* Initialize PTP fields in a new device. */
static void init_ptp_dev(struct tile_net_priv *priv)
{
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
priv->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
priv->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
-#endif
}
/* Helper functions for "tile_net_update()". */
@@ -2098,7 +2088,9 @@ static void tile_net_tx_timeout(struct net_device *dev)
static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
if (cmd == SIOCSHWTSTAMP)
- return tile_hwtstamp_ioctl(dev, rq, cmd);
+ return tile_hwtstamp_set(dev, rq);
+ if (cmd == SIOCGHWTSTAMP)
+ return tile_hwtstamp_get(dev, rq);
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index f7f2ef49c0c1..d899d0072ae0 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1739,12 +1739,14 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
GELIC_CARD_PORT_STATUS_CHANGED;
- if (gelic_card_init_chain(card, &card->tx_chain,
- card->descr, GELIC_NET_TX_DESCRIPTORS))
+ result = gelic_card_init_chain(card, &card->tx_chain,
+ card->descr, GELIC_NET_TX_DESCRIPTORS);
+ if (result)
goto fail_alloc_tx;
- if (gelic_card_init_chain(card, &card->rx_chain,
- card->descr + GELIC_NET_TX_DESCRIPTORS,
- GELIC_NET_RX_DESCRIPTORS))
+ result = gelic_card_init_chain(card, &card->rx_chain,
+ card->descr + GELIC_NET_TX_DESCRIPTORS,
+ GELIC_NET_RX_DESCRIPTORS);
+ if (result)
goto fail_alloc_rx;
/* head of chain */
@@ -1754,7 +1756,8 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
card->rx_top, card->tx_top, sizeof(struct gelic_descr),
GELIC_NET_RX_DESCRIPTORS);
/* allocate rx skbs */
- if (gelic_card_alloc_rx_skbs(card))
+ result = gelic_card_alloc_rx_skbs(card);
+ if (result)
goto fail_alloc_skbs;
spin_lock_init(&card->tx_lock);
@@ -1772,7 +1775,8 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
}
#ifdef CONFIG_GELIC_WIRELESS
- if (gelic_wl_driver_probe(card)) {
+ result = gelic_wl_driver_probe(card);
+ if (result) {
dev_dbg(&dev->core, "%s: WL init failed\n", __func__);
goto fail_setup_netdev;
}
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 1322546d92ac..f6b3212ec339 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -1170,19 +1170,12 @@ static int tc35815_tx_full(struct net_device *dev)
static void tc35815_restart(struct net_device *dev)
{
struct tc35815_local *lp = netdev_priv(dev);
+ int ret;
if (lp->phy_dev) {
- int timeout;
-
- phy_write(lp->phy_dev, MII_BMCR, BMCR_RESET);
- timeout = 100;
- while (--timeout) {
- if (!(phy_read(lp->phy_dev, MII_BMCR) & BMCR_RESET))
- break;
- udelay(1);
- }
- if (!timeout)
- printk(KERN_ERR "%s: BMCR reset failed.\n", dev->name);
+ ret = phy_init_hw(lp->phy_dev);
+ if (ret)
+ printk(KERN_ERR "%s: PHY init failed.\n", dev->name);
}
spin_lock_bh(&lp->rx_lock);
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.h b/drivers/net/ethernet/tundra/tsi108_eth.h
index 5fee7d78dc6d..4a03c594b2b1 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.h
+++ b/drivers/net/ethernet/tundra/tsi108_eth.h
@@ -16,9 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index d022bf936572..ad61d26a44f3 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2172,16 +2172,13 @@ static int velocity_poll(struct napi_struct *napi, int budget)
unsigned int rx_done;
unsigned long flags;
- spin_lock_irqsave(&vptr->lock, flags);
/*
* Do rx and tx twice for performance (taken from the VIA
* out-of-tree driver).
*/
- rx_done = velocity_rx_srv(vptr, budget / 2);
- velocity_tx_srv(vptr);
- rx_done += velocity_rx_srv(vptr, budget - rx_done);
+ rx_done = velocity_rx_srv(vptr, budget);
+ spin_lock_irqsave(&vptr->lock, flags);
velocity_tx_srv(vptr);
-
/* If budget not fully consumed, exit the polling mode */
if (rx_done < budget) {
napi_complete(napi);
@@ -2342,6 +2339,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
if (ret < 0)
goto out_free_tmp_vptr_1;
+ napi_disable(&vptr->napi);
+
spin_lock_irqsave(&vptr->lock, flags);
netif_stop_queue(dev);
@@ -2362,6 +2361,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
velocity_give_many_rx_descs(vptr);
+ napi_enable(&vptr->napi);
+
mac_enable_int(vptr->mac_regs);
netif_start_queue(dev);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 1f2364126323..2166e879a096 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1017,7 +1017,7 @@ static int temac_of_probe(struct platform_device *op)
platform_set_drvdata(op, ndev);
SET_NETDEV_DEV(ndev, &op->dev);
ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
- ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
+ ndev->features = NETIF_F_SG;
ndev->netdev_ops = &temac_netdev_ops;
ndev->ethtool_ops = &temac_ethtool_ops;
#if 0
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index b2ff038d6d20..f9293da19e26 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1486,7 +1486,7 @@ static int axienet_of_probe(struct platform_device *op)
SET_NETDEV_DEV(ndev, &op->dev);
ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
- ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
+ ndev->features = NETIF_F_SG;
ndev->netdev_ops = &axienet_netdev_ops;
ndev->ethtool_ops = &axienet_ethtool_ops;
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 74234a51c851..fefb8cd5eb65 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -163,26 +163,9 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata)
__raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
drvdata->base_addr + XEL_TSR_OFFSET);
- /* Enable the Tx interrupts for the second Buffer if
- * configured in HW */
- if (drvdata->tx_ping_pong != 0) {
- reg_data = __raw_readl(drvdata->base_addr +
- XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
- __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
- drvdata->base_addr + XEL_BUFFER_OFFSET +
- XEL_TSR_OFFSET);
- }
-
/* Enable the Rx interrupts for the first buffer */
__raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
- /* Enable the Rx interrupts for the second Buffer if
- * configured in HW */
- if (drvdata->rx_ping_pong != 0) {
- __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr +
- XEL_BUFFER_OFFSET + XEL_RSR_OFFSET);
- }
-
/* Enable the Global Interrupt Enable */
__raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
}
@@ -206,31 +189,10 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
__raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
drvdata->base_addr + XEL_TSR_OFFSET);
- /* Disable the Tx interrupts for the second Buffer
- * if configured in HW */
- if (drvdata->tx_ping_pong != 0) {
- reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET +
- XEL_TSR_OFFSET);
- __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
- drvdata->base_addr + XEL_BUFFER_OFFSET +
- XEL_TSR_OFFSET);
- }
-
/* Disable the Rx interrupts for the first buffer */
reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET);
__raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
drvdata->base_addr + XEL_RSR_OFFSET);
-
- /* Disable the Rx interrupts for the second buffer
- * if configured in HW */
- if (drvdata->rx_ping_pong != 0) {
-
- reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET +
- XEL_RSR_OFFSET);
- __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
- drvdata->base_addr + XEL_BUFFER_OFFSET +
- XEL_RSR_OFFSET);
- }
}
/**
@@ -258,6 +220,13 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
*to_u16_ptr++ = *from_u16_ptr++;
*to_u16_ptr++ = *from_u16_ptr++;
+ /* Make sure the data has drained from the processor
+ * store buffers to the destination memory before the
+ * word below is written out; the stores were otherwise
+ * occasionally observed to be reordered.
+ */
+ wmb();
+
/* Output a word */
*to_u32_ptr++ = align_buffer;
}
@@ -273,6 +242,12 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
for (; length > 0; length--)
*to_u8_ptr++ = *from_u8_ptr++;
+ /* Make sure the data has drained from the processor
+ * store buffers to the destination memory before the
+ * word below is written out; the stores were otherwise
+ * occasionally observed to be reordered.
+ */
+ wmb();
*to_u32_ptr = align_buffer;
}
}
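
wmb() orders all preceding stores against the stores that follow it. A minimal sketch of the same publish pattern, assuming a hypothetical shared_buf layout consumed by a device or another CPU:

	struct shared_buf {
		u32 data;
		u32 ready;
	};

	static void publish(struct shared_buf *buf, u32 value)
	{
		buf->data = value;
		/* Drain the store buffer so the payload is visible
		 * before the consumer sees ready != 0. */
		wmb();
		buf->ready = 1;
	}
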
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index bdd20b888cf6..7c81ffb861e8 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -27,8 +27,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*
* ALTERNATIVELY, this driver may be distributed under the terms of
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index bcc224a83734..25283f17d82f 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -373,7 +373,7 @@ static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
__raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}
-static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
struct hwtstamp_config cfg;
struct ixp46x_ts_regs *regs;
@@ -417,6 +417,32 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
+static int hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct hwtstamp_config cfg;
+ struct port *port = netdev_priv(netdev);
+
+ cfg.flags = 0;
+ cfg.tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+
+ switch (port->hwts_rx_en) {
+ case 0:
+ cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ case PTP_SLAVE_MODE:
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ break;
+ case PTP_MASTER_MODE:
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -ERANGE;
+ }
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
int write, u16 cmd)
{
@@ -959,8 +985,12 @@ static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
if (!netif_running(dev))
return -EINVAL;
- if (cpu_is_ixp46x() && cmd == SIOCSHWTSTAMP)
- return hwtstamp_ioctl(dev, req, cmd);
+ if (cpu_is_ixp46x()) {
+ if (cmd == SIOCSHWTSTAMP)
+ return hwtstamp_set(dev, req);
+ if (cmd == SIOCGHWTSTAMP)
+ return hwtstamp_get(dev, req);
+ }
return phy_mii_ioctl(port->phydev, req, cmd);
}
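
With the get path wired up, userspace can query the current timestamping configuration through SIOCGHWTSTAMP instead of remembering what it last set. A minimal userspace sketch, assuming a kernel new enough to support the ioctl and using "eth0" as a placeholder interface name:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct hwtstamp_config cfg;
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		memset(&cfg, 0, sizeof(cfg));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		if (ioctl(fd, SIOCGHWTSTAMP, &ifr) == 0)
			printf("tx_type %d rx_filter %d\n",
			       cfg.tx_type, cfg.rx_filter);
		close(fd);
		return 0;
	}
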
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 0b40e1c46f07..0344f71bf4a5 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -241,12 +241,6 @@ static char version[] =
*/
#define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128)
-#ifdef CONFIG_PCI
-#define DFX_BUS_PCI(dev) (dev->bus == &pci_bus_type)
-#else
-#define DFX_BUS_PCI(dev) 0
-#endif
-
#ifdef CONFIG_EISA
#define DFX_BUS_EISA(dev) (dev->bus == &eisa_bus_type)
#else
@@ -436,7 +430,7 @@ static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
static void dfx_get_bars(struct device *bdev,
resource_size_t *bar_start, resource_size_t *bar_len)
{
- int dfx_bus_pci = DFX_BUS_PCI(bdev);
+ int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
@@ -518,7 +512,7 @@ static const struct net_device_ops dfx_netdev_ops = {
static int dfx_register(struct device *bdev)
{
static int version_disp;
- int dfx_bus_pci = DFX_BUS_PCI(bdev);
+ int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
const char *print_name = dev_name(bdev);
@@ -667,7 +661,7 @@ static void dfx_bus_init(struct net_device *dev)
{
DFX_board_t *bp = netdev_priv(dev);
struct device *bdev = bp->bus_dev;
- int dfx_bus_pci = DFX_BUS_PCI(bdev);
+ int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
@@ -813,7 +807,7 @@ static void dfx_bus_uninit(struct net_device *dev)
{
DFX_board_t *bp = netdev_priv(dev);
struct device *bdev = bp->bus_dev;
- int dfx_bus_pci = DFX_BUS_PCI(bdev);
+ int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
u8 val;
@@ -967,7 +961,7 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
{
DFX_board_t *bp = netdev_priv(dev);
struct device *bdev = bp->bus_dev;
- int dfx_bus_pci = DFX_BUS_PCI(bdev);
+ int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
@@ -1877,7 +1871,7 @@ static irqreturn_t dfx_interrupt(int irq, void *dev_id)
struct net_device *dev = dev_id;
DFX_board_t *bp = netdev_priv(dev);
struct device *bdev = bp->bus_dev;
- int dfx_bus_pci = DFX_BUS_PCI(bdev);
+ int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
@@ -3579,7 +3573,7 @@ static void dfx_unregister(struct device *bdev)
{
struct net_device *dev = dev_get_drvdata(bdev);
DFX_board_t *bp = netdev_priv(dev);
- int dfx_bus_pci = DFX_BUS_PCI(bdev);
+ int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
resource_size_t bar_start = 0; /* pointer to port */
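
dev_is_pci() is the generic helper for the bus check that the driver-local DFX_BUS_PCI macro open-coded, and it needs no CONFIG_PCI guard. A minimal sketch of its typical use together with to_pci_dev(), with my_setup() as a hypothetical caller:

	#include <linux/pci.h>

	static void my_setup(struct device *bdev)
	{
		if (dev_is_pci(bdev)) {
			struct pci_dev *pdev = to_pci_dev(bdev);

			/* PCI-specific setup would go here (hypothetical) */
			dev_info(&pdev->dev, "PCI attachment\n");
		}
	}
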
diff --git a/drivers/net/fddi/skfp/fplustm.c b/drivers/net/fddi/skfp/fplustm.c
index f83993590174..7d3779ae7377 100644
--- a/drivers/net/fddi/skfp/fplustm.c
+++ b/drivers/net/fddi/skfp/fplustm.c
@@ -23,6 +23,7 @@
#include "h/smc.h"
#include "h/supern_2.h"
#include <linux/bitrev.h>
+#include <linux/etherdevice.h>
#ifndef lint
static const char ID_sccs[] = "@(#)fplustm.c 1.32 99/02/23 (C) SK " ;
@@ -55,14 +56,14 @@ static char cam_warning [] = "E_SMT_004: CAM still busy\n";
#define DUMMY_READ() smc->hw.mc_dummy = (u_short) inp(ADDR(B0_RAP))
-#define CHECK_NPP() { unsigned k = 10000 ;\
+#define CHECK_NPP() { unsigned int k = 10000 ;\
while ((inpw(FM_A(FM_STMCHN)) & FM_SNPPND) && k) k--;\
if (!k) { \
SMT_PANIC(smc,SMT_E0130, SMT_E0130_MSG) ; \
} \
}
-#define CHECK_CAM() { unsigned k = 10 ;\
+#define CHECK_CAM() { unsigned int k = 10 ;\
while (!(inpw(FM_A(FM_AFSTAT)) & FM_DONE) && k) k--;\
if (!k) { \
SMT_PANIC(smc,SMT_E0131, SMT_E0131_MSG) ; \
@@ -356,25 +357,25 @@ static void set_formac_addr(struct s_smc *smc)
long t_requ = smc->mib.m[MAC0].fddiMACT_Req ;
outpw(FM_A(FM_SAID),my_said) ; /* set short address */
- outpw(FM_A(FM_LAIL),(unsigned)((smc->hw.fddi_home_addr.a[4]<<8) +
+ outpw(FM_A(FM_LAIL),(unsigned short)((smc->hw.fddi_home_addr.a[4]<<8) +
smc->hw.fddi_home_addr.a[5])) ;
- outpw(FM_A(FM_LAIC),(unsigned)((smc->hw.fddi_home_addr.a[2]<<8) +
+ outpw(FM_A(FM_LAIC),(unsigned short)((smc->hw.fddi_home_addr.a[2]<<8) +
smc->hw.fddi_home_addr.a[3])) ;
- outpw(FM_A(FM_LAIM),(unsigned)((smc->hw.fddi_home_addr.a[0]<<8) +
+ outpw(FM_A(FM_LAIM),(unsigned short)((smc->hw.fddi_home_addr.a[0]<<8) +
smc->hw.fddi_home_addr.a[1])) ;
outpw(FM_A(FM_SAGP),my_sagp) ; /* set short group address */
- outpw(FM_A(FM_LAGL),(unsigned)((smc->hw.fp.group_addr.a[4]<<8) +
+ outpw(FM_A(FM_LAGL),(unsigned short)((smc->hw.fp.group_addr.a[4]<<8) +
smc->hw.fp.group_addr.a[5])) ;
- outpw(FM_A(FM_LAGC),(unsigned)((smc->hw.fp.group_addr.a[2]<<8) +
+ outpw(FM_A(FM_LAGC),(unsigned short)((smc->hw.fp.group_addr.a[2]<<8) +
smc->hw.fp.group_addr.a[3])) ;
- outpw(FM_A(FM_LAGM),(unsigned)((smc->hw.fp.group_addr.a[0]<<8) +
+ outpw(FM_A(FM_LAGM),(unsigned short)((smc->hw.fp.group_addr.a[0]<<8) +
smc->hw.fp.group_addr.a[1])) ;
/* set r_request regs. (MSW & LSW of TRT ) */
- outpw(FM_A(FM_TREQ1),(unsigned)(t_requ>>16)) ;
- outpw(FM_A(FM_TREQ0),(unsigned)t_requ) ;
+ outpw(FM_A(FM_TREQ1),(unsigned short)(t_requ>>16)) ;
+ outpw(FM_A(FM_TREQ0),(unsigned short)t_requ) ;
}
static void set_int(char *p, int l)
@@ -394,10 +395,10 @@ static void set_int(char *p, int l)
* append 'end of chain' pointer
*/
static void copy_tx_mac(struct s_smc *smc, u_long td, struct fddi_mac *mac,
- unsigned off, int len)
+ unsigned int off, int len)
/* u_long td; transmit descriptor */
/* struct fddi_mac *mac; mac frame pointer */
-/* unsigned off; start address within buffer memory */
+/* unsigned int off; start address within buffer memory */
/* int len ; length of the frame including the FC */
{
int i ;
@@ -1082,7 +1083,7 @@ static struct s_fpmc* mac_get_mc_table(struct s_smc *smc,
slot = tb ;
continue ;
}
- if (memcmp((char *)&tb->a,(char *)own,6))
+ if (!ether_addr_equal((char *)&tb->a, (char *)own))
continue ;
return tb;
}
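
Note the inverted sense when converting: memcmp() returns 0 on a match, while ether_addr_equal() returns true, so the test above becomes !ether_addr_equal(). A sketch of both directions of the conversion:

	#include <linux/etherdevice.h>

	/* The equality test flips sense:
	 *
	 *	if (memcmp(a, b, ETH_ALEN))	->  if (!ether_addr_equal(a, b))
	 *	if (!memcmp(a, b, ETH_ALEN))	->  if (ether_addr_equal(a, b))
	 *
	 * ether_addr_equal() may also be faster, since it can compare
	 * the six bytes as one 32-bit and one 16-bit load.
	 */
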
diff --git a/drivers/net/fddi/skfp/h/supern_2.h b/drivers/net/fddi/skfp/h/supern_2.h
index 0b73690280f6..4ee360d2dc62 100644
--- a/drivers/net/fddi/skfp/h/supern_2.h
+++ b/drivers/net/fddi/skfp/h/supern_2.h
@@ -92,33 +92,33 @@
union rx_descr {
struct {
#ifdef LITTLE_ENDIAN
- unsigned rx_length :16 ; /* frame length lower/upper byte */
- unsigned rx_erfbb :2 ; /* received frame byte boundary */
- unsigned rx_reserv2:2 ; /* reserved */
- unsigned rx_sfrmty :3 ; /* frame type bits */
- unsigned rx_sadrrg :1 ; /* DA == MA or broad-/multicast */
- unsigned rx_sfrmerr:1 ; /* received frame not valid */
- unsigned rx_seac0 :1 ; /* frame-copied C-indicator */
- unsigned rx_seac1 :1 ; /* address-match A-indicator */
- unsigned rx_seac2 :1 ; /* frame-error E-indicator */
- unsigned rx_ssrcrtg:1 ; /* == 1 SA has MSB set */
- unsigned rx_reserv1:1 ; /* reserved */
- unsigned rx_msrabt :1 ; /* memory status receive abort */
- unsigned rx_msvalid:1 ; /* memory status valid */
+ unsigned int rx_length :16 ; /* frame length lower/upper byte */
+ unsigned int rx_erfbb :2 ; /* received frame byte boundary */
+ unsigned int rx_reserv2:2 ; /* reserved */
+ unsigned int rx_sfrmty :3 ; /* frame type bits */
+ unsigned int rx_sadrrg :1 ; /* DA == MA or broad-/multicast */
+ unsigned int rx_sfrmerr:1 ; /* received frame not valid */
+ unsigned int rx_seac0 :1 ; /* frame-copied C-indicator */
+ unsigned int rx_seac1 :1 ; /* address-match A-indicator */
+ unsigned int rx_seac2 :1 ; /* frame-error E-indicator */
+ unsigned int rx_ssrcrtg:1 ; /* == 1 SA has MSB set */
+ unsigned int rx_reserv1:1 ; /* reserved */
+ unsigned int rx_msrabt :1 ; /* memory status receive abort */
+ unsigned int rx_msvalid:1 ; /* memory status valid */
#else
- unsigned rx_msvalid:1 ; /* memory status valid */
- unsigned rx_msrabt :1 ; /* memory status receive abort */
- unsigned rx_reserv1:1 ; /* reserved */
- unsigned rx_ssrcrtg:1 ; /* == 1 SA has MSB set */
- unsigned rx_seac2 :1 ; /* frame-error E-indicator */
- unsigned rx_seac1 :1 ; /* address-match A-indicator */
- unsigned rx_seac0 :1 ; /* frame-copied C-indicator */
- unsigned rx_sfrmerr:1 ; /* received frame not valid */
- unsigned rx_sadrrg :1 ; /* DA == MA or broad-/multicast */
- unsigned rx_sfrmty :3 ; /* frame type bits */
- unsigned rx_erfbb :2 ; /* received frame byte boundary */
- unsigned rx_reserv2:2 ; /* reserved */
- unsigned rx_length :16 ; /* frame length lower/upper byte */
+ unsigned int rx_msvalid:1 ; /* memory status valid */
+ unsigned int rx_msrabt :1 ; /* memory status receive abort */
+ unsigned int rx_reserv1:1 ; /* reserved */
+ unsigned int rx_ssrcrtg:1 ; /* == 1 SA has MSB set */
+ unsigned int rx_seac2 :1 ; /* frame-error E-indicator */
+ unsigned int rx_seac1 :1 ; /* address-match A-indicator */
+ unsigned int rx_seac0 :1 ; /* frame-copied C-indicator */
+ unsigned int rx_sfrmerr:1 ; /* received frame not valid */
+ unsigned int rx_sadrrg :1 ; /* DA == MA or broad-/multicast */
+ unsigned int rx_sfrmty :3 ; /* frame type bits */
+ unsigned int rx_erfbb :2 ; /* received frame byte boundary */
+ unsigned int rx_reserv2:2 ; /* reserved */
+ unsigned int rx_length :16 ; /* frame length lower/upper byte */
#endif
} r ;
long i ;
@@ -162,23 +162,23 @@ union rx_descr {
union tx_descr {
struct {
#ifdef LITTLE_ENDIAN
- unsigned tx_length:16 ; /* frame length lower/upper byte */
- unsigned tx_res :8 ; /* reserved (bit 16..23) */
- unsigned tx_xmtabt:1 ; /* transmit abort */
- unsigned tx_nfcs :1 ; /* no frame check sequence */
- unsigned tx_xdone :1 ; /* give up token */
- unsigned tx_rpxm :2 ; /* byte offset */
- unsigned tx_pat1 :2 ; /* must be TXP1 */
- unsigned tx_more :1 ; /* more frame in chain */
+ unsigned int tx_length:16 ; /* frame length lower/upper byte */
+ unsigned int tx_res :8 ; /* reserved (bit 16..23) */
+ unsigned int tx_xmtabt:1 ; /* transmit abort */
+ unsigned int tx_nfcs :1 ; /* no frame check sequence */
+ unsigned int tx_xdone :1 ; /* give up token */
+ unsigned int tx_rpxm :2 ; /* byte offset */
+ unsigned int tx_pat1 :2 ; /* must be TXP1 */
+ unsigned int tx_more :1 ; /* more frame in chain */
#else
- unsigned tx_more :1 ; /* more frame in chain */
- unsigned tx_pat1 :2 ; /* must be TXP1 */
- unsigned tx_rpxm :2 ; /* byte offset */
- unsigned tx_xdone :1 ; /* give up token */
- unsigned tx_nfcs :1 ; /* no frame check sequence */
- unsigned tx_xmtabt:1 ; /* transmit abort */
- unsigned tx_res :8 ; /* reserved (bit 16..23) */
- unsigned tx_length:16 ; /* frame length lower/upper byte */
+ unsigned int tx_more :1 ; /* more frame in chain */
+ unsigned int tx_pat1 :2 ; /* must be TXP1 */
+ unsigned int tx_rpxm :2 ; /* byte offset */
+ unsigned int tx_xdone :1 ; /* give up token */
+ unsigned int tx_nfcs :1 ; /* no frame check sequence */
+ unsigned int tx_xmtabt:1 ; /* transmit abort */
+ unsigned int tx_res :8 ; /* reserved (bit 16..23) */
+ unsigned int tx_length:16 ; /* frame length lower/upper byte */
#endif
} t ;
long i ;
@@ -202,13 +202,13 @@ union tx_descr {
union tx_pointer {
struct t {
#ifdef LITTLE_ENDIAN
- unsigned tp_pointer:16 ; /* pointer to tx_descr (low/high) */
- unsigned tp_res :8 ; /* reserved (bit 16..23) */
- unsigned tp_pattern:8 ; /* fixed pattern (bit 24..31) */
+ unsigned int tp_pointer:16 ; /* pointer to tx_descr (low/high) */
+ unsigned int tp_res :8 ; /* reserved (bit 16..23) */
+ unsigned int tp_pattern:8 ; /* fixed pattern (bit 24..31) */
#else
- unsigned tp_pattern:8 ; /* fixed pattern (bit 24..31) */
- unsigned tp_res :8 ; /* reserved (bit 16..23) */
- unsigned tp_pointer:16 ; /* pointer to tx_descr (low/high) */
+ unsigned int tp_pattern:8 ; /* fixed pattern (bit 24..31) */
+ unsigned int tp_res :8 ; /* reserved (bit 16..23) */
+ unsigned int tp_pointer:16 ; /* pointer to tx_descr (low/high) */
#endif
} t ;
long i ;
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index 713d303a06a9..d5f58121b2e2 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -351,7 +351,6 @@ static void skfp_remove_one(struct pci_dev *pdev)
free_netdev(p);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
/*
diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c
index 08d94329c12f..9edada85ed02 100644
--- a/drivers/net/fddi/skfp/smt.c
+++ b/drivers/net/fddi/skfp/smt.c
@@ -900,7 +900,7 @@ static void smt_send_rdf(struct s_smc *smc, SMbuf *rej, int fc, int reason,
rdf->version.v_pad2 = 0 ;
/* set P13 */
- if ((unsigned) frame_len <= SMT_MAX_INFO_LEN - sizeof(*rdf) +
+ if ((unsigned int) frame_len <= SMT_MAX_INFO_LEN - sizeof(*rdf) +
2*sizeof(struct smt_header))
len = frame_len ;
else
diff --git a/drivers/net/fddi/skfp/srf.c b/drivers/net/fddi/skfp/srf.c
index f6f7baf9f27a..cc27dea3414e 100644
--- a/drivers/net/fddi/skfp/srf.c
+++ b/drivers/net/fddi/skfp/srf.c
@@ -73,7 +73,7 @@ void smt_init_evc(struct s_smc *smc)
{
struct s_srf_evc *evc ;
const struct evc_init *init ;
- int i ;
+ unsigned int i ;
int index ;
int offset ;
@@ -84,7 +84,7 @@ void smt_init_evc(struct s_smc *smc)
evc = smc->evcs ;
init = evc_inits ;
- for (i = 0 ; (unsigned) i < MAX_INIT_EVC ; i++) {
+ for (i = 0 ; i < MAX_INIT_EVC ; i++) {
for (index = 0 ; index < init->n ; index++) {
evc->evc_code = init->code ;
evc->evc_para = init->para ;
@@ -98,7 +98,7 @@ void smt_init_evc(struct s_smc *smc)
init++ ;
}
- if ((unsigned) (evc - smc->evcs) > MAX_EVCS) {
+ if ((unsigned int) (evc - smc->evcs) > MAX_EVCS) {
SMT_PANIC(smc,SMT_E0127, SMT_E0127_MSG) ;
}
@@ -139,7 +139,7 @@ void smt_init_evc(struct s_smc *smc)
offset++ ;
}
#ifdef DEBUG
- for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
if (SMT_IS_CONDITION(evc->evc_code)) {
if (!evc->evc_cond_state) {
SMT_PANIC(smc,SMT_E0128, SMT_E0128_MSG) ;
@@ -160,10 +160,10 @@ void smt_init_evc(struct s_smc *smc)
static struct s_srf_evc *smt_get_evc(struct s_smc *smc, int code, int index)
{
- int i ;
+ unsigned int i ;
struct s_srf_evc *evc ;
- for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
if (evc->evc_code == code && evc->evc_index == index)
return evc;
}
@@ -335,9 +335,9 @@ void smt_srf_event(struct s_smc *smc, int code, int index, int cond)
static void clear_all_rep(struct s_smc *smc)
{
struct s_srf_evc *evc ;
- int i ;
+ unsigned int i ;
- for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
evc->evc_rep_required = FALSE ;
if (SMT_IS_CONDITION(evc->evc_code))
*evc->evc_cond_state = FALSE ;
@@ -348,10 +348,10 @@ static void clear_all_rep(struct s_smc *smc)
static void clear_reported(struct s_smc *smc)
{
struct s_srf_evc *evc ;
- int i ;
+ unsigned int i ;
smc->srf.any_report = FALSE ;
- for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
if (SMT_IS_CONDITION(evc->evc_code)) {
if (*evc->evc_cond_state == FALSE)
evc->evc_rep_required = FALSE ;
@@ -375,7 +375,7 @@ static void smt_send_srf(struct s_smc *smc)
struct s_srf_evc *evc ;
SK_LOC_DECL(struct s_pcon,pcon) ;
SMbuf *mb ;
- int i ;
+ unsigned int i ;
static const struct fddi_addr SMT_SRF_DA = {
{ 0x80, 0x01, 0x43, 0x00, 0x80, 0x08 }
@@ -405,7 +405,7 @@ static void smt_send_srf(struct s_smc *smc)
smt_add_para(smc,&pcon,(u_short) SMT_P1033,0,0) ;
smt_add_para(smc,&pcon,(u_short) SMT_P1034,0,0) ;
- for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
if (evc->evc_rep_required) {
smt_add_para(smc,&pcon,evc->evc_para,
(int)evc->evc_index,0) ;
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 1450e33fc250..66e2b19ef709 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -662,7 +662,8 @@ static int sixpack_open(struct tty_struct *tty)
tty->receive_room = 65536;
/* Now we're ready to register. */
- if (register_netdev(dev))
+ err = register_netdev(dev);
+ if (err)
goto out_free;
tnc_init(sp);
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index f91bf0ddf031..d50b23cf9ea9 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -208,7 +208,7 @@ static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty
eth = eth_hdr(skb);
if (!(bpq->acpt_addr[0] & 0x01) &&
- memcmp(eth->h_source, bpq->acpt_addr, ETH_ALEN))
+ !ether_addr_equal(eth->h_source, bpq->acpt_addr))
goto drop_unlock;
if (skb_cow(skb, sizeof(struct ethhdr)))
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 3169252613fa..5d78c1d08abd 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case HDLCDRVCTL_CALIBRATE:
if(!capable(CAP_SYS_RAWIO))
return -EPERM;
+ if (bi.data.calibrate > INT_MAX / s->par.bitrate)
+ return -EINVAL;
s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
return 0;
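
The added check rejects user-supplied values for which calibrate * bitrate would overflow a signed int before the multiplication happens. The same pre-multiplication guard in isolation, as a minimal sketch:

	#include <limits.h>

	/* Returns 0 and stores a * b in *res, or -1 if the product
	 * would overflow a signed int (assumes a >= 0 and b > 0). */
	static int checked_scale(int a, int b, int *res)
	{
		if (a > INT_MAX / b)
			return -1;
		*res = a * b;
		return 0;
	}
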
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 8e01c457015b..8a6c720a4cc9 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -9,8 +9,7 @@
* for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Copyright (C) Hans Alblas PE1AYX <hans@esrac.ele.tue.nl>
* Copyright (C) 2004, 05 Ralf Baechle DL5RB <ralf@linux-mips.org>
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 1971411574db..61dd2447e1bb 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1057,6 +1057,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break;
case SIOCYAMGCFG:
+ memset(&yi, 0, sizeof(yi));
yi.cfg.mask = 0xffffffff;
yi.cfg.iobase = yp->iobase;
yi.cfg.irq = yp->irq;
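
Without the memset, any padding or fields of yi that the SIOCYAMGCFG handler does not set explicitly would reach userspace as leftover kernel stack bytes. The general pattern for ioctls that copy a struct back out, sketched with a hypothetical struct my_cfg:

	struct my_cfg cfg;	/* hypothetical ioctl reply struct */

	memset(&cfg, 0, sizeof(cfg));	/* no stack data can leak */
	cfg.iobase = priv->iobase;
	cfg.irq = priv->irq;
	if (copy_to_user(argp, &cfg, sizeof(cfg)))
		return -EFAULT;
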
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 00ed75155ce8..cd85f24637e1 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -213,10 +213,8 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rrpriv->tx_ring_dma);
if (rrpriv->regs)
pci_iounmap(pdev, rrpriv->regs);
- if (pdev) {
+ if (pdev)
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
- }
out2:
free_netdev(dev);
out3:
@@ -244,7 +242,6 @@ static void rr_remove_one(struct pci_dev *pdev)
pci_iounmap(pdev, rr->regs);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
}
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index e6fe0d80d612..a26eecb1212c 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -12,8 +12,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*
* Authors:
* Haiyang Zhang <haiyangz@microsoft.com>
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 2b0480416b31..93b485b96249 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -11,8 +11,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*
* Authors:
* Haiyang Zhang <haiyangz@microsoft.com>
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 524f713f6017..7756118c2f0a 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -11,8 +11,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*
* Authors:
* Haiyang Zhang <haiyangz@microsoft.com>
@@ -261,9 +260,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
struct sk_buff *skb;
net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
- if (!net) {
- netdev_err(net, "got receive callback but net device"
- " not initialized yet\n");
+ if (!net || net->reg_state != NETREG_REGISTERED) {
packet->status = NVSP_STAT_FAIL;
return 0;
}
@@ -327,7 +324,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
return -EINVAL;
nvdev->start_remove = true;
- cancel_delayed_work_sync(&ndevctx->dwork);
cancel_work_sync(&ndevctx->work);
netif_tx_disable(ndev);
rndis_filter_device_remove(hdev);
@@ -436,19 +432,11 @@ static int netvsc_probe(struct hv_device *dev,
SET_ETHTOOL_OPS(net, &ethtool_ops);
SET_NETDEV_DEV(net, &dev->device);
- ret = register_netdev(net);
- if (ret != 0) {
- pr_err("Unable to register netdev.\n");
- free_netdev(net);
- goto out;
- }
-
/* Notify the netvsc driver of the new device */
device_info.ring_size = ring_size;
ret = rndis_filter_device_add(dev, &device_info);
if (ret != 0) {
netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
- unregister_netdev(net);
free_netdev(net);
hv_set_drvdata(dev, NULL);
return ret;
@@ -457,7 +445,13 @@ static int netvsc_probe(struct hv_device *dev,
netif_carrier_on(net);
-out:
+ ret = register_netdev(net);
+ if (ret != 0) {
+ pr_err("Unable to register netdev.\n");
+ rndis_filter_device_remove(dev);
+ free_netdev(net);
+ }
+
return ret;
}
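
Moving register_netdev() after rndis_filter_device_add() closes the window in which the receive callback could run against a netdev that is not yet usable; the reg_state check above covers the rest. The resulting probe shape, sketched with hypothetical hw_init()/hw_remove() helpers:

	static int my_probe(struct my_device *dev, struct net_device *net)
	{
		int ret;

		ret = hw_init(dev);		/* make the device operational */
		if (ret) {
			free_netdev(net);
			return ret;
		}

		ret = register_netdev(net);	/* publish it last */
		if (ret) {
			hw_remove(dev);		/* unwind in reverse order */
			free_netdev(net);
		}
		return ret;
	}
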
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 0775f0aefd1e..1084e5de3ceb 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -11,8 +11,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*
* Authors:
* Haiyang Zhang <haiyangz@microsoft.com>
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 2cbe1c249996..ab31544bc254 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -987,7 +987,6 @@ err_gpio_dir:
err_slp_tr:
gpio_free(lp->rstn);
err_rstn:
- spi_set_drvdata(spi, NULL);
mutex_destroy(&lp->bmux);
ieee802154_free_device(lp->dev);
return rc;
@@ -1006,7 +1005,6 @@ static int at86rf230_remove(struct spi_device *spi)
gpio_free(lp->slp_tr);
gpio_free(lp->rstn);
- spi_set_drvdata(spi, NULL);
mutex_destroy(&lp->bmux);
ieee802154_free_device(lp->dev);
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index c6e46d6e9f75..246befa4ba05 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -715,7 +715,6 @@ static int mrf24j40_remove(struct spi_device *spi)
* complete? */
/* Clean up the SPI stuff. */
- spi_set_drvdata(spi, NULL);
kfree(devrec->buf);
kfree(devrec);
return 0;
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 2a30193d0d50..2dc82f1d2e70 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -62,8 +62,6 @@ config SIR_BFIN_PIO
bool "PIO mode"
endchoice
-comment "Dongle support"
-
config SH_SIR
tristate "SuperH SIR on UART"
depends on IRDA && SUPERH && \
@@ -74,6 +72,8 @@ config SH_SIR
Say Y here if you want to enable the SIR function on SuperH UART
devices.
+comment "Dongle support"
+
config DONGLE
bool "Serial dongle support"
depends on IRTTY_SIR
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index 7a1f684edcb5..5b7a665c6d83 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -15,8 +15,7 @@
* for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/init.h>
diff --git a/drivers/net/irda/esi-sir.c b/drivers/net/irda/esi-sir.c
index a908df7c4b9d..019a3e848bcb 100644
--- a/drivers/net/irda/esi-sir.c
+++ b/drivers/net/irda/esi-sir.c
@@ -25,9 +25,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/drivers/net/irda/litelink-sir.c b/drivers/net/irda/litelink-sir.c
index d6d9d2e5ad49..6827777cbeea 100644
--- a/drivers/net/irda/litelink-sir.c
+++ b/drivers/net/irda/litelink-sir.c
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/drivers/net/irda/ma600-sir.c b/drivers/net/irda/ma600-sir.c
index e91216452379..a9a81358477b 100644
--- a/drivers/net/irda/ma600-sir.c
+++ b/drivers/net/irda/ma600-sir.c
@@ -25,9 +25,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/drivers/net/irda/old_belkin-sir.c b/drivers/net/irda/old_belkin-sir.c
index 75714bc71030..f237136f3827 100644
--- a/drivers/net/irda/old_belkin-sir.c
+++ b/drivers/net/irda/old_belkin-sir.c
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index ff45cd0d60e8..c96b46b2c3a8 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -804,7 +804,7 @@ static int sh_irda_probe(struct platform_device *pdev)
goto err_mem_4;
platform_set_drvdata(pdev, ndev);
- err = request_irq(irq, sh_irda_irq, 0, "sh_irda", self);
+ err = devm_request_irq(&pdev->dev, irq, sh_irda_irq, 0, "sh_irda", self);
if (err) {
dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
goto err_mem_4;
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 8d9ae5a086d5..cadf52e22464 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -761,7 +761,7 @@ static int sh_sir_probe(struct platform_device *pdev)
goto err_mem_4;
platform_set_drvdata(pdev, ndev);
- err = request_irq(irq, sh_sir_irq, 0, "sh_sir", self);
+ err = devm_request_irq(&pdev->dev, irq, sh_sir_irq, 0, "sh_sir", self);
if (err) {
dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
goto err_mem_4;
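
In both sh_irda and sh_sir, devm_request_irq() ties the IRQ to the device's lifetime, so the remove path and the probe error unwinding no longer need a matching free_irq(). A minimal sketch of the converted call, with my_irq_handler and priv as hypothetical names:

	err = devm_request_irq(&pdev->dev, irq, my_irq_handler, 0,
			       dev_name(&pdev->dev), priv);
	if (err)
		return err;	/* nothing to free: devm unwinds automatically */
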
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 0dcdf1592f6b..282120430f12 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -34,9 +34,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/drivers/net/irda/smsc-ircc2.h b/drivers/net/irda/smsc-ircc2.h
index 317b7fd69bb3..4829fa22cb29 100644
--- a/drivers/net/irda/smsc-ircc2.h
+++ b/drivers/net/irda/smsc-ircc2.h
@@ -25,9 +25,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 9abaec27f962..2900af091c2d 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -17,8 +17,7 @@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc.,
-59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+this program; if not, see <http://www.gnu.org/licenses/>.
F01 Oct/02/02: Modify code for V0.11(move out back to back transfer)
F02 Oct/28/02: Add SB device ID for 3147 and 3177.
@@ -408,7 +407,6 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
err_out2:
release_region(self->io.fir_base, self->io.fir_ext);
err_out1:
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
return err;
}
@@ -442,7 +440,6 @@ static void via_remove_one(struct pci_dev *pdev)
if (self->rx_buff.head)
dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
self->rx_buff.head, self->rx_buff_dma);
- pci_set_drvdata(pdev, NULL);
free_netdev(self->netdev);
diff --git a/drivers/net/irda/via-ircc.h b/drivers/net/irda/via-ircc.h
index f903a6a2dcb7..7ce820ecc361 100644
--- a/drivers/net/irda/via-ircc.h
+++ b/drivers/net/irda/via-ircc.h
@@ -18,8 +18,7 @@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc.,
-59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+this program; if not, see <http://www.gnu.org/licenses/>.
* Comment:
* jul/08/2002 : Rx buffer length should use Rx ring ptr.
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index c5bd58b4d8a8..485006604bbc 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -15,9 +15,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
@@ -1695,7 +1693,6 @@ out_freedev:
out_disable:
pci_disable_device(pdev);
out:
- pci_set_drvdata(pdev, NULL);
return -ENODEV;
}
@@ -1721,8 +1718,6 @@ static void vlsi_irda_remove(struct pci_dev *pdev)
free_netdev(ndev);
- pci_set_drvdata(pdev, NULL);
-
IRDA_MESSAGE("%s: %s removed\n", drivername, pci_name(pdev));
}
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index a076eb125349..56399204e68c 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -18,9 +18,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index acf93798dc67..09ababe54a5b 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -120,7 +120,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
struct net_device *dev = vlan->dev;
if (local)
- return vlan->forward(dev, skb);
+ return dev_forward_skb(dev, skb);
skb->dev = dev;
if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
@@ -128,7 +128,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
else
skb->pkt_type = PACKET_MULTICAST;
- return vlan->receive(skb);
+ return netif_rx(skb);
}
static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
@@ -251,7 +251,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
skb->dev = dev;
skb->pkt_type = PACKET_HOST;
- ret = vlan->receive(skb);
+ ret = netif_rx(skb);
out:
macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0);
@@ -290,8 +290,8 @@ xmit_world:
return dev_queue_xmit(skb);
}
-netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
+static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
unsigned int len = skb->len;
int ret;
@@ -305,7 +305,7 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
}
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
- struct macvlan_pcpu_stats *pcpu_stats;
+ struct vlan_pcpu_stats *pcpu_stats;
pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
u64_stats_update_begin(&pcpu_stats->syncp);
@@ -317,7 +317,6 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
}
return ret;
}
-EXPORT_SYMBOL_GPL(macvlan_start_xmit);
static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
@@ -546,12 +545,12 @@ static int macvlan_init(struct net_device *dev)
macvlan_set_lockdep_class(dev);
- vlan->pcpu_stats = alloc_percpu(struct macvlan_pcpu_stats);
+ vlan->pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
if (!vlan->pcpu_stats)
return -ENOMEM;
for_each_possible_cpu(i) {
- struct macvlan_pcpu_stats *mvlstats;
+ struct vlan_pcpu_stats *mvlstats;
mvlstats = per_cpu_ptr(vlan->pcpu_stats, i);
u64_stats_init(&mvlstats->syncp);
}
@@ -577,7 +576,7 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
struct macvlan_dev *vlan = netdev_priv(dev);
if (vlan->pcpu_stats) {
- struct macvlan_pcpu_stats *p;
+ struct vlan_pcpu_stats *p;
u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
u32 rx_errors = 0, tx_dropped = 0;
unsigned int start;
@@ -690,8 +689,19 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct macvlan_dev *vlan = netdev_priv(dev);
+ netdev_features_t mask;
- return features & (vlan->set_features | ~MACVLAN_FEATURES);
+ features |= NETIF_F_ALL_FOR_ALL;
+ features &= (vlan->set_features | ~MACVLAN_FEATURES);
+ mask = features;
+
+ features = netdev_increment_features(vlan->lowerdev->features,
+ features,
+ mask);
+ if (!vlan->fwd_priv)
+ features |= NETIF_F_LLTX;
+
+ return features;
}
static const struct ethtool_ops macvlan_ethtool_ops = {
@@ -803,10 +813,7 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
}
int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
- int (*receive)(struct sk_buff *skb),
- int (*forward)(struct net_device *dev,
- struct sk_buff *skb))
+ struct nlattr *tb[], struct nlattr *data[])
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct macvlan_port *port;
@@ -820,13 +827,11 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
if (lowerdev == NULL)
return -ENODEV;
- /* When creating macvlans on top of other macvlans - use
+ /* When creating macvlans or macvtaps on top of other macvlans - use
* the real device as the lowerdev.
*/
- if (lowerdev->rtnl_link_ops == dev->rtnl_link_ops) {
- struct macvlan_dev *lowervlan = netdev_priv(lowerdev);
- lowerdev = lowervlan->lowerdev;
- }
+ if (netif_is_macvlan(lowerdev))
+ lowerdev = macvlan_dev_real_dev(lowerdev);
if (!tb[IFLA_MTU])
dev->mtu = lowerdev->mtu;
@@ -850,8 +855,6 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
vlan->lowerdev = lowerdev;
vlan->dev = dev;
vlan->port = port;
- vlan->receive = receive;
- vlan->forward = forward;
vlan->set_features = MACVLAN_FEATURES;
vlan->mode = MACVLAN_MODE_VEPA;
@@ -896,9 +899,7 @@ EXPORT_SYMBOL_GPL(macvlan_common_newlink);
static int macvlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
- return macvlan_common_newlink(src_net, dev, tb, data,
- netif_rx,
- dev_forward_skb);
+ return macvlan_common_newlink(src_net, dev, tb, data);
}
void macvlan_dellink(struct net_device *dev, struct list_head *head)
@@ -1019,9 +1020,8 @@ static int macvlan_device_event(struct notifier_block *unused,
break;
case NETDEV_FEAT_CHANGE:
list_for_each_entry(vlan, &port->vlans, list) {
- vlan->dev->features = dev->features & MACVLAN_FEATURES;
vlan->dev->gso_max_size = dev->gso_max_size;
- netdev_features_change(vlan->dev);
+ netdev_update_features(vlan->dev);
}
break;
case NETDEV_UNREGISTER:
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index dc76670c2f2a..a2c3a897206e 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -70,6 +70,11 @@ static const struct proto_ops macvtap_socket_ops;
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
+static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
+{
+ return rcu_dereference(dev->rx_handler_data);
+}
+
/*
* RCU usage:
* The macvtap_queue and the macvlan_dev are loosely coupled, the
@@ -219,7 +224,7 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
goto out;
/* Check if we can use flow to select a queue */
- rxq = skb_get_rxhash(skb);
+ rxq = skb_get_hash(skb);
if (rxq) {
tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
goto out;
@@ -271,24 +276,27 @@ static void macvtap_del_queues(struct net_device *dev)
sock_put(&qlist[j]->sk);
}
-/*
- * Forward happens for data that gets sent from one macvlan
- * endpoint to another one in bridge mode. We just take
- * the skb and put it into the receive queue.
- */
-static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
+static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
{
- struct macvlan_dev *vlan = netdev_priv(dev);
- struct macvtap_queue *q = macvtap_get_queue(dev, skb);
+ struct sk_buff *skb = *pskb;
+ struct net_device *dev = skb->dev;
+ struct macvlan_dev *vlan;
+ struct macvtap_queue *q;
netdev_features_t features = TAP_FEATURES;
+ vlan = macvtap_get_vlan_rcu(dev);
+ if (!vlan)
+ return RX_HANDLER_PASS;
+
+ q = macvtap_get_queue(dev, skb);
if (!q)
- goto drop;
+ return RX_HANDLER_PASS;
if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
goto drop;
- skb->dev = dev;
+ skb_push(skb, ETH_HLEN);
+
/* Apply the forward feature mask so that we perform segmentation
* according to the user's wishes. This only works if VNET_HDR is
* enabled.
@@ -320,22 +328,13 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
wake_up:
wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
- return NET_RX_SUCCESS;
+ return RX_HANDLER_CONSUMED;
drop:
+ /* Count errors/drops only here, thus don't care about args. */
+ macvlan_count_rx(vlan, 0, 0, 0);
kfree_skb(skb);
- return NET_RX_DROP;
-}
-
-/*
- * Receive is for data from the external interface (lowerdev),
- * in case of macvtap, we can treat that the same way as
- * forward, which macvlan cannot.
- */
-static int macvtap_receive(struct sk_buff *skb)
-{
- skb_push(skb, ETH_HLEN);
- return macvtap_forward(skb->dev, skb);
+ return RX_HANDLER_CONSUMED;
}
static int macvtap_get_minor(struct macvlan_dev *vlan)
@@ -385,6 +384,8 @@ static int macvtap_newlink(struct net *src_net,
struct nlattr *data[])
{
struct macvlan_dev *vlan = netdev_priv(dev);
+ int err;
+
INIT_LIST_HEAD(&vlan->queue_list);
/* Since macvlan supports all offloads by default, make
@@ -392,16 +393,20 @@ static int macvtap_newlink(struct net *src_net,
*/
vlan->tap_features = TUN_OFFLOADS;
+ err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan);
+ if (err)
+ return err;
+
/* Don't put anything that may fail after macvlan_common_newlink
* because we can't undo what it does.
*/
- return macvlan_common_newlink(src_net, dev, tb, data,
- macvtap_receive, macvtap_forward);
+ return macvlan_common_newlink(src_net, dev, tb, data);
}
static void macvtap_dellink(struct net_device *dev,
struct list_head *head)
{
+ netdev_rx_handler_unregister(dev);
macvtap_del_queues(dev);
macvlan_dellink(dev, head);
}
@@ -588,7 +593,7 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
return 0;
}
-static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
+static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
struct virtio_net_hdr *vnet_hdr)
{
memset(vnet_hdr, 0, sizeof(*vnet_hdr));
@@ -619,8 +624,6 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
} /* else everything is zero */
-
- return 0;
}
/* Get packet from user space buffer */
@@ -727,9 +730,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
}
if (vlan) {
- local_bh_disable();
- macvlan_start_xmit(skb, vlan->dev);
- local_bh_enable();
+ skb->dev = vlan->dev;
+ dev_queue_xmit(skb);
} else {
kfree_skb(skb);
}
@@ -744,7 +746,7 @@ err:
rcu_read_lock();
vlan = rcu_dereference(q->vlan);
if (vlan)
- vlan->dev->stats.tx_dropped++;
+ this_cpu_inc(vlan->pcpu_stats->tx_dropped);
rcu_read_unlock();
return err;
@@ -767,11 +769,10 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
const struct sk_buff *skb,
const struct iovec *iv, int len)
{
- struct macvlan_dev *vlan;
int ret;
int vnet_hdr_len = 0;
int vlan_offset = 0;
- int copied;
+ int copied, total;
if (q->flags & IFF_VNET_HDR) {
struct virtio_net_hdr vnet_hdr;
@@ -779,14 +780,13 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
if ((len -= vnet_hdr_len) < 0)
return -EINVAL;
- ret = macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);
- if (ret)
- return ret;
+ macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);
if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
return -EFAULT;
}
- copied = vnet_hdr_len;
+ total = copied = vnet_hdr_len;
+ total += skb->len;
if (!vlan_tx_tag_present(skb))
len = min_t(int, skb->len, len);
@@ -801,6 +801,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
len = min_t(int, skb->len + VLAN_HLEN, len);
+ total += VLAN_HLEN;
copy = min_t(int, vlan_offset, len);
ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
@@ -818,22 +819,12 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
}
ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
- copied += len;
done:
- rcu_read_lock();
- vlan = rcu_dereference(q->vlan);
- if (vlan) {
- preempt_disable();
- macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
- preempt_enable();
- }
- rcu_read_unlock();
-
- return ret ? ret : copied;
+ return ret ? ret : total;
}
-static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
+static ssize_t macvtap_do_read(struct macvtap_queue *q,
const struct iovec *iv, unsigned long len,
int noblock)
{
@@ -884,8 +875,10 @@ static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
goto out;
}
- ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK);
- ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? */
+ ret = macvtap_do_read(q, iv, len, file->f_flags & O_NONBLOCK);
+ ret = min_t(ssize_t, ret, len);
+ if (ret > 0)
+ iocb->ki_pos = ret;
out:
return ret;
}
@@ -1116,7 +1109,7 @@ static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
int ret;
if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
return -EINVAL;
- ret = macvtap_do_read(q, iocb, m->msg_iov, total_len,
+ ret = macvtap_do_read(q, m->msg_iov, total_len,
flags & MSG_DONTWAIT);
if (ret > total_len) {
m->msg_flags |= MSG_TRUNC;
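
The macvtap conversion above replaces the private receive/forward hooks with the generic rx_handler mechanism. A minimal skeleton of an rx_handler, with my_priv and my_enqueue() as hypothetical stand-ins:

	static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
	{
		struct sk_buff *skb = *pskb;
		struct my_priv *priv = rcu_dereference(skb->dev->rx_handler_data);

		if (!priv)
			return RX_HANDLER_PASS;	/* not ours: normal delivery */

		my_enqueue(priv, skb);		/* hypothetical consumer */
		return RX_HANDLER_CONSUMED;
	}

	/* Registered once per device, e.g. at link creation: */
	err = netdev_rx_handler_register(dev, my_handle_frame, priv);
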
diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c
index 8403316eb02b..3e027ed0b3bb 100644
--- a/drivers/net/mdio.c
+++ b/drivers/net/mdio.c
@@ -342,34 +342,6 @@ void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
EXPORT_SYMBOL(mdio45_ethtool_gset_npage);
/**
- * mdio45_ethtool_spauseparam_an - set auto-negotiated pause parameters
- * @mdio: MDIO interface
- * @ecmd: Ethtool request structure
- *
- * This function assumes that the PHY has an auto-negotiation MMD. It
- * will enable and disable advertising of flow control as appropriate.
- */
-void mdio45_ethtool_spauseparam_an(const struct mdio_if_info *mdio,
- const struct ethtool_pauseparam *ecmd)
-{
- int adv, old_adv;
-
- WARN_ON(!(mdio->mmds & MDIO_DEVS_AN));
-
- old_adv = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_AN,
- MDIO_AN_ADVERTISE);
- adv = ((old_adv & ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) |
- mii_advertise_flowctrl((ecmd->rx_pause ? FLOW_CTRL_RX : 0) |
- (ecmd->tx_pause ? FLOW_CTRL_TX : 0)));
- if (adv != old_adv) {
- mdio->mdio_write(mdio->dev, mdio->prtad, MDIO_MMD_AN,
- MDIO_AN_ADVERTISE, adv);
- mdio45_nway_restart(mdio);
- }
-}
-EXPORT_SYMBOL(mdio45_ethtool_spauseparam_an);
-
-/**
* mdio_mii_ioctl - MII ioctl interface for MDIO (clause 22 or 45) PHYs
* @mdio: MDIO interface
* @mii_data: MII ioctl data structure
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index 313a0377f68f..b57ce0cc9657 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -92,8 +92,8 @@ static int cis820x_config_intr(struct phy_device *phydev)
{
int err;
- if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
- err = phy_write(phydev, MII_CIS8201_IMASK,
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ err = phy_write(phydev, MII_CIS8201_IMASK,
MII_CIS8201_IMASK_MASK);
else
err = phy_write(phydev, MII_CIS8201_IMASK, 0);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 383e8338ad86..d2c08f625a41 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -72,7 +72,7 @@ static int dm9161_config_intr(struct phy_device *phydev)
if (temp < 0)
return temp;
- if(PHY_INTERRUPT_ENABLED == phydev->interrupts )
+ if (PHY_INTERRUPT_ENABLED == phydev->interrupts)
temp &= ~(MII_DM9161_INTR_STOP);
else
temp |= MII_DM9161_INTR_STOP;
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 7490b6c866e6..547725fa8671 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -851,8 +851,8 @@ static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
- return (rxts->msgtype == (*msgtype & 0xf) &&
- rxts->seqid == ntohs(*seqid));
+ return rxts->msgtype == (*msgtype & 0xf) &&
+ rxts->seqid == ntohs(*seqid);
}
static void dp83640_free_clocks(void)
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index b5ddd5077a80..97bf58bf4939 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -48,7 +48,7 @@ MODULE_LICENSE("GPL");
static int ip175c_config_init(struct phy_device *phydev)
{
int err, i;
- static int full_reset_performed = 0;
+ static int full_reset_performed;
if (full_reset_performed == 0) {
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index ff2e45e9cb54..9108f3191701 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -82,7 +82,7 @@ static int lxt970_config_intr(struct phy_device *phydev)
{
int err;
- if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
err = phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN);
else
err = phy_write(phydev, MII_LXT970_IER, 0);
@@ -114,7 +114,7 @@ static int lxt971_config_intr(struct phy_device *phydev)
{
int err;
- if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
err = phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN);
else
err = phy_write(phydev, MII_LXT971_IER, 0);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 2e3c778ea9bf..bd37e45c89c0 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -894,6 +894,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -907,6 +909,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -920,6 +924,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -933,6 +939,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = {.owner = THIS_MODULE,},
},
{
@@ -946,6 +954,8 @@ static struct phy_driver marvell_drivers[] = {
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.did_interrupt = &m88e1121_did_interrupt,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -961,6 +971,8 @@ static struct phy_driver marvell_drivers[] = {
.did_interrupt = &m88e1121_did_interrupt,
.get_wol = &m88e1318_get_wol,
.set_wol = &m88e1318_set_wol,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -974,6 +986,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -987,6 +1001,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -1000,6 +1016,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -1013,6 +1031,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -1026,6 +1046,8 @@ static struct phy_driver marvell_drivers[] = {
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.did_interrupt = &m88e1121_did_interrupt,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
};
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 56178761ce93..930694d3a13f 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -1,7 +1,4 @@
-/*
- * drivers/net/phy/mdio_bus.c
- *
- * MDIO Bus interface
+/* MDIO Bus interface
*
* Author: Andy Fleming
*
@@ -36,10 +33,10 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
-#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
/**
* mdiobus_alloc_size - allocate a mii_bus structure
@@ -139,8 +136,7 @@ int mdiobus_register(struct mii_bus *bus)
int i, err;
if (NULL == bus || NULL == bus->name ||
- NULL == bus->read ||
- NULL == bus->write)
+ NULL == bus->read || NULL == bus->write)
return -EINVAL;
BUG_ON(bus->state != MDIOBUS_ALLOCATED &&
@@ -214,9 +210,7 @@ EXPORT_SYMBOL(mdiobus_unregister);
*/
void mdiobus_free(struct mii_bus *bus)
{
- /*
- * For compatibility with error handling in drivers.
- */
+ /* For compatibility with error handling in drivers. */
if (bus->state == MDIOBUS_ALLOCATED) {
kfree(bus);
return;
@@ -316,8 +310,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
if (phydrv->match_phy_device)
return phydrv->match_phy_device(phydev);
- return ((phydrv->phy_id & phydrv->phy_id_mask) ==
- (phydev->phy_id & phydrv->phy_id_mask));
+ return (phydrv->phy_id & phydrv->phy_id_mask) ==
+ (phydev->phy_id & phydrv->phy_id_mask);
}
#ifdef CONFIG_PM
@@ -335,15 +329,13 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
if (!netdev)
return true;
- /*
- * Don't suspend PHY if the attched netdev parent may wakeup.
+	/* Don't suspend PHY if the attached netdev parent may wake up.
* The parent may point to a PCI device, as in tg3 driver.
*/
if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent))
return false;
- /*
- * Also don't suspend PHY if the netdev itself may wakeup. This
+	/* Also don't suspend PHY if the netdev itself may wake up. This
* is the case for devices w/o underlaying pwr. mgmt. aware bus,
* e.g. SoC devices.
*/
@@ -358,8 +350,7 @@ static int mdio_bus_suspend(struct device *dev)
struct phy_driver *phydrv = to_phy_driver(dev->driver);
struct phy_device *phydev = to_phy_device(dev);
- /*
- * We must stop the state machine manually, otherwise it stops out of
+ /* We must stop the state machine manually, otherwise it stops out of
* control, possibly with the phydev->lock held. Upon resume, netdev
* may call phy routines that try to grab the same lock, and that may
* lead to a deadlock.
@@ -388,7 +379,7 @@ static int mdio_bus_resume(struct device *dev)
no_resume:
if (phydev->attached_dev && phydev->adjust_link)
- phy_start_machine(phydev, NULL);
+ phy_start_machine(phydev);
return 0;
}
@@ -410,12 +401,12 @@ static int mdio_bus_restore(struct device *dev)
phydev->link = 0;
phydev->state = PHY_UP;
- phy_start_machine(phydev, NULL);
+ phy_start_machine(phydev);
return 0;
}
-static struct dev_pm_ops mdio_bus_pm_ops = {
+static const struct dev_pm_ops mdio_bus_pm_ops = {
.suspend = mdio_bus_suspend,
.resume = mdio_bus_resume,
.freeze = mdio_bus_suspend,
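
Besides the comment reflow, mdio_bus.c swaps the asm/ headers for linux/io.h and linux/uaccess.h, constifies mdio_bus_pm_ops, and keeps the rule that mdiobus_register() rejects a bus missing its name or accessors. A sketch of the minimum a bus driver must fill in, where example_mdio_read/example_mdio_write stand in for the driver's real accessors:

    struct mii_bus *bus = mdiobus_alloc();

    if (!bus)
            return -ENOMEM;
    bus->name  = "example-mdio";
    bus->read  = example_mdio_read;     /* required, else -EINVAL */
    bus->write = example_mdio_write;    /* required, else -EINVAL */
    err = mdiobus_register(bus);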
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 3ae28f420868..5a8993b0cafc 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -81,14 +81,14 @@ static int ksz_config_flags(struct phy_device *phydev)
}
static int kszphy_extended_write(struct phy_device *phydev,
- u32 regnum, u16 val)
+ u32 regnum, u16 val)
{
phy_write(phydev, MII_KSZPHY_EXTREG, KSZPHY_EXTREG_WRITE | regnum);
return phy_write(phydev, MII_KSZPHY_EXTREG_WRITE, val);
}
static int kszphy_extended_read(struct phy_device *phydev,
- u32 regnum)
+ u32 regnum)
{
phy_write(phydev, MII_KSZPHY_EXTREG, regnum);
return phy_read(phydev, MII_KSZPHY_EXTREG_READ);
@@ -336,6 +336,21 @@ static struct phy_driver ksphy_driver[] = {
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
+ .phy_id = PHY_ID_KSZ8041RNLI,
+ .phy_id_mask = 0x00fffff0,
+ .name = "Micrel KSZ8041RNLI",
+ .features = PHY_BASIC_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = kszphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = kszphy_ack_interrupt,
+ .config_intr = kszphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .driver = { .owner = THIS_MODULE,},
+}, {
.phy_id = PHY_ID_KSZ8051,
.phy_id_mask = 0x00fffff0,
.name = "Micrel KSZ8051",
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 36c6994436b7..76e8936ab9e4 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1,7 +1,4 @@
-/*
- * drivers/net/phy/phy.c
- *
- * Framework for configuring and reading PHY devices
+/* Framework for configuring and reading PHY devices
* Based on code in sungem_phy.c and gianfar_phy.c
*
* Author: Andy Fleming
@@ -36,11 +33,11 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mdio.h>
-
+#include <linux/io.h>
+#include <linux/uaccess.h>
#include <linux/atomic.h>
-#include <asm/io.h>
+
#include <asm/irq.h>
-#include <asm/uaccess.h>
/**
* phy_print_status - Convenience function to print out the current phy status
@@ -48,13 +45,14 @@
*/
void phy_print_status(struct phy_device *phydev)
{
- if (phydev->link)
+ if (phydev->link) {
pr_info("%s - Link is Up - %d/%s\n",
dev_name(&phydev->dev),
phydev->speed,
DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
- else
+ } else {
pr_info("%s - Link is Down\n", dev_name(&phydev->dev));
+ }
}
EXPORT_SYMBOL(phy_print_status);
@@ -69,12 +67,10 @@ EXPORT_SYMBOL(phy_print_status);
*/
static int phy_clear_interrupt(struct phy_device *phydev)
{
- int err = 0;
-
if (phydev->drv->ack_interrupt)
- err = phydev->drv->ack_interrupt(phydev);
+ return phydev->drv->ack_interrupt(phydev);
- return err;
+ return 0;
}
/**
@@ -86,13 +82,11 @@ static int phy_clear_interrupt(struct phy_device *phydev)
*/
static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
- int err = 0;
-
phydev->interrupts = interrupts;
if (phydev->drv->config_intr)
- err = phydev->drv->config_intr(phydev);
+ return phydev->drv->config_intr(phydev);
- return err;
+ return 0;
}
@@ -106,15 +100,14 @@ static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
*/
static inline int phy_aneg_done(struct phy_device *phydev)
{
- int retval;
-
- retval = phy_read(phydev, MII_BMSR);
+ int retval = phy_read(phydev, MII_BMSR);
return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
}
/* A structure for mapping a particular speed and duplex
- * combination to a particular SUPPORTED and ADVERTISED value */
+ * combination to a particular SUPPORTED and ADVERTISED value
+ */
struct phy_setting {
int speed;
int duplex;
@@ -177,8 +170,7 @@ static inline int phy_find_setting(int speed, int duplex)
int idx = 0;
while (idx < ARRAY_SIZE(settings) &&
- (settings[idx].speed != speed ||
- settings[idx].duplex != duplex))
+ (settings[idx].speed != speed || settings[idx].duplex != duplex))
idx++;
return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
@@ -245,8 +237,7 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
if (cmd->phy_address != phydev->addr)
return -EINVAL;
- /* We make sure that we don't pass unsupported
- * values in to the PHY */
+ /* We make sure that we don't pass unsupported values in to the PHY */
cmd->advertising &= phydev->supported;
/* Verify the settings we care about. */
@@ -289,6 +280,7 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
cmd->supported = phydev->supported;
cmd->advertising = phydev->advertising;
+ cmd->lp_advertising = phydev->lp_advertising;
ethtool_cmd_speed_set(cmd, phydev->speed);
cmd->duplex = phydev->duplex;
@@ -312,8 +304,7 @@ EXPORT_SYMBOL(phy_ethtool_gset);
* PHYCONTROL layer. It changes registers without regard to
* current state. Use at own risk.
*/
-int phy_mii_ioctl(struct phy_device *phydev,
- struct ifreq *ifr, int cmd)
+int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *mii_data = if_mii(ifr);
u16 val = mii_data->val_in;
@@ -326,25 +317,24 @@ int phy_mii_ioctl(struct phy_device *phydev,
case SIOCGMIIREG:
mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id,
mii_data->reg_num);
- break;
+ return 0;
case SIOCSMIIREG:
if (mii_data->phy_id == phydev->addr) {
- switch(mii_data->reg_num) {
+ switch (mii_data->reg_num) {
case MII_BMCR:
- if ((val & (BMCR_RESET|BMCR_ANENABLE)) == 0)
+ if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0)
phydev->autoneg = AUTONEG_DISABLE;
else
phydev->autoneg = AUTONEG_ENABLE;
- if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
+ if (!phydev->autoneg && (val & BMCR_FULLDPLX))
phydev->duplex = DUPLEX_FULL;
else
phydev->duplex = DUPLEX_HALF;
- if ((!phydev->autoneg) &&
- (val & BMCR_SPEED1000))
+ if (!phydev->autoneg && (val & BMCR_SPEED1000))
phydev->speed = SPEED_1000;
- else if ((!phydev->autoneg) &&
- (val & BMCR_SPEED100))
+ else if (!phydev->autoneg &&
+ (val & BMCR_SPEED100))
phydev->speed = SPEED_100;
break;
case MII_ADVERTISE:
@@ -360,12 +350,9 @@ int phy_mii_ioctl(struct phy_device *phydev,
mii_data->reg_num, val);
if (mii_data->reg_num == MII_BMCR &&
- val & BMCR_RESET &&
- phydev->drv->config_init) {
- phy_scan_fixups(phydev);
- phydev->drv->config_init(phydev);
- }
- break;
+ val & BMCR_RESET)
+ return phy_init_hw(phydev);
+ return 0;
case SIOCSHWTSTAMP:
if (phydev->drv->hwtstamp)
@@ -375,8 +362,6 @@ int phy_mii_ioctl(struct phy_device *phydev,
default:
return -EOPNOTSUPP;
}
-
- return 0;
}
EXPORT_SYMBOL(phy_mii_ioctl);
@@ -399,7 +384,6 @@ int phy_start_aneg(struct phy_device *phydev)
phy_sanitize_settings(phydev);
err = phydev->drv->config_aneg(phydev);
-
if (err < 0)
goto out_unlock;
@@ -419,25 +403,18 @@ out_unlock:
}
EXPORT_SYMBOL(phy_start_aneg);
-
/**
* phy_start_machine - start PHY state machine tracking
* @phydev: the phy_device struct
- * @handler: callback function for state change notifications
*
* Description: The PHY infrastructure can run a state machine
* which tracks whether the PHY is starting up, negotiating,
* etc. This function starts the timer which tracks the state
- * of the PHY. If you want to be notified when the state changes,
- * pass in the callback @handler, otherwise, pass NULL. If you
- * want to maintain your own state machine, do not call this
- * function.
+ * of the PHY. If you want to maintain your own state machine,
+ * do not call this function.
*/
-void phy_start_machine(struct phy_device *phydev,
- void (*handler)(struct net_device *))
+void phy_start_machine(struct phy_device *phydev)
{
- phydev->adjust_state = handler;
-
queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
}
@@ -457,8 +434,6 @@ void phy_stop_machine(struct phy_device *phydev)
if (phydev->state > PHY_UP)
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);
-
- phydev->adjust_state = NULL;
}
/**
@@ -495,7 +470,8 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
/* The MDIO bus is not allowed to be written in interrupt
* context, so we need to disable the irq here. A work
* queue will write the PHY to disable and clear the
- * interrupt, and then reenable the irq line. */
+ * interrupt, and then reenable the irq line.
+ */
disable_irq_nosync(irq);
atomic_inc(&phydev->irq_disable);
@@ -510,16 +486,12 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
*/
static int phy_enable_interrupts(struct phy_device *phydev)
{
- int err;
-
- err = phy_clear_interrupt(phydev);
+ int err = phy_clear_interrupt(phydev);
if (err < 0)
return err;
- err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
-
- return err;
+ return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
}
/**
@@ -532,13 +504,11 @@ static int phy_disable_interrupts(struct phy_device *phydev)
/* Disable PHY interrupts */
err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
-
if (err)
goto phy_err;
/* Clear the interrupt */
err = phy_clear_interrupt(phydev);
-
if (err)
goto phy_err;
@@ -562,22 +532,16 @@ phy_err:
*/
int phy_start_interrupts(struct phy_device *phydev)
{
- int err = 0;
-
atomic_set(&phydev->irq_disable, 0);
- if (request_irq(phydev->irq, phy_interrupt,
- IRQF_SHARED,
- "phy_interrupt",
- phydev) < 0) {
+ if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
+ phydev) < 0) {
pr_warn("%s: Can't get IRQ %d (PHY)\n",
phydev->bus->name, phydev->irq);
phydev->irq = PHY_POLL;
return 0;
}
- err = phy_enable_interrupts(phydev);
-
- return err;
+ return phy_enable_interrupts(phydev);
}
EXPORT_SYMBOL(phy_start_interrupts);
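
Note the behavioral change hiding in the cleanup: the IRQ is now requested with flags 0 instead of IRQF_SHARED, so the PHY interrupt line is no longer registered as shared:

    request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt", phydev);

On failure the code still falls back to PHY_POLL rather than failing the start.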
@@ -587,24 +551,20 @@ EXPORT_SYMBOL(phy_start_interrupts);
*/
int phy_stop_interrupts(struct phy_device *phydev)
{
- int err;
-
- err = phy_disable_interrupts(phydev);
+ int err = phy_disable_interrupts(phydev);
if (err)
phy_error(phydev);
free_irq(phydev->irq, phydev);
- /*
- * Cannot call flush_scheduled_work() here as desired because
+ /* Cannot call flush_scheduled_work() here as desired because
* of rtnl_lock(), but we do not really care about what would
* be done, except from enable_irq(), so cancel any work
* possibly pending and take care of the matter below.
*/
cancel_work_sync(&phydev->phy_queue);
- /*
- * If work indeed has been cancelled, disable_irq() will have
+ /* If work indeed has been cancelled, disable_irq() will have
* been left unbalanced from phy_interrupt() and enable_irq()
* has to be called so that other devices on the line work.
*/
@@ -615,14 +575,12 @@ int phy_stop_interrupts(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_stop_interrupts);
-
/**
* phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
* @work: work_struct that describes the work to be done
*/
void phy_change(struct work_struct *work)
{
- int err;
struct phy_device *phydev =
container_of(work, struct phy_device, phy_queue);
@@ -630,9 +588,7 @@ void phy_change(struct work_struct *work)
!phydev->drv->did_interrupt(phydev))
goto ignore;
- err = phy_disable_interrupts(phydev);
-
- if (err)
+ if (phy_disable_interrupts(phydev))
goto phy_err;
mutex_lock(&phydev->lock);
@@ -644,16 +600,13 @@ void phy_change(struct work_struct *work)
enable_irq(phydev->irq);
/* Reenable interrupts */
- if (PHY_HALTED != phydev->state)
- err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
-
- if (err)
+ if (PHY_HALTED != phydev->state &&
+ phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
goto irq_enable_err;
/* reschedule state queue work to run as soon as possible */
cancel_delayed_work_sync(&phydev->state_queue);
queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
-
return;
ignore:
@@ -692,13 +645,12 @@ void phy_stop(struct phy_device *phydev)
out_unlock:
mutex_unlock(&phydev->lock);
- /*
- * Cannot call flush_scheduled_work() here as desired because
+ /* Cannot call flush_scheduled_work() here as desired because
* of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
* will not reenable interrupts.
*/
}
-
+EXPORT_SYMBOL(phy_stop);
/**
* phy_start - start or restart a PHY device
@@ -715,20 +667,19 @@ void phy_start(struct phy_device *phydev)
mutex_lock(&phydev->lock);
switch (phydev->state) {
- case PHY_STARTING:
- phydev->state = PHY_PENDING;
- break;
- case PHY_READY:
- phydev->state = PHY_UP;
- break;
- case PHY_HALTED:
- phydev->state = PHY_RESUMING;
- default:
- break;
+ case PHY_STARTING:
+ phydev->state = PHY_PENDING;
+ break;
+ case PHY_READY:
+ phydev->state = PHY_UP;
+ break;
+ case PHY_HALTED:
+ phydev->state = PHY_RESUMING;
+ default:
+ break;
}
mutex_unlock(&phydev->lock);
}
-EXPORT_SYMBOL(phy_stop);
EXPORT_SYMBOL(phy_start);
/**
@@ -740,160 +691,132 @@ void phy_state_machine(struct work_struct *work)
struct delayed_work *dwork = to_delayed_work(work);
struct phy_device *phydev =
container_of(dwork, struct phy_device, state_queue);
- int needs_aneg = 0;
+ int needs_aneg = 0, do_suspend = 0;
int err = 0;
mutex_lock(&phydev->lock);
- if (phydev->adjust_state)
- phydev->adjust_state(phydev->attached_dev);
+ switch (phydev->state) {
+ case PHY_DOWN:
+ case PHY_STARTING:
+ case PHY_READY:
+ case PHY_PENDING:
+ break;
+ case PHY_UP:
+ needs_aneg = 1;
- switch(phydev->state) {
- case PHY_DOWN:
- case PHY_STARTING:
- case PHY_READY:
- case PHY_PENDING:
- break;
- case PHY_UP:
- needs_aneg = 1;
+ phydev->link_timeout = PHY_AN_TIMEOUT;
- phydev->link_timeout = PHY_AN_TIMEOUT;
+ break;
+ case PHY_AN:
+ err = phy_read_status(phydev);
+ if (err < 0)
+ break;
+ /* If the link is down, give up on negotiation for now */
+ if (!phydev->link) {
+ phydev->state = PHY_NOLINK;
+ netif_carrier_off(phydev->attached_dev);
+ phydev->adjust_link(phydev->attached_dev);
break;
- case PHY_AN:
- err = phy_read_status(phydev);
+ }
- if (err < 0)
- break;
+ /* Check if negotiation is done. Break if there's an error */
+ err = phy_aneg_done(phydev);
+ if (err < 0)
+ break;
- /* If the link is down, give up on
- * negotiation for now */
- if (!phydev->link) {
- phydev->state = PHY_NOLINK;
- netif_carrier_off(phydev->attached_dev);
- phydev->adjust_link(phydev->attached_dev);
- break;
- }
+ /* If AN is done, we're running */
+ if (err > 0) {
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ phydev->adjust_link(phydev->attached_dev);
- /* Check if negotiation is done. Break
- * if there's an error */
- err = phy_aneg_done(phydev);
- if (err < 0)
+ } else if (0 == phydev->link_timeout--) {
+ needs_aneg = 1;
+ /* If we have the magic_aneg bit, we try again */
+ if (phydev->drv->flags & PHY_HAS_MAGICANEG)
break;
-
- /* If AN is done, we're running */
- if (err > 0) {
- phydev->state = PHY_RUNNING;
- netif_carrier_on(phydev->attached_dev);
- phydev->adjust_link(phydev->attached_dev);
-
- } else if (0 == phydev->link_timeout--) {
- needs_aneg = 1;
- /* If we have the magic_aneg bit,
- * we try again */
- if (phydev->drv->flags & PHY_HAS_MAGICANEG)
- break;
- }
+ }
+ break;
+ case PHY_NOLINK:
+ err = phy_read_status(phydev);
+ if (err)
break;
- case PHY_NOLINK:
- err = phy_read_status(phydev);
-
- if (err)
- break;
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- netif_carrier_on(phydev->attached_dev);
- phydev->adjust_link(phydev->attached_dev);
- }
+ if (phydev->link) {
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ phydev->adjust_link(phydev->attached_dev);
+ }
+ break;
+ case PHY_FORCING:
+ err = genphy_update_link(phydev);
+ if (err)
break;
- case PHY_FORCING:
- err = genphy_update_link(phydev);
-
- if (err)
- break;
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- netif_carrier_on(phydev->attached_dev);
- } else {
- if (0 == phydev->link_timeout--)
- needs_aneg = 1;
- }
+ if (phydev->link) {
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ } else {
+ if (0 == phydev->link_timeout--)
+ needs_aneg = 1;
+ }
- phydev->adjust_link(phydev->attached_dev);
- break;
- case PHY_RUNNING:
- /* Only register a CHANGE if we are
- * polling or ignoring interrupts
- */
- if (!phy_interrupt_is_valid(phydev))
- phydev->state = PHY_CHANGELINK;
+ phydev->adjust_link(phydev->attached_dev);
+ break;
+ case PHY_RUNNING:
+ /* Only register a CHANGE if we are
+ * polling or ignoring interrupts
+ */
+ if (!phy_interrupt_is_valid(phydev))
+ phydev->state = PHY_CHANGELINK;
+ break;
+ case PHY_CHANGELINK:
+ err = phy_read_status(phydev);
+ if (err)
break;
- case PHY_CHANGELINK:
- err = phy_read_status(phydev);
- if (err)
- break;
+ if (phydev->link) {
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ } else {
+ phydev->state = PHY_NOLINK;
+ netif_carrier_off(phydev->attached_dev);
+ }
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- netif_carrier_on(phydev->attached_dev);
- } else {
- phydev->state = PHY_NOLINK;
- netif_carrier_off(phydev->attached_dev);
- }
+ phydev->adjust_link(phydev->attached_dev);
+ if (phy_interrupt_is_valid(phydev))
+ err = phy_config_interrupt(phydev,
+ PHY_INTERRUPT_ENABLED);
+ break;
+ case PHY_HALTED:
+ if (phydev->link) {
+ phydev->link = 0;
+ netif_carrier_off(phydev->attached_dev);
phydev->adjust_link(phydev->attached_dev);
-
- if (phy_interrupt_is_valid(phydev))
- err = phy_config_interrupt(phydev,
- PHY_INTERRUPT_ENABLED);
- break;
- case PHY_HALTED:
- if (phydev->link) {
- phydev->link = 0;
- netif_carrier_off(phydev->attached_dev);
- phydev->adjust_link(phydev->attached_dev);
- }
+ do_suspend = 1;
+ }
+ break;
+ case PHY_RESUMING:
+ err = phy_clear_interrupt(phydev);
+ if (err)
break;
- case PHY_RESUMING:
-
- err = phy_clear_interrupt(phydev);
- if (err)
- break;
-
- err = phy_config_interrupt(phydev,
- PHY_INTERRUPT_ENABLED);
+ err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
+ if (err)
+ break;
- if (err)
+ if (AUTONEG_ENABLE == phydev->autoneg) {
+ err = phy_aneg_done(phydev);
+ if (err < 0)
break;
- if (AUTONEG_ENABLE == phydev->autoneg) {
- err = phy_aneg_done(phydev);
- if (err < 0)
- break;
-
- /* err > 0 if AN is done.
- * Otherwise, it's 0, and we're
- * still waiting for AN */
- if (err > 0) {
- err = phy_read_status(phydev);
- if (err)
- break;
-
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- netif_carrier_on(phydev->attached_dev);
- } else
- phydev->state = PHY_NOLINK;
- phydev->adjust_link(phydev->attached_dev);
- } else {
- phydev->state = PHY_AN;
- phydev->link_timeout = PHY_AN_TIMEOUT;
- }
- } else {
+ /* err > 0 if AN is done.
+ * Otherwise, it's 0, and we're still waiting for AN
+ */
+ if (err > 0) {
err = phy_read_status(phydev);
if (err)
break;
@@ -901,11 +824,28 @@ void phy_state_machine(struct work_struct *work)
if (phydev->link) {
phydev->state = PHY_RUNNING;
netif_carrier_on(phydev->attached_dev);
- } else
+ } else {
phydev->state = PHY_NOLINK;
+ }
phydev->adjust_link(phydev->attached_dev);
+ } else {
+ phydev->state = PHY_AN;
+ phydev->link_timeout = PHY_AN_TIMEOUT;
}
- break;
+ } else {
+ err = phy_read_status(phydev);
+ if (err)
+ break;
+
+ if (phydev->link) {
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ } else {
+ phydev->state = PHY_NOLINK;
+ }
+ phydev->adjust_link(phydev->attached_dev);
+ }
+ break;
}
mutex_unlock(&phydev->lock);
@@ -913,11 +853,14 @@ void phy_state_machine(struct work_struct *work)
if (needs_aneg)
err = phy_start_aneg(phydev);
+ if (do_suspend)
+ phy_suspend(phydev);
+
if (err < 0)
phy_error(phydev);
queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
- PHY_STATE_TIME * HZ);
+ PHY_STATE_TIME * HZ);
}
void phy_mac_interrupt(struct phy_device *phydev, int new_link)
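
Beyond the re-indentation, the state machine gains one behavior: reaching PHY_HALTED with the link still up now sets do_suspend, and once the lock is dropped the PHY is powered down:

    if (do_suspend)
            phy_suspend(phydev);    /* refused when WOL is armed, see phy_device.c below */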
@@ -959,14 +902,10 @@ static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
static int phy_read_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
int addr)
{
- u32 ret;
-
mmd_phy_indirect(bus, prtad, devad, addr);
/* Read the content of the MMD's selected register */
- ret = bus->read(bus, addr, MII_MMD_DATA);
-
- return ret;
+ return bus->read(bus, addr, MII_MMD_DATA);
}
/**
@@ -1006,8 +945,6 @@ static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
*/
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
{
- int ret = -EPROTONOSUPPORT;
-
/* According to 802.3az,the EEE is supported only in full duplex-mode.
* Also EEE feature is active when core is operating with MII, GMII
* or RGMII.
@@ -1033,7 +970,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
if (!cap)
- goto eee_exit;
+ return -EPROTONOSUPPORT;
/* Check which link settings negotiated and verify it in
* the EEE advertising registers.
@@ -1052,7 +989,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
idx = phy_find_setting(phydev->speed, phydev->duplex);
if (!(lp & adv & settings[idx].setting))
- goto eee_exit;
+ return -EPROTONOSUPPORT;
if (clk_stop_enable) {
/* Configure the PHY to stop receiving xMII
@@ -1069,11 +1006,10 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
MDIO_MMD_PCS, phydev->addr, val);
}
- ret = 0; /* EEE supported */
+ return 0; /* EEE supported */
}
-eee_exit:
- return ret;
+ return -EPROTONOSUPPORT;
}
EXPORT_SYMBOL(phy_init_eee);
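
With the goto label gone, phy_init_eee() returns 0 when EEE is usable on the negotiated link and -EPROTONOSUPPORT otherwise. A hypothetical MAC-driver call site:

    if (!phy_init_eee(phydev, true))    /* true: allow xMII clock stop */
            example_mac_enable_eee();   /* placeholder for driver-specific setup */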
@@ -1088,7 +1024,6 @@ int phy_get_eee_err(struct phy_device *phydev)
{
return phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_WK_ERR,
MDIO_MMD_PCS, phydev->addr);
-
}
EXPORT_SYMBOL(phy_get_eee_err);
@@ -1138,9 +1073,8 @@ EXPORT_SYMBOL(phy_ethtool_get_eee);
*/
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
- int val;
+ int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
- val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
phydev->addr, val);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index d6447b3f7409..5d81c89ee52b 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1,7 +1,4 @@
-/*
- * drivers/net/phy/phy_device.c
- *
- * Framework for finding and configuring PHYs.
+/* Framework for finding and configuring PHYs.
* Also contains generic PHY driver
*
* Author: Andy Fleming
@@ -33,10 +30,10 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
-#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
MODULE_DESCRIPTION("PHY library");
MODULE_AUTHOR("Andy Fleming");
@@ -54,8 +51,6 @@ static void phy_device_release(struct device *dev)
}
static struct phy_driver genphy_driver;
-extern int mdio_bus_init(void);
-extern void mdio_bus_exit(void);
static LIST_HEAD(phy_fixup_list);
static DEFINE_MUTEX(phy_fixup_lock);
@@ -63,21 +58,20 @@ static DEFINE_MUTEX(phy_fixup_lock);
static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
u32 flags, phy_interface_t interface);
-/*
- * Creates a new phy_fixup and adds it to the list
+/**
+ * phy_register_fixup - creates a new phy_fixup and adds it to the list
* @bus_id: A string which matches phydev->dev.bus_id (or PHY_ANY_ID)
* @phy_uid: Used to match against phydev->phy_id (the UID of the PHY)
- * It can also be PHY_ANY_UID
+ * It can also be PHY_ANY_UID
* @phy_uid_mask: Applied to phydev->phy_id and fixup->phy_uid before
- * comparison
+ * comparison
* @run: The actual code to be run when a matching PHY is found
*/
int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
- int (*run)(struct phy_device *))
+ int (*run)(struct phy_device *))
{
- struct phy_fixup *fixup;
+ struct phy_fixup *fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
- fixup = kzalloc(sizeof(struct phy_fixup), GFP_KERNEL);
if (!fixup)
return -ENOMEM;
@@ -96,7 +90,7 @@ EXPORT_SYMBOL(phy_register_fixup);
/* Registers a fixup to be run on any PHY with the UID in phy_uid */
int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
- int (*run)(struct phy_device *))
+ int (*run)(struct phy_device *))
{
return phy_register_fixup(PHY_ANY_ID, phy_uid, phy_uid_mask, run);
}
@@ -104,14 +98,13 @@ EXPORT_SYMBOL(phy_register_fixup_for_uid);
/* Registers a fixup to be run on the PHY with id string bus_id */
int phy_register_fixup_for_id(const char *bus_id,
- int (*run)(struct phy_device *))
+ int (*run)(struct phy_device *))
{
return phy_register_fixup(bus_id, PHY_ANY_UID, 0xffffffff, run);
}
EXPORT_SYMBOL(phy_register_fixup_for_id);
-/*
- * Returns 1 if fixup matches phydev in bus_id and phy_uid.
+/* Returns 1 if fixup matches phydev in bus_id and phy_uid.
* Fixups can be set to match any in one or more fields.
*/
static int phy_needs_fixup(struct phy_device *phydev, struct phy_fixup *fixup)
@@ -121,7 +114,7 @@ static int phy_needs_fixup(struct phy_device *phydev, struct phy_fixup *fixup)
return 0;
if ((fixup->phy_uid & fixup->phy_uid_mask) !=
- (phydev->phy_id & fixup->phy_uid_mask))
+ (phydev->phy_id & fixup->phy_uid_mask))
if (fixup->phy_uid != PHY_ANY_UID)
return 0;
@@ -129,16 +122,14 @@ static int phy_needs_fixup(struct phy_device *phydev, struct phy_fixup *fixup)
}
/* Runs any matching fixups for this phydev */
-int phy_scan_fixups(struct phy_device *phydev)
+static int phy_scan_fixups(struct phy_device *phydev)
{
struct phy_fixup *fixup;
mutex_lock(&phy_fixup_lock);
list_for_each_entry(fixup, &phy_fixup_list, list) {
if (phy_needs_fixup(phydev, fixup)) {
- int err;
-
- err = fixup->run(phydev);
+ int err = fixup->run(phydev);
if (err < 0) {
mutex_unlock(&phy_fixup_lock);
@@ -150,25 +141,24 @@ int phy_scan_fixups(struct phy_device *phydev)
return 0;
}
-EXPORT_SYMBOL(phy_scan_fixups);
struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
- bool is_c45, struct phy_c45_device_ids *c45_ids)
+ bool is_c45,
+ struct phy_c45_device_ids *c45_ids)
{
struct phy_device *dev;
- /* We allocate the device, and initialize the
- * default values */
+ /* We allocate the device, and initialize the default values */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-
if (NULL == dev)
- return (struct phy_device*) PTR_ERR((void*)-ENOMEM);
+ return (struct phy_device *)PTR_ERR((void *)-ENOMEM);
dev->dev.release = phy_device_release;
dev->speed = 0;
dev->duplex = -1;
- dev->pause = dev->asym_pause = 0;
+ dev->pause = 0;
+ dev->asym_pause = 0;
dev->link = 1;
dev->interface = PHY_INTERFACE_MODE_GMII;
@@ -192,14 +182,15 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
INIT_WORK(&dev->phy_queue, phy_change);
/* Request the appropriate module unconditionally; don't
- bother trying to do so only if it isn't already loaded,
- because that gets complicated. A hotplug event would have
- done an unconditional modprobe anyway.
- We don't do normal hotplug because it won't work for MDIO
- -- because it relies on the device staying around for long
- enough for the driver to get loaded. With MDIO, the NIC
- driver will get bored and give up as soon as it finds that
- there's no driver _already_ loaded. */
+ * bother trying to do so only if it isn't already loaded,
+ * because that gets complicated. A hotplug event would have
+ * done an unconditional modprobe anyway.
+ * We don't do normal hotplug because it won't work for MDIO
+ * -- because it relies on the device staying around for long
+ * enough for the driver to get loaded. With MDIO, the NIC
+ * driver will get bored and give up as soon as it finds that
+ * there's no driver _already_ loaded.
+ */
request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id));
device_initialize(&dev->dev);
@@ -299,10 +290,8 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
if (is_c45)
return get_phy_c45_ids(bus, addr, phy_id, c45_ids);
- /* Grab the bits from PHYIR1, and put them
- * in the upper half */
+ /* Grab the bits from PHYIR1, and put them in the upper half */
phy_reg = mdiobus_read(bus, addr, MII_PHYSID1);
-
if (phy_reg < 0)
return -EIO;
@@ -310,7 +299,6 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
/* Grab the bits from PHYIR2, and put them in the lower half */
phy_reg = mdiobus_read(bus, addr, MII_PHYSID2);
-
if (phy_reg < 0)
return -EIO;
@@ -320,7 +308,8 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
}
/**
- * get_phy_device - reads the specified PHY device and returns its @phy_device struct
+ * get_phy_device - reads the specified PHY device and returns its @phy_device
+ * struct
* @bus: the target MII bus
* @addr: PHY address on the MII bus
* @is_c45: If true the PHY uses the 802.3 clause 45 protocol
@@ -331,7 +320,6 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
{
struct phy_c45_device_ids c45_ids = {0};
- struct phy_device *dev = NULL;
u32 phy_id = 0;
int r;
@@ -343,9 +331,7 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
if ((phy_id & 0x1fffffff) == 0x1fffffff)
return NULL;
- dev = phy_device_create(bus, addr, phy_id, is_c45, &c45_ids);
-
- return dev;
+ return phy_device_create(bus, addr, phy_id, is_c45, &c45_ids);
}
EXPORT_SYMBOL(get_phy_device);
@@ -357,14 +343,17 @@ int phy_device_register(struct phy_device *phydev)
{
int err;
- /* Don't register a phy if one is already registered at this
- * address */
+ /* Don't register a phy if one is already registered at this address */
if (phydev->bus->phy_map[phydev->addr])
return -EINVAL;
phydev->bus->phy_map[phydev->addr] = phydev;
/* Run all of the fixups for this PHY */
- phy_scan_fixups(phydev);
+ err = phy_init_hw(phydev);
+ if (err) {
+ pr_err("PHY %d failed to initialize\n", phydev->addr);
+ goto out;
+ }
err = device_add(&phydev->dev);
if (err) {
@@ -409,7 +398,7 @@ EXPORT_SYMBOL(phy_find_first);
* this function.
*/
static void phy_prepare_link(struct phy_device *phydev,
- void (*handler)(struct net_device *))
+ void (*handler)(struct net_device *))
{
phydev->adjust_link = handler;
}
@@ -432,7 +421,7 @@ int phy_connect_direct(struct net_device *dev, struct phy_device *phydev,
return rc;
phy_prepare_link(phydev, handler);
- phy_start_machine(phydev, NULL);
+ phy_start_machine(phydev);
if (phydev->irq > 0)
phy_start_interrupts(phydev);
@@ -455,16 +444,17 @@ EXPORT_SYMBOL(phy_connect_direct);
* choose to call only the subset of functions which provide
* the desired functionality.
*/
-struct phy_device * phy_connect(struct net_device *dev, const char *bus_id,
- void (*handler)(struct net_device *),
- phy_interface_t interface)
+struct phy_device *phy_connect(struct net_device *dev, const char *bus_id,
+ void (*handler)(struct net_device *),
+ phy_interface_t interface)
{
struct phy_device *phydev;
struct device *d;
int rc;
/* Search the list of PHY devices on the mdio bus for the
- * PHY with the requested name */
+ * PHY with the requested name
+ */
d = bus_find_device_by_name(&mdio_bus_type, NULL, bus_id);
if (!d) {
pr_err("PHY %s not found\n", bus_id);
@@ -481,7 +471,8 @@ struct phy_device * phy_connect(struct net_device *dev, const char *bus_id,
EXPORT_SYMBOL(phy_connect);
/**
- * phy_disconnect - disable interrupts, stop state machine, and detach a PHY device
+ * phy_disconnect - disable interrupts, stop state machine, and detach a PHY
+ * device
* @phydev: target phy_device struct
*/
void phy_disconnect(struct phy_device *phydev)
@@ -490,13 +481,53 @@ void phy_disconnect(struct phy_device *phydev)
phy_stop_interrupts(phydev);
phy_stop_machine(phydev);
-
+
phydev->adjust_link = NULL;
phy_detach(phydev);
}
EXPORT_SYMBOL(phy_disconnect);
+/**
+ * phy_poll_reset - Safely wait until a PHY reset has properly completed
+ * @phydev: The PHY device to poll
+ *
+ * Description: According to IEEE 802.3, Section 2, Subsection 22.2.4.1.1, as
+ * published in 2008, a PHY reset may take up to 0.5 seconds. The MII BMCR
+ * register must be polled until the BMCR_RESET bit clears.
+ *
+ * Furthermore, any attempts to write to PHY registers may have no effect
+ * or even generate MDIO bus errors until this is complete.
+ *
+ * Some PHYs (such as the Marvell 88E1111) don't entirely conform to the
+ * standard and do not fully reset after the BMCR_RESET bit is set, and may
+ * even *REQUIRE* a soft-reset to properly restart autonegotiation. In an
+ * effort to support such broken PHYs, this function is separate from the
+ * standard phy_init_hw() which will zero all the other bits in the BMCR
+ * and reapply all driver-specific and board-specific fixups.
+ */
+static int phy_poll_reset(struct phy_device *phydev)
+{
+ /* Poll until the reset bit clears (50ms per retry == 0.6 sec) */
+ unsigned int retries = 12;
+ int ret;
+
+ do {
+ msleep(50);
+ ret = phy_read(phydev, MII_BMCR);
+ if (ret < 0)
+ return ret;
+ } while (ret & BMCR_RESET && --retries);
+ if (ret & BMCR_RESET)
+ return -ETIMEDOUT;
+
+ /* Some chips (smsc911x) may still need up to another 1ms after the
+ * BMCR_RESET bit is cleared before they are usable.
+ */
+ msleep(1);
+ return 0;
+}
+
int phy_init_hw(struct phy_device *phydev)
{
int ret;
@@ -504,12 +535,21 @@ int phy_init_hw(struct phy_device *phydev)
if (!phydev->drv || !phydev->drv->config_init)
return 0;
+ ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_poll_reset(phydev);
+ if (ret < 0)
+ return ret;
+
ret = phy_scan_fixups(phydev);
if (ret < 0)
return ret;
return phydev->drv->config_init(phydev);
}
+EXPORT_SYMBOL(phy_init_hw);
/**
* phy_attach_direct - attach a network device to a given PHY device pointer
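
Taken together with phy_poll_reset(), the reworked phy_init_hw() now performs an ordered bring-up. In outline, exactly as added above:

    phy_write(phydev, MII_BMCR, BMCR_RESET);    /* soft-reset the PHY */
    phy_poll_reset(phydev);                     /* wait, up to ~0.6s, for BMCR_RESET to clear */
    phy_scan_fixups(phydev);                    /* reapply board/driver fixups */
    phydev->drv->config_init(phydev);           /* driver-specific init */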
@@ -532,7 +572,8 @@ static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
int err;
/* Assume that if there is no driver, that it doesn't
- * exist, and we should use the genphy driver. */
+ * exist, and we should use the genphy driver.
+ */
if (NULL == d->driver) {
if (phydev->is_c45) {
pr_err("No driver for phy %x\n", phydev->phy_id);
@@ -565,11 +606,14 @@ static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
/* Do initial configuration here, now that
* we have certain key parameters
- * (dev_flags and interface) */
+ * (dev_flags and interface)
+ */
err = phy_init_hw(phydev);
if (err)
phy_detach(phydev);
+ phy_resume(phydev);
+
return err;
}
@@ -582,8 +626,8 @@ static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
* Description: Same as phy_attach_direct() except that a PHY bus_id
* string is passed instead of a pointer to a struct phy_device.
*/
-struct phy_device *phy_attach(struct net_device *dev,
- const char *bus_id, phy_interface_t interface)
+struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
+ phy_interface_t interface)
{
struct bus_type *bus = &mdio_bus_type;
struct phy_device *phydev;
@@ -591,7 +635,8 @@ struct phy_device *phy_attach(struct net_device *dev,
int rc;
/* Search the list of PHY devices on the mdio bus for the
- * PHY with the requested name */
+ * PHY with the requested name
+ */
d = bus_find_device_by_name(bus, NULL, bus_id);
if (!d) {
pr_err("PHY %s not found\n", bus_id);
@@ -615,16 +660,42 @@ void phy_detach(struct phy_device *phydev)
{
phydev->attached_dev->phydev = NULL;
phydev->attached_dev = NULL;
+ phy_suspend(phydev);
/* If the device had no specific driver before (i.e. - it
* was using the generic driver), we unbind the device
* from the generic driver so that there's a chance a
- * real driver could be loaded */
+ * real driver could be loaded
+ */
if (phydev->dev.driver == &genphy_driver.driver)
device_release_driver(&phydev->dev);
}
EXPORT_SYMBOL(phy_detach);
+int phy_suspend(struct phy_device *phydev)
+{
+ struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver);
+ struct ethtool_wolinfo wol;
+
+ /* If the device has WOL enabled, we cannot suspend the PHY */
+ wol.cmd = ETHTOOL_GWOL;
+ phy_ethtool_get_wol(phydev, &wol);
+ if (wol.wolopts)
+ return -EBUSY;
+
+ if (phydrv->suspend)
+ return phydrv->suspend(phydev);
+ return 0;
+}
+
+int phy_resume(struct phy_device *phydev)
+{
+ struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver);
+
+ if (phydrv->resume)
+ return phydrv->resume(phydev);
+ return 0;
+}
/* Generic PHY support and helper functions */
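
The key policy in the new phy_suspend() is its Wake-on-LAN gate: a PHY that must stay awake to detect WOL packets is never powered down. The check in isolation:

    struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };

    phy_ethtool_get_wol(phydev, &wol);
    if (wol.wolopts)
            return -EBUSY;      /* WOL armed, keep the PHY powered */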
@@ -643,17 +714,16 @@ static int genphy_config_advert(struct phy_device *phydev)
int oldadv, adv;
int err, changed = 0;
- /* Only allow advertising what
- * this PHY supports */
+ /* Only allow advertising what this PHY supports */
phydev->advertising &= phydev->supported;
advertise = phydev->advertising;
/* Setup standard advertisement */
- oldadv = adv = phy_read(phydev, MII_ADVERTISE);
-
+ adv = phy_read(phydev, MII_ADVERTISE);
if (adv < 0)
return adv;
+ oldadv = adv;
adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
ADVERTISE_PAUSE_ASYM);
adv |= ethtool_adv_to_mii_adv_t(advertise);
@@ -668,12 +738,12 @@ static int genphy_config_advert(struct phy_device *phydev)
/* Configure gigabit if it's supported */
if (phydev->supported & (SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full)) {
- oldadv = adv = phy_read(phydev, MII_CTRL1000);
-
+ SUPPORTED_1000baseT_Full)) {
+ adv = phy_read(phydev, MII_CTRL1000);
if (adv < 0)
return adv;
+ oldadv = adv;
adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
@@ -699,10 +769,10 @@ static int genphy_config_advert(struct phy_device *phydev)
*/
int genphy_setup_forced(struct phy_device *phydev)
{
- int err;
int ctl = 0;
- phydev->pause = phydev->asym_pause = 0;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
if (SPEED_1000 == phydev->speed)
ctl |= BMCR_SPEED1000;
@@ -711,10 +781,8 @@ int genphy_setup_forced(struct phy_device *phydev)
if (DUPLEX_FULL == phydev->duplex)
ctl |= BMCR_FULLDPLX;
-
- err = phy_write(phydev, MII_BMCR, ctl);
- return err;
+ return phy_write(phydev, MII_BMCR, ctl);
}
EXPORT_SYMBOL(genphy_setup_forced);
@@ -724,25 +792,20 @@ EXPORT_SYMBOL(genphy_setup_forced);
*/
int genphy_restart_aneg(struct phy_device *phydev)
{
- int ctl;
-
- ctl = phy_read(phydev, MII_BMCR);
+ int ctl = phy_read(phydev, MII_BMCR);
if (ctl < 0)
return ctl;
- ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ ctl |= BMCR_ANENABLE | BMCR_ANRESTART;
/* Don't isolate the PHY if we're negotiating */
- ctl &= ~(BMCR_ISOLATE);
-
- ctl = phy_write(phydev, MII_BMCR, ctl);
+ ctl &= ~BMCR_ISOLATE;
- return ctl;
+ return phy_write(phydev, MII_BMCR, ctl);
}
EXPORT_SYMBOL(genphy_restart_aneg);
-
/**
* genphy_config_aneg - restart auto-negotiation or write BMCR
* @phydev: target phy_device struct
@@ -759,13 +822,12 @@ int genphy_config_aneg(struct phy_device *phydev)
return genphy_setup_forced(phydev);
result = genphy_config_advert(phydev);
-
if (result < 0) /* error */
return result;
-
if (result == 0) {
/* Advertisement hasn't changed, but maybe aneg was never on to
- * begin with? Or maybe phy was isolated? */
+ * begin with? Or maybe phy was isolated?
+ */
int ctl = phy_read(phydev, MII_BMCR);
if (ctl < 0)
@@ -776,7 +838,8 @@ int genphy_config_aneg(struct phy_device *phydev)
}
/* Only restart aneg if we are advertising something different
- * than we were before. */
+ * than we were before.
+ */
if (result > 0)
result = genphy_restart_aneg(phydev);
@@ -798,13 +861,11 @@ int genphy_update_link(struct phy_device *phydev)
/* Do a fake read */
status = phy_read(phydev, MII_BMSR);
-
if (status < 0)
return status;
/* Read link and autonegotiation status */
status = phy_read(phydev, MII_BMSR);
-
if (status < 0)
return status;
@@ -833,35 +894,36 @@ int genphy_read_status(struct phy_device *phydev)
int lpa;
int lpagb = 0;
- /* Update the link, but return if there
- * was an error */
+ /* Update the link, but return if there was an error */
err = genphy_update_link(phydev);
if (err)
return err;
+ phydev->lp_advertising = 0;
+
if (AUTONEG_ENABLE == phydev->autoneg) {
if (phydev->supported & (SUPPORTED_1000baseT_Half
| SUPPORTED_1000baseT_Full)) {
lpagb = phy_read(phydev, MII_STAT1000);
-
if (lpagb < 0)
return lpagb;
adv = phy_read(phydev, MII_CTRL1000);
-
if (adv < 0)
return adv;
+ phydev->lp_advertising =
+ mii_stat1000_to_ethtool_lpa_t(lpagb);
lpagb &= adv << 2;
}
lpa = phy_read(phydev, MII_LPA);
-
if (lpa < 0)
return lpa;
- adv = phy_read(phydev, MII_ADVERTISE);
+ phydev->lp_advertising |= mii_lpa_to_ethtool_lpa_t(lpa);
+ adv = phy_read(phydev, MII_ADVERTISE);
if (adv < 0)
return adv;
@@ -869,7 +931,8 @@ int genphy_read_status(struct phy_device *phydev)
phydev->speed = SPEED_10;
phydev->duplex = DUPLEX_HALF;
- phydev->pause = phydev->asym_pause = 0;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
if (lpagb & (LPA_1000FULL | LPA_1000HALF)) {
phydev->speed = SPEED_1000;
@@ -878,19 +941,20 @@ int genphy_read_status(struct phy_device *phydev)
phydev->duplex = DUPLEX_FULL;
} else if (lpa & (LPA_100FULL | LPA_100HALF)) {
phydev->speed = SPEED_100;
-
+
if (lpa & LPA_100FULL)
phydev->duplex = DUPLEX_FULL;
} else
if (lpa & LPA_10FULL)
phydev->duplex = DUPLEX_FULL;
- if (phydev->duplex == DUPLEX_FULL){
+ if (phydev->duplex == DUPLEX_FULL) {
phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
}
} else {
int bmcr = phy_read(phydev, MII_BMCR);
+
if (bmcr < 0)
return bmcr;
@@ -906,7 +970,8 @@ int genphy_read_status(struct phy_device *phydev)
else
phydev->speed = SPEED_10;
- phydev->pause = phydev->asym_pause = 0;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
}
return 0;
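
With lp_advertising populated from MII_STAT1000 and MII_LPA, phy_ethtool_gset() (earlier hunk in this patch) can finally report the link partner's abilities to user space. The two conversions used above:

    phydev->lp_advertising  = mii_stat1000_to_ethtool_lpa_t(lpagb);    /* 1000BASE-T bits */
    phydev->lp_advertising |= mii_lpa_to_ethtool_lpa_t(lpa);           /* 10/100 and pause bits */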
@@ -919,14 +984,14 @@ static int genphy_config_init(struct phy_device *phydev)
u32 features;
/* For now, I'll claim that the generic driver supports
- * all possible port types */
+ * all possible port types
+ */
features = (SUPPORTED_TP | SUPPORTED_MII
| SUPPORTED_AUI | SUPPORTED_FIBRE |
SUPPORTED_BNC);
/* Do we support autonegotiation? */
val = phy_read(phydev, MII_BMSR);
-
if (val < 0)
return val;
@@ -944,7 +1009,6 @@ static int genphy_config_init(struct phy_device *phydev)
if (val & BMSR_ESTATEN) {
val = phy_read(phydev, MII_ESTATUS);
-
if (val < 0)
return val;
@@ -966,7 +1030,7 @@ int genphy_suspend(struct phy_device *phydev)
mutex_lock(&phydev->lock);
value = phy_read(phydev, MII_BMCR);
- phy_write(phydev, MII_BMCR, (value | BMCR_PDOWN));
+ phy_write(phydev, MII_BMCR, value | BMCR_PDOWN);
mutex_unlock(&phydev->lock);
@@ -981,7 +1045,7 @@ int genphy_resume(struct phy_device *phydev)
mutex_lock(&phydev->lock);
value = phy_read(phydev, MII_BMCR);
- phy_write(phydev, MII_BMCR, (value & ~BMCR_PDOWN));
+ phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
mutex_unlock(&phydev->lock);
@@ -999,22 +1063,18 @@ EXPORT_SYMBOL(genphy_resume);
*/
static int phy_probe(struct device *dev)
{
- struct phy_device *phydev;
- struct phy_driver *phydrv;
- struct device_driver *drv;
+ struct phy_device *phydev = to_phy_device(dev);
+ struct device_driver *drv = phydev->dev.driver;
+ struct phy_driver *phydrv = to_phy_driver(drv);
int err = 0;
- phydev = to_phy_device(dev);
-
- drv = phydev->dev.driver;
- phydrv = to_phy_driver(drv);
phydev->drv = phydrv;
/* Disable the interrupt if the PHY doesn't support it
* but the interrupt is still a valid one
*/
if (!(phydrv->flags & PHY_HAS_INTERRUPT) &&
- phy_interrupt_is_valid(phydev))
+ phy_interrupt_is_valid(phydev))
phydev->irq = PHY_POLL;
if (phydrv->flags & PHY_IS_INTERNAL)
@@ -1024,7 +1084,8 @@ static int phy_probe(struct device *dev)
/* Start out supporting everything. Eventually,
* a controller will attach, and may modify one
- * or both of these values */
+ * or both of these values
+ */
phydev->supported = phydrv->features;
phydev->advertising = phydrv->features;
@@ -1037,14 +1098,11 @@ static int phy_probe(struct device *dev)
mutex_unlock(&phydev->lock);
return err;
-
}
static int phy_remove(struct device *dev)
{
- struct phy_device *phydev;
-
- phydev = to_phy_device(dev);
+ struct phy_device *phydev = to_phy_device(dev);
mutex_lock(&phydev->lock);
phydev->state = PHY_DOWN;
@@ -1071,7 +1129,6 @@ int phy_driver_register(struct phy_driver *new_driver)
new_driver->driver.remove = phy_remove;
retval = driver_register(&new_driver->driver);
-
if (retval) {
pr_err("%s: Error %d in registering driver\n",
new_driver->name, retval);
@@ -1110,9 +1167,9 @@ EXPORT_SYMBOL(phy_driver_unregister);
void phy_drivers_unregister(struct phy_driver *drv, int n)
{
int i;
- for (i = 0; i < n; i++) {
+
+ for (i = 0; i < n; i++)
phy_driver_unregister(drv + i);
- }
}
EXPORT_SYMBOL(phy_drivers_unregister);
@@ -1126,7 +1183,7 @@ static struct phy_driver genphy_driver = {
.read_status = genphy_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
- .driver = {.owner= THIS_MODULE, },
+ .driver = { .owner = THIS_MODULE, },
};
static int __init phy_init(void)
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index f3bea1346021..0ba431145a84 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -171,14 +171,14 @@ static int ks8995_write(struct ks8995_switch *ks, char *buf,
static inline int ks8995_read_reg(struct ks8995_switch *ks, u8 addr, u8 *buf)
{
- return (ks8995_read(ks, buf, addr, 1) != 1);
+ return ks8995_read(ks, buf, addr, 1) != 1;
}
static inline int ks8995_write_reg(struct ks8995_switch *ks, u8 addr, u8 val)
{
char buf = val;
- return (ks8995_write(ks, &buf, addr, 1) != 1);
+ return ks8995_write(ks, &buf, addr, 1) != 1;
}
/* ------------------------------------------------------------------------ */
@@ -325,7 +325,6 @@ static int ks8995_probe(struct spi_device *spi)
return 0;
err_drvdata:
- spi_set_drvdata(spi, NULL);
kfree(ks);
return err;
}
@@ -337,7 +336,6 @@ static int ks8995_remove(struct spi_device *spi)
ks8995 = spi_get_drvdata(spi);
sysfs_remove_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
- spi_set_drvdata(spi, NULL);
kfree(ks8995);
return 0;
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 508e4359338b..14372c65a7e8 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -64,6 +64,7 @@
#define PHY_ID_VSC8234 0x000fc620
#define PHY_ID_VSC8244 0x000fc6c0
+#define PHY_ID_VSC8514 0x00070670
#define PHY_ID_VSC8574 0x000704a0
#define PHY_ID_VSC8662 0x00070660
#define PHY_ID_VSC8221 0x000fc550
@@ -131,6 +132,7 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
err = phy_write(phydev, MII_VSC8244_IMASK,
(phydev->drv->phy_id == PHY_ID_VSC8234 ||
phydev->drv->phy_id == PHY_ID_VSC8244 ||
+ phydev->drv->phy_id == PHY_ID_VSC8514 ||
phydev->drv->phy_id == PHY_ID_VSC8574) ?
MII_VSC8244_IMASK_MASK :
MII_VSC8221_IMASK_MASK);
@@ -246,6 +248,18 @@ static struct phy_driver vsc82xx_driver[] = {
.config_intr = &vsc82xx_config_intr,
.driver = { .owner = THIS_MODULE,},
}, {
+ .phy_id = PHY_ID_VSC8514,
+ .name = "Vitesse VSC8514",
+ .phy_id_mask = 0x000ffff0,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = &vsc824x_config_init,
+ .config_aneg = &vsc82x4_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &vsc824x_ack_interrupt,
+ .config_intr = &vsc82xx_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+}, {
.phy_id = PHY_ID_VSC8574,
.name = "Vitesse VSC8574",
.phy_id_mask = 0x000ffff0,
@@ -315,6 +329,7 @@ module_exit(vsc82xx_exit);
static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
{ PHY_ID_VSC8234, 0x000ffff0 },
{ PHY_ID_VSC8244, 0x000fffc0 },
+ { PHY_ID_VSC8514, 0x000ffff0 },
{ PHY_ID_VSC8574, 0x000ffff0 },
{ PHY_ID_VSC8662, 0x000ffff0 },
{ PHY_ID_VSC8221, 0x000ffff0 },
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index 7b4ff35c8bf7..040b8978d6ca 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -547,9 +547,9 @@ static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
skb_pull(skb,dev->hard_header_len);
eth = eth_hdr(skb);
- if(*eth->h_dest&1)
+ if(is_multicast_ether_addr(eth->h_dest))
{
- if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0)
+ if(ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
skb->pkt_type=PACKET_BROADCAST;
else
skb->pkt_type=PACKET_MULTICAST;
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index 9a1849a83e2a..911b21602ff2 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -27,8 +27,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*
* Changelog:
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 82ee6ed954cb..2ea7efd11857 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -131,12 +131,12 @@ static inline struct pppoe_net *pppoe_pernet(struct net *net)
static inline int cmp_2_addr(struct pppoe_addr *a, struct pppoe_addr *b)
{
- return a->sid == b->sid && !memcmp(a->remote, b->remote, ETH_ALEN);
+ return a->sid == b->sid && ether_addr_equal(a->remote, b->remote);
}
static inline int cmp_addr(struct pppoe_addr *a, __be16 sid, char *addr)
{
- return a->sid == sid && !memcmp(a->remote, addr, ETH_ALEN);
+ return a->sid == sid && ether_addr_equal(a->remote, addr);
}
#if 8 % PPPOE_HASH_BITS
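
Both the plip and pppoe conversions replace open-coded MAC address tests with helpers from <linux/etherdevice.h>:

    is_multicast_ether_addr(eth->h_dest)    /* was: *eth->h_dest & 1 */
    ether_addr_equal(a->remote, b->remote)  /* was: memcmp(a->remote, b->remote, ETH_ALEN) == 0 */

ether_addr_equal_64bits() is the faster variant used in plip; note that it requires each address to be followed by two bytes of readable padding, a requirement these call sites satisfy.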
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 34b0de09d881..736050d6b451 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1366,6 +1366,8 @@ static int team_user_linkup_option_get(struct team *team,
return 0;
}
+static void __team_carrier_check(struct team *team);
+
static int team_user_linkup_option_set(struct team *team,
struct team_gsetter_ctx *ctx)
{
@@ -1373,6 +1375,7 @@ static int team_user_linkup_option_set(struct team *team,
port->user.linkup = ctx->data.bool_val;
team_refresh_port_linkup(port);
+ __team_carrier_check(port->team);
return 0;
}
@@ -1392,6 +1395,7 @@ static int team_user_linkup_en_option_set(struct team *team,
port->user.linkup_enabled = ctx->data.bool_val;
team_refresh_port_linkup(port);
+ __team_carrier_check(port->team);
return 0;
}
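
The fix itself is the two added __team_carrier_check() calls (the forward declaration just keeps the definition where it was): changing a port's user-space link state now re-evaluates the aggregate link immediately, so the team device's carrier state follows without waiting for the next port event:

    port->user.linkup = ctx->data.bool_val;
    team_refresh_port_linkup(port);
    __team_carrier_check(port->team);   /* update the team device's carrier */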
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 782e38bfc1ee..09f66624eaca 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -110,7 +110,7 @@ struct tap_filter {
unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
};
-/* DEFAULT_MAX_NUM_RSS_QUEUES were choosed to let the rx/tx queues allocated for
+/* DEFAULT_MAX_NUM_RSS_QUEUES was chosen to let the rx/tx queues allocated for
* the netdevice to be fit in one page. So we can make sure the success of
* memory allocation. TODO: increase the limit. */
#define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
@@ -119,7 +119,7 @@ struct tap_filter {
#define TUN_FLOW_EXPIRE (3 * HZ)
/* A tun_file connects an open character device to a tuntap netdevice. It
- * also contains all socket related strctures (except sock_fprog and tap_filter)
+ * also contains all socket related structures (except sock_fprog and tap_filter)
* to serve as one transmit queue for tuntap device. The sock_fprog and
* tap_filter were kept in tun_struct since they were used for filtering for the
* netdevice not for a specific queue (at least I didn't see the requirement for
@@ -152,6 +152,7 @@ struct tun_flow_entry {
struct tun_struct *tun;
u32 rxhash;
+ u32 rps_rxhash;
int queue_index;
unsigned long updated;
};
@@ -220,6 +221,7 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
rxhash, queue_index);
e->updated = jiffies;
e->rxhash = rxhash;
+ e->rps_rxhash = 0;
e->queue_index = queue_index;
e->tun = tun;
hlist_add_head_rcu(&e->hash_link, head);
@@ -232,6 +234,7 @@ static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
e->rxhash, e->queue_index);
+ sock_rps_reset_flow_hash(e->rps_rxhash);
hlist_del_rcu(&e->hash_link);
kfree_rcu(e, rcu);
--tun->flow_count;
@@ -325,6 +328,7 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
/* TODO: keep queueing to old queue until it's empty? */
e->queue_index = queue_index;
e->updated = jiffies;
+ sock_rps_record_flow_hash(e->rps_rxhash);
} else {
spin_lock_bh(&tun->lock);
if (!tun_flow_find(head, rxhash) &&
@@ -341,8 +345,20 @@ unlock:
rcu_read_unlock();
}
+/**
+ * Save the hash received in the stack receive path and update the
+ * flow_hash table accordingly.
+ */
+static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
+{
+ if (unlikely(e->rps_rxhash != hash)) {
+ sock_rps_reset_flow_hash(e->rps_rxhash);
+ e->rps_rxhash = hash;
+ }
+}
+
/* We try to identify a flow through its rxhash first. The reason that
- * we do not check rxq no. is becuase some cards(e.g 82599), chooses
+ * we do not check rxq no. is because some cards (e.g. 82599) choose
* the rxq based on the txq where the last packet of the flow comes. As
* the userspace application move between processors, we may get a
* different rxq no. here. If we could not get rxhash, then we would
@@ -358,12 +374,13 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
rcu_read_lock();
numqueues = ACCESS_ONCE(tun->numqueues);
- txq = skb_get_rxhash(skb);
+ txq = skb_get_hash(skb);
if (txq) {
e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
- if (e)
+ if (e) {
+ tun_flow_save_rps_rxhash(e, txq);
txq = e->queue_index;
- else
+ } else
/* use multiply and shift instead of expensive divide */
txq = ((u64)txq * numqueues) >> 32;
} else if (likely(skb_rx_queue_recorded(skb))) {
@@ -531,7 +548,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
err = 0;
- /* Re-attach the filter to presist device */
+	/* Re-attach the filter to the persistent device */
if (!skip_filter && (tun->filter_attached == true)) {
err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
if (!err)
@@ -728,6 +745,22 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
if (txq >= tun->numqueues)
goto drop;
+ if (tun->numqueues == 1) {
+ /* Select queue was not called for the skbuff, so we extract the
+ * RPS hash and save it into the flow_table here.
+ */
+ __u32 rxhash;
+
+ rxhash = skb_get_hash(skb);
+ if (rxhash) {
+ struct tun_flow_entry *e;
+ e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
+ rxhash);
+ if (e)
+ tun_flow_save_rps_rxhash(e, rxhash);
+ }
+ }
+
tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
BUG_ON(!tfile);
@@ -819,9 +852,9 @@ static void tun_poll_controller(struct net_device *dev)
* Tun only receives frames when:
* 1) the char device endpoint gets data from user space
* 2) the tun socket gets a sendmsg call from user space
- * Since both of those are syncronous operations, we are guaranteed
+ * Since both of those are synchronous operations, we are guaranteed
* never to have pending data when we poll for it
- * so theres nothing to do here but return.
+ * so there is nothing to do here but return.
* We need this though so netpoll recognizes us as an interface that
* supports polling, which enables bridge devices in virt setups to
* still use netconsole
@@ -1146,7 +1179,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb_reset_network_header(skb);
skb_probe_transport_header(skb, 0);
- rxhash = skb_get_rxhash(skb);
+ rxhash = skb_get_hash(skb);
netif_rx_ni(skb);
tun->dev->stats.rx_packets++;
@@ -1184,7 +1217,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
{
struct tun_pi pi = { 0, skb->protocol };
ssize_t total = 0;
- int vlan_offset = 0;
+ int vlan_offset = 0, copied;
if (!(tun->flags & TUN_NO_PI)) {
if ((len -= sizeof(pi)) < 0)
@@ -1248,6 +1281,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
total += tun->vnet_hdr_sz;
}
+ copied = total;
+ total += skb->len;
if (!vlan_tx_tag_present(skb)) {
len = min_t(int, skb->len, len);
} else {
@@ -1262,24 +1297,24 @@ static ssize_t tun_put_user(struct tun_struct *tun,
vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
len = min_t(int, skb->len + VLAN_HLEN, len);
+ total += VLAN_HLEN;
copy = min_t(int, vlan_offset, len);
- ret = skb_copy_datagram_const_iovec(skb, 0, iv, total, copy);
+ ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
len -= copy;
- total += copy;
+ copied += copy;
if (ret || !len)
goto done;
copy = min_t(int, sizeof(veth), len);
- ret = memcpy_toiovecend(iv, (void *)&veth, total, copy);
+ ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
len -= copy;
- total += copy;
+ copied += copy;
if (ret || !len)
goto done;
}
- skb_copy_datagram_const_iovec(skb, vlan_offset, iv, total, len);
- total += len;
+ skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
done:
tun->dev->stats.tx_packets++;
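
The rework above separates two counters the old code conflated: copied is the running write offset into the user iovec, while total is the on-the-wire frame length that gets reported even when the copy is truncated by a short read buffer. A hedged userspace sketch of that accounting (function and variable names are illustrative, not the driver's):

    #include <stdio.h>
    #include <string.h>

    /* Copy up to 'len' bytes of a 'frame_len'-byte frame into 'buf',
     * but report the full frame length so the caller can detect
     * truncation: the same split as 'copied' vs 'total' above. */
    static long put_frame(const char *frame, long frame_len,
                          char *buf, long len)
    {
        long copied = frame_len < len ? frame_len : len;

        memcpy(buf, frame, copied);
        return frame_len;            /* total, not copied */
    }

    int main(void)
    {
        char frame[1514] = "payload";
        char small[512];
        long ret = put_frame(frame, sizeof(frame), small, sizeof(small));

        if (ret > (long)sizeof(small))
            printf("frame truncated: %ld of %ld bytes\n",
                   (long)sizeof(small), ret);
        return 0;
    }
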
@@ -1289,8 +1324,7 @@ done:
}
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
- struct kiocb *iocb, const struct iovec *iv,
- ssize_t len, int noblock)
+ const struct iovec *iv, ssize_t len, int noblock)
{
DECLARE_WAITQUEUE(wait, current);
struct sk_buff *skb;
@@ -1353,9 +1387,11 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
goto out;
}
- ret = tun_do_read(tun, tfile, iocb, iv, len,
+ ret = tun_do_read(tun, tfile, iv, len,
file->f_flags & O_NONBLOCK);
ret = min_t(ssize_t, ret, len);
+ if (ret > 0)
+ iocb->ki_pos = ret;
out:
tun_put(tun);
return ret;
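
With the kiocb dropped from tun_do_read(), the only piece of iocb state it touched, ki_pos, is now handled by the aio entry point, which clamps the return value and records progress on success. A simplified sketch of that calling convention (struct my_iocb is a stand-in, not the kernel's struct kiocb):

    #include <stdio.h>

    /* Simplified stand-in for struct kiocb. */
    struct my_iocb { long ki_pos; };

    static long do_read(char *buf, long len)
    {
        (void)buf;
        return len + 10;    /* pretend the device had more data */
    }

    static long aio_read(struct my_iocb *iocb, char *buf, long len)
    {
        long ret = do_read(buf, len);

        if (ret > len)      /* clamp to what the caller asked for */
            ret = len;
        if (ret > 0)        /* record progress only on success */
            iocb->ki_pos = ret;
        return ret;
    }

    int main(void)
    {
        struct my_iocb iocb = { 0 };
        char buf[64];

        printf("ret=%ld pos=%ld\n",
               aio_read(&iocb, buf, sizeof(buf)), iocb.ki_pos);
        return 0;
    }
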
@@ -1452,7 +1488,7 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
SOL_PACKET, TUN_TX_TIMESTAMP);
goto out;
}
- ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
+ ret = tun_do_read(tun, tfile, m->msg_iov, total_len,
flags & MSG_DONTWAIT);
if (ret > total_len) {
m->msg_flags |= MSG_TRUNC;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 85e4a01670f0..47b0f732b0b1 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -276,12 +276,12 @@ config USB_NET_CDC_MBIM
module will be called cdc_mbim.
config USB_NET_DM9601
- tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices"
+ tristate "Davicom DM96xx based USB 10/100 ethernet devices"
depends on USB_USBNET
select CRC32
help
- This option adds support for Davicom DM9601 based USB 1.1
- 10/100 Ethernet adapters.
+ This option adds support for Davicom DM9601/DM9620/DM9621A
+ based USB 10/100 Ethernet adapters.
config USB_NET_SR9700
tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices"
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index bdaa12d07a12..bad857aacd1a 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -16,8 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ASIX_H
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 577c72d5f369..5c55f11572ba 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -16,8 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "asix.h"
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 386a3df53678..9765a7d4766d 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -16,8 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "asix.h"
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index 723b3879ecc2..5f18fcb8dcc7 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -21,8 +21,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "asix.h"
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 8e8d0fcd4979..d6f64dad05bc 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index df507e6dbb9c..da6b8a5dcc0d 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -24,8 +24,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 08d55b6bf272..57fd7170ae60 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 2023f3ea891e..640406ac4358 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
// #define DEBUG // error path messages, extra info
@@ -487,6 +486,7 @@ static const struct driver_info wwan_info = {
#define ZTE_VENDOR_ID 0x19D2
#define DELL_VENDOR_ID 0x413C
#define REALTEK_VENDOR_ID 0x0bda
+#define SAMSUNG_VENDOR_ID 0x04e8
static const struct usb_device_id products[] = {
/* BLACKLIST !!
@@ -653,6 +653,15 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+#if defined(CONFIG_USB_RTL8152) || defined(CONFIG_USB_RTL8152_MODULE)
+/* Samsung USB Ethernet Adapters */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, 0xa101, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+#endif
+
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c
index 0d1fe89ae0bd..3b2593a7602b 100644
--- a/drivers/net/usb/cdc_subset.c
+++ b/drivers/net/usb/cdc_subset.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 1e207f086b75..5233e6d070ec 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index c6867f926cff..14aa48fa8d7e 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -1,5 +1,5 @@
/*
- * Davicom DM9601 USB 1.1 10/100Mbps ethernet devices
+ * Davicom DM96xx USB 10/100Mbps ethernet devices
*
* Peter Korsgaard <jacmet@sunsite.dk>
*
@@ -364,7 +364,12 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->ethtool_ops = &dm9601_ethtool_ops;
dev->net->hard_header_len += DM_TX_OVERHEAD;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
- dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD;
+
+ /* dm9620/21a require room for 4 byte padding, even in dm9601
+ * mode, so we need +1 to be able to receive full size
+ * ethernet frames.
+ */
+ dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD + 1;
dev->mii.dev = dev->net;
dev->mii.mdio_read = dm9601_mdio_read;
@@ -468,7 +473,7 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
gfp_t flags)
{
- int len;
+ int len, pad;
/* format:
b1: packet length low
@@ -476,12 +481,23 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
b3..n: packet data
*/
- len = skb->len;
+ len = skb->len + DM_TX_OVERHEAD;
+
+ /* Work around a dm962x erratum: the tx fifo can get out of
+ * sync if a USB bulk transfer is retried right after a packet
+ * whose length is odd or a multiple of maxpacket, so add up
+ * to 3 bytes of padding.
+ */
+ while ((len & 1) || !(len % dev->maxpacket))
+ len++;
- if (skb_headroom(skb) < DM_TX_OVERHEAD) {
+ len -= DM_TX_OVERHEAD; /* hw header doesn't count as part of length */
+ pad = len - skb->len;
+
+ if (skb_headroom(skb) < DM_TX_OVERHEAD || skb_tailroom(skb) < pad) {
struct sk_buff *skb2;
- skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags);
+ skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, pad, flags);
dev_kfree_skb_any(skb);
skb = skb2;
if (!skb)
@@ -490,10 +506,10 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
__skb_push(skb, DM_TX_OVERHEAD);
- /* usbnet adds padding if length is a multiple of packet size
- if so, adjust length value in header */
- if ((skb->len % dev->maxpacket) == 0)
- len++;
+ if (pad) {
+ memset(skb->data + skb->len, 0, pad);
+ __skb_put(skb, pad);
+ }
skb->data[0] = len;
skb->data[1] = len >> 8;
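
The padding loop above terminates after at most three increments: from an even multiple of maxpacket, one bump makes the length odd and a second makes it even and off the boundary; an odd starting length can cost one more. A standalone check of that bound (the maxpacket value is illustrative):

    #include <assert.h>
    #include <stdio.h>

    /* Same loop as dm9601_tx_fixup(): grow 'len' until it is even
     * and not a multiple of the endpoint's max packet size. */
    static int padded(int len, int maxpacket)
    {
        while ((len & 1) || !(len % maxpacket))
            len++;
        return len;
    }

    int main(void)
    {
        int maxpacket = 512, len;

        for (len = 1; len < 4096; len++)
            assert(padded(len, maxpacket) - len <= 3);
        printf("512 -> %d\n", padded(512, maxpacket));  /* +2 */
        printf("511 -> %d\n", padded(511, maxpacket));  /* +3 */
        return 0;
    }
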
@@ -543,7 +559,7 @@ static int dm9601_link_reset(struct usbnet *dev)
}
static const struct driver_info dm9601_info = {
- .description = "Davicom DM9601 USB Ethernet",
+ .description = "Davicom DM96xx USB 10/100 Ethernet",
.flags = FLAG_ETHER | FLAG_LINK_INTR,
.bind = dm9601_bind,
.rx_fixup = dm9601_rx_fixup,
@@ -594,6 +610,10 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */
.driver_info = (unsigned long)&dm9601_info,
},
+ {
+ USB_DEVICE(0x0a46, 0x9621), /* DM9621A USB to Fast Ethernet Adapter */
+ .driver_info = (unsigned long)&dm9601_info,
+ },
{}, // END
};
@@ -612,5 +632,5 @@ static struct usb_driver dm9601_driver = {
module_usb_driver(dm9601_driver);
MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
-MODULE_DESCRIPTION("Davicom DM9601 USB 1.1 ethernet devices");
+MODULE_DESCRIPTION("Davicom DM96xx USB 10/100 ethernet devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index a7e3f4e55bf3..3569da1135c1 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
// #define DEBUG // error path messages, extra info
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 86292e6aaf49..1a482344b3f5 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -185,7 +185,6 @@ enum rx_ctrl_state{
#define BM_REQUEST_TYPE (0xa1)
#define B_NOTIFICATION (0x20)
#define W_VALUE (0x0)
-#define W_INDEX (0x2)
#define W_LENGTH (0x2)
#define B_OVERRUN (0x1<<6)
@@ -1487,6 +1486,7 @@ static void tiocmget_intr_callback(struct urb *urb)
struct uart_icount *icount;
struct hso_serial_state_notification *serial_state_notification;
struct usb_device *usb;
+ int if_num;
/* Sanity checks */
if (!serial)
@@ -1495,15 +1495,24 @@ static void tiocmget_intr_callback(struct urb *urb)
handle_usb_error(status, __func__, serial->parent);
return;
}
+
+ /* tiocmget is only supported on HSO_PORT_MODEM */
tiocmget = serial->tiocmget;
if (!tiocmget)
return;
+ BUG_ON((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM);
+
usb = serial->parent->usb;
+ if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber;
+
+ /* wIndex should be the USB interface number of the port to which the
+ * notification applies, which should always be the Modem port.
+ */
serial_state_notification = &tiocmget->serial_state_notification;
if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE ||
serial_state_notification->bNotification != B_NOTIFICATION ||
le16_to_cpu(serial_state_notification->wValue) != W_VALUE ||
- le16_to_cpu(serial_state_notification->wIndex) != W_INDEX ||
+ le16_to_cpu(serial_state_notification->wIndex) != if_num ||
le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) {
dev_warn(&usb->dev,
"hso received invalid serial state notification\n");
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index ace9e74ffbdd..4ff70b22c6ee 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -20,8 +20,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index afb117c16d2d..250fc21d9781 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -25,8 +25,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
****************************************************************/
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c
index 808d6506da41..acfcc32b323d 100644
--- a/drivers/net/usb/lg-vl600.c
+++ b/drivers/net/usb/lg-vl600.c
@@ -15,8 +15,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 03832d3780aa..36ff0019aa32 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -36,8 +36,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/crc32.h>
@@ -117,7 +116,6 @@ enum {
struct mcs7830_data {
u8 multi_filter[8];
u8 config;
- u8 link_counter;
};
static const char driver_name[] = "MOSCHIP usb-ethernet driver";
@@ -561,26 +559,16 @@ static void mcs7830_status(struct usbnet *dev, struct urb *urb)
{
u8 *buf = urb->transfer_buffer;
bool link, link_changed;
- struct mcs7830_data *data = mcs7830_get_data(dev);
if (urb->actual_length < 16)
return;
- link = !(buf[1] & 0x20);
+ link = !(buf[1] == 0x20);
link_changed = netif_carrier_ok(dev->net) != link;
if (link_changed) {
- data->link_counter++;
- /*
- track link state 20 times to guard against erroneous
- link state changes reported sometimes by the chip
- */
- if (data->link_counter > 20) {
- data->link_counter = 0;
- usbnet_link_change(dev, link, 0);
- netdev_dbg(dev->net, "Link Status is: %d\n", link);
- }
- } else
- data->link_counter = 0;
+ usbnet_link_change(dev, link, 0);
+ netdev_dbg(dev->net, "Link Status is: %d\n", link);
+ }
}
static const struct driver_info moschip_info = {
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 93e0716a118c..8b2493f05a1c 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
// #define DEBUG // error path messages, extra info
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 0fcc8e65a068..ee705b7bba08 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
// #define DEBUG // error path messages, extra info
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 51073721e224..bf7d549ab511 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -24,7 +24,7 @@
#include <linux/ipv6.h>
/* Version Information */
-#define DRIVER_VERSION "v1.02.0 (2013/10/28)"
+#define DRIVER_VERSION "v1.03.0 (2013/12/26)"
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
#define DRIVER_DESC "Realtek RTL8152 Based USB 2.0 Ethernet Adapters"
#define MODULENAME "r8152"
@@ -39,15 +39,24 @@
#define PLA_RXFIFO_CTRL2 0xc0a8
#define PLA_FMC 0xc0b4
#define PLA_CFG_WOL 0xc0b6
+#define PLA_TEREDO_CFG 0xc0bc
#define PLA_MAR 0xcd00
+#define PLA_BACKUP 0xd000
#define PAL_BDC_CR 0xd1a0
+#define PLA_TEREDO_TIMER 0xd2cc
+#define PLA_REALWOW_TIMER 0xd2e8
#define PLA_LEDSEL 0xdd90
#define PLA_LED_FEATURE 0xdd92
#define PLA_PHYAR 0xde00
+#define PLA_BOOT_CTRL 0xe004
#define PLA_GPHY_INTR_IMR 0xe022
#define PLA_EEE_CR 0xe040
#define PLA_EEEP_CR 0xe080
#define PLA_MAC_PWR_CTRL 0xe0c0
+#define PLA_MAC_PWR_CTRL2 0xe0ca
+#define PLA_MAC_PWR_CTRL3 0xe0cc
+#define PLA_MAC_PWR_CTRL4 0xe0ce
+#define PLA_WDT6_CTRL 0xe428
#define PLA_TCR0 0xe610
#define PLA_TCR1 0xe612
#define PLA_TXFIFO_CTRL 0xe618
@@ -73,16 +82,25 @@
#define PLA_BP_5 0xfc32
#define PLA_BP_6 0xfc34
#define PLA_BP_7 0xfc36
+#define PLA_BP_EN 0xfc38
+#define USB_U2P3_CTRL 0xb460
#define USB_DEV_STAT 0xb808
#define USB_USB_CTRL 0xd406
#define USB_PHY_CTRL 0xd408
#define USB_TX_AGG 0xd40a
#define USB_RX_BUF_TH 0xd40c
#define USB_USB_TIMER 0xd428
+#define USB_RX_EARLY_AGG 0xd42c
#define USB_PM_CTRL_STATUS 0xd432
#define USB_TX_DMA 0xd434
+#define USB_TOLERANCE 0xd490
+#define USB_LPM_CTRL 0xd41a
#define USB_UPS_CTRL 0xd800
+#define USB_MISC_0 0xd81a
+#define USB_POWER_CUT 0xd80a
+#define USB_AFE_CTRL2 0xd824
+#define USB_WDT11_CTRL 0xe43c
#define USB_BP_BA 0xfc26
#define USB_BP_0 0xfc28
#define USB_BP_1 0xfc2a
@@ -92,14 +110,30 @@
#define USB_BP_5 0xfc32
#define USB_BP_6 0xfc34
#define USB_BP_7 0xfc36
+#define USB_BP_EN 0xfc38
/* OCP Registers */
#define OCP_ALDPS_CONFIG 0x2010
#define OCP_EEE_CONFIG1 0x2080
#define OCP_EEE_CONFIG2 0x2092
#define OCP_EEE_CONFIG3 0x2094
+#define OCP_BASE_MII 0xa400
#define OCP_EEE_AR 0xa41a
#define OCP_EEE_DATA 0xa41c
+#define OCP_PHY_STATUS 0xa420
+#define OCP_POWER_CFG 0xa430
+#define OCP_EEE_CFG 0xa432
+#define OCP_SRAM_ADDR 0xa436
+#define OCP_SRAM_DATA 0xa438
+#define OCP_DOWN_SPEED 0xa442
+#define OCP_EEE_CFG2 0xa5d0
+#define OCP_ADC_CFG 0xbc06
+
+/* SRAM Register */
+#define SRAM_LPF_CFG 0x8012
+#define SRAM_10M_AMP1 0x8080
+#define SRAM_10M_AMP2 0x8082
+#define SRAM_IMPEDANCE 0x8084
/* PLA_RCR */
#define RCR_AAP 0x00000001
@@ -116,14 +150,17 @@
#define RXFIFO_THR2_FULL 0x00000060
#define RXFIFO_THR2_HIGH 0x00000038
#define RXFIFO_THR2_OOB 0x0000004a
+#define RXFIFO_THR2_NORMAL 0x00a0
/* PLA_RXFIFO_CTRL2 */
#define RXFIFO_THR3_FULL 0x00000078
#define RXFIFO_THR3_HIGH 0x00000048
#define RXFIFO_THR3_OOB 0x0000005a
+#define RXFIFO_THR3_NORMAL 0x0110
/* PLA_TXFIFO_CTRL */
#define TXFIFO_THR_NORMAL 0x00400008
+#define TXFIFO_THR_NORMAL2 0x01000008
/* PLA_FMC */
#define FMC_FCR_MCU_EN 0x0001
@@ -131,6 +168,9 @@
/* PLA_EEEP_CR */
#define EEEP_CR_EEEP_TX 0x0002
+/* PLA_WDT6_CTRL */
+#define WDT6_SET_MODE 0x0010
+
/* PLA_TCR0 */
#define TCR0_TX_EMPTY 0x0800
#define TCR0_AUTO_FIFO 0x0080
@@ -168,6 +208,12 @@
/* PLA_CFG_WOL */
#define MAGIC_EN 0x0001
+/* PLA_TEREDO_CFG */
+#define TEREDO_SEL 0x8000
+#define TEREDO_WAKE_MASK 0x7f00
+#define TEREDO_RS_EVENT_MASK 0x00fe
+#define OOB_TEREDO_EN 0x0001
+
/* PAL_BDC_CR */
#define ALDPS_PROXY_MODE 0x0001
@@ -185,6 +231,25 @@
#define D3_CLK_GATED_EN 0x00004000
#define MCU_CLK_RATIO 0x07010f07
#define MCU_CLK_RATIO_MASK 0x0f0f0f0f
+#define ALDPS_SPDWN_RATIO 0x0f87
+
+/* PLA_MAC_PWR_CTRL2 */
+#define EEE_SPDWN_RATIO 0x8007
+
+/* PLA_MAC_PWR_CTRL3 */
+#define PKT_AVAIL_SPDWN_EN 0x0100
+#define SUSPEND_SPDWN_EN 0x0004
+#define U1U2_SPDWN_EN 0x0002
+#define L1_SPDWN_EN 0x0001
+
+/* PLA_MAC_PWR_CTRL4 */
+#define PWRSAVE_SPDWN_EN 0x1000
+#define RXDV_SPDWN_EN 0x0800
+#define TX10MIDLE_EN 0x0100
+#define TP100_SPDWN_EN 0x0020
+#define TP500_SPDWN_EN 0x0010
+#define TP1000_SPDWN_EN 0x0008
+#define EEE_SPDWN_EN 0x0001
/* PLA_GPHY_INTR_IMR */
#define GPHY_STS_MSK 0x0001
@@ -199,6 +264,9 @@
#define EEE_RX_EN 0x0001
#define EEE_TX_EN 0x0002
+/* PLA_BOOT_CTRL */
+#define AUTOLOAD_DONE 0x0002
+
/* USB_DEV_STAT */
#define STAT_SPEED_MASK 0x0006
#define STAT_SPEED_HIGH 0x0000
@@ -208,7 +276,9 @@
#define TX_AGG_MAX_THRESHOLD 0x03
/* USB_RX_BUF_TH */
-#define RX_BUF_THR 0x7a120180
+#define RX_THR_SUPPER 0x0c350180
+#define RX_THR_HIGH 0x7a120180
+#define RX_THR_SLOW 0xffff0180
/* USB_TX_DMA */
#define TEST_MODE_DISABLE 0x00000001
@@ -218,17 +288,55 @@
#define POWER_CUT 0x0100
/* USB_PM_CTRL_STATUS */
-#define RWSUME_INDICATE 0x0001
+#define RESUME_INDICATE 0x0001
/* USB_USB_CTRL */
#define RX_AGG_DISABLE 0x0010
+/* USB_U2P3_CTRL */
+#define U2P3_ENABLE 0x0001
+
+/* USB_POWER_CUT */
+#define PWR_EN 0x0001
+#define PHASE2_EN 0x0008
+
+/* USB_MISC_0 */
+#define PCUT_STATUS 0x0001
+
+/* USB_RX_EARLY_AGG */
+#define EARLY_AGG_SUPPER 0x0e832981
+#define EARLY_AGG_HIGH 0x0e837a12
+#define EARLY_AGG_SLOW 0x0e83ffff
+
+/* USB_WDT11_CTRL */
+#define TIMER11_EN 0x0001
+
+/* USB_LPM_CTRL */
+#define LPM_TIMER_MASK 0x0c
+#define LPM_TIMER_500MS 0x04 /* 500 ms */
+#define LPM_TIMER_500US 0x0c /* 500 us */
+
+/* USB_AFE_CTRL2 */
+#define SEN_VAL_MASK 0xf800
+#define SEN_VAL_NORMAL 0xa000
+#define SEL_RXIDLE 0x0100
+
/* OCP_ALDPS_CONFIG */
#define ENPWRSAVE 0x8000
#define ENPDNPS 0x0200
#define LINKENA 0x0100
#define DIS_SDSAVE 0x0010
+/* OCP_PHY_STATUS */
+#define PHY_STAT_MASK 0x0007
+#define PHY_STAT_LAN_ON 3
+#define PHY_STAT_PWRDN 5
+
+/* OCP_POWER_CFG */
+#define EEE_CLKDIV_EN 0x8000
+#define EN_ALDPS 0x0004
+#define EN_10M_PLLOFF 0x0001
+
/* OCP_EEE_CONFIG1 */
#define RG_TXLPI_MSK_HFDUP 0x8000
#define RG_MATCLR_EN 0x4000
@@ -263,7 +371,36 @@
#define EEE_ADDR 0x003C
#define EEE_DATA 0x0002
+/* OCP_EEE_CFG */
+#define CTAP_SHORT_EN 0x0040
+#define EEE10_EN 0x0010
+
+/* OCP_DOWN_SPEED */
+#define EN_10M_BGOFF 0x0080
+
+/* OCP_EEE_CFG2 */
+#define MY1000_EEE 0x0004
+#define MY100_EEE 0x0002
+
+/* OCP_ADC_CFG */
+#define CKADSEL_L 0x0100
+#define ADC_EN 0x0080
+#define EN_EMI_L 0x0040
+
+/* SRAM_LPF_CFG */
+#define LPF_AUTO_TUNE 0x8000
+
+/* SRAM_10M_AMP1 */
+#define GDAC_IB_UPALL 0x0008
+
+/* SRAM_10M_AMP2 */
+#define AMP_DN 0x0200
+
+/* SRAM_IMPEDANCE */
+#define RX_DRIVING_MASK 0x6000
+
enum rtl_register_content {
+ _1000bps = 0x10,
_100bps = 0x08,
_10bps = 0x04,
LINK_STATUS = 0x02,
@@ -273,6 +410,9 @@ enum rtl_register_content {
#define RTL8152_MAX_TX 10
#define RTL8152_MAX_RX 10
#define INTBUFSIZE 2
+#define CRC_SIZE 4
+#define TX_ALIGN 4
+#define RX_ALIGN 8
#define INTR_LINK 0x0004
@@ -302,6 +442,10 @@ enum rtl8152_flags {
/* Define these values to match your device */
#define VENDOR_ID_REALTEK 0x0bda
#define PRODUCT_ID_RTL8152 0x8152
+#define PRODUCT_ID_RTL8153 0x8153
+
+#define VENDOR_ID_SAMSUNG 0x04e8
+#define PRODUCT_ID_SAMSUNG 0xa101
#define MCU_TYPE_PLA 0x0100
#define MCU_TYPE_USB 0x0000
@@ -363,6 +507,15 @@ struct r8152 {
spinlock_t rx_lock, tx_lock;
struct delayed_work schedule;
struct mii_if_info mii;
+
+ struct rtl_ops {
+ void (*init)(struct r8152 *);
+ int (*enable)(struct r8152 *);
+ void (*disable)(struct r8152 *);
+ void (*down)(struct r8152 *);
+ void (*unload)(struct r8152 *);
+ } rtl_ops;
+
int intr_interval;
u32 msg_enable;
u32 tx_qlen;
@@ -375,7 +528,11 @@ struct r8152 {
enum rtl_version {
RTL_VER_UNKNOWN = 0,
RTL_VER_01,
- RTL_VER_02
+ RTL_VER_02,
+ RTL_VER_03,
+ RTL_VER_04,
+ RTL_VER_05,
+ RTL_VER_MAX
};
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
@@ -427,8 +584,8 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
void *data, u16 type)
{
- u16 limit = 64;
- int ret = 0;
+ u16 limit = 64;
+ int ret = 0;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return -ENODEV;
@@ -467,9 +624,9 @@ static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen,
u16 size, void *data, u16 type)
{
- int ret;
- u16 byteen_start, byteen_end, byen;
- u16 limit = 512;
+ int ret;
+ u16 byteen_start, byteen_end, byen;
+ u16 limit = 512;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return -ENODEV;
@@ -653,45 +810,54 @@ static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data)
generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
}
-static void r8152_mdio_write(struct r8152 *tp, u32 reg_addr, u32 value)
+static u16 ocp_reg_read(struct r8152 *tp, u16 addr)
{
- u32 ocp_data;
- int i;
+ u16 ocp_base, ocp_index;
- ocp_data = PHYAR_FLAG | ((reg_addr & 0x1f) << 16) |
- (value & 0xffff);
+ ocp_base = addr & 0xf000;
+ if (ocp_base != tp->ocp_base) {
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, ocp_base);
+ tp->ocp_base = ocp_base;
+ }
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_PHYAR, ocp_data);
+ ocp_index = (addr & 0x0fff) | 0xb000;
+ return ocp_read_word(tp, MCU_TYPE_PLA, ocp_index);
+}
- for (i = 20; i > 0; i--) {
- udelay(25);
- ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_PHYAR);
- if (!(ocp_data & PHYAR_FLAG))
- break;
+static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data)
+{
+ u16 ocp_base, ocp_index;
+
+ ocp_base = addr & 0xf000;
+ if (ocp_base != tp->ocp_base) {
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, ocp_base);
+ tp->ocp_base = ocp_base;
}
- udelay(20);
+
+ ocp_index = (addr & 0x0fff) | 0xb000;
+ ocp_write_word(tp, MCU_TYPE_PLA, ocp_index, data);
}
-static int r8152_mdio_read(struct r8152 *tp, u32 reg_addr)
+static inline void r8152_mdio_write(struct r8152 *tp, u32 reg_addr, u32 value)
{
- u32 ocp_data;
- int i;
-
- ocp_data = (reg_addr & 0x1f) << 16;
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_PHYAR, ocp_data);
+ ocp_reg_write(tp, OCP_BASE_MII + reg_addr * 2, value);
+}
- for (i = 20; i > 0; i--) {
- udelay(25);
- ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_PHYAR);
- if (ocp_data & PHYAR_FLAG)
- break;
- }
- udelay(20);
+static inline int r8152_mdio_read(struct r8152 *tp, u32 reg_addr)
+{
+ return ocp_reg_read(tp, OCP_BASE_MII + reg_addr * 2);
+}
- if (!(ocp_data & PHYAR_FLAG))
- return -EAGAIN;
+static void sram_write(struct r8152 *tp, u16 addr, u16 data)
+{
+ ocp_reg_write(tp, OCP_SRAM_ADDR, addr);
+ ocp_reg_write(tp, OCP_SRAM_DATA, data);
+}
- return (u16)(ocp_data & 0xffff);
+static u16 sram_read(struct r8152 *tp, u16 addr)
+{
+ ocp_reg_write(tp, OCP_SRAM_ADDR, addr);
+ return ocp_reg_read(tp, OCP_SRAM_DATA);
}
static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
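
ocp_reg_read()/ocp_reg_write() above implement a banked-register scheme: the top nibble of the 16-bit OCP address selects a base that is latched in the device, only the low 12 bits travel with each access, and the driver caches the last base to skip redundant bank switches. A device-free sketch of the pattern (the register file and write_base() are stand-ins):

    #include <stdio.h>
    #include <stdint.h>

    static uint16_t bank_reg;              /* hardware base-select latch */
    static uint16_t regs[16][0x1000];      /* fake banked register file */

    static void write_base(uint16_t base)  { bank_reg = base; }

    struct dev { uint16_t ocp_base; };     /* cached copy of bank_reg */

    static uint16_t reg_read(struct dev *d, uint16_t addr)
    {
        uint16_t base = addr & 0xf000;

        if (base != d->ocp_base) {         /* switch banks only on change */
            write_base(base);
            d->ocp_base = base;
        }
        return regs[bank_reg >> 12][addr & 0x0fff];
    }

    int main(void)
    {
        struct dev d = { 0xffff };         /* force the first switch */

        regs[0xa][0x420] = 3;              /* e.g. a PHY status word */
        printf("0xa420 = %u\n", reg_read(&d, 0xa420));
        printf("0xa430 = %u\n", reg_read(&d, 0xa430)); /* no re-switch */
        return 0;
    }
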
@@ -715,20 +881,6 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
r8152_mdio_write(tp, reg, val);
}
-static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data)
-{
- u16 ocp_base, ocp_index;
-
- ocp_base = addr & 0xf000;
- if (ocp_base != tp->ocp_base) {
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, ocp_base);
- tp->ocp_base = ocp_base;
- }
-
- ocp_index = (addr & 0x0fff) | 0xb000;
- ocp_write_word(tp, MCU_TYPE_PLA, ocp_index, data);
-}
-
static
int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags);
@@ -814,10 +966,12 @@ static void read_bulk_callback(struct urb *urb)
case -ENOENT:
return; /* the urb is in unlink state */
case -ETIME:
- pr_warn_ratelimited("may be reset is needed?..\n");
+ if (net_ratelimit())
+ netdev_warn(netdev, "maybe reset is needed?\n");
break;
default:
- pr_warn_ratelimited("Rx status %d\n", status);
+ if (net_ratelimit())
+ netdev_warn(netdev, "Rx status %d\n", status);
break;
}
@@ -850,7 +1004,8 @@ static void write_bulk_callback(struct urb *urb)
stats = rtl8152_get_stats(tp->netdev);
if (status) {
- pr_warn_ratelimited("Tx status %d\n", status);
+ if (net_ratelimit())
+ netdev_warn(tp->netdev, "Tx status %d\n", status);
stats->tx_errors += agg->skb_num;
} else {
stats->tx_packets += agg->skb_num;
@@ -927,17 +1082,17 @@ resubmit:
netif_device_detach(tp->netdev);
else if (res)
netif_err(tp, intr, tp->netdev,
- "can't resubmit intr, status %d\n", res);
+ "can't resubmit intr, status %d\n", res);
}
static inline void *rx_agg_align(void *data)
{
- return (void *)ALIGN((uintptr_t)data, 8);
+ return (void *)ALIGN((uintptr_t)data, RX_ALIGN);
}
static inline void *tx_agg_align(void *data)
{
- return (void *)ALIGN((uintptr_t)data, 4);
+ return (void *)ALIGN((uintptr_t)data, TX_ALIGN);
}
static void free_all_mem(struct r8152 *tp)
@@ -1006,7 +1161,8 @@ static int alloc_all_mem(struct r8152 *tp)
if (buf != rx_agg_align(buf)) {
kfree(buf);
- buf = kmalloc_node(rx_buf_sz + 8, GFP_KERNEL, node);
+ buf = kmalloc_node(rx_buf_sz + RX_ALIGN, GFP_KERNEL,
+ node);
if (!buf)
goto err1;
}
@@ -1031,7 +1187,8 @@ static int alloc_all_mem(struct r8152 *tp)
if (buf != tx_agg_align(buf)) {
kfree(buf);
- buf = kmalloc_node(rx_buf_sz + 4, GFP_KERNEL, node);
+ buf = kmalloc_node(rx_buf_sz + TX_ALIGN, GFP_KERNEL,
+ node);
if (!buf)
goto err1;
}
@@ -1231,7 +1388,7 @@ static void rx_bottom(struct r8152 *tp)
stats = rtl8152_get_stats(netdev);
- pkt_len -= 4; /* CRC */
+ pkt_len -= CRC_SIZE;
rx_data += sizeof(struct rx_desc);
skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
@@ -1246,7 +1403,7 @@ static void rx_bottom(struct r8152 *tp)
stats->rx_packets++;
stats->rx_bytes += pkt_len;
- rx_data = rx_agg_align(rx_data + pkt_len + 4);
+ rx_data = rx_agg_align(rx_data + pkt_len + CRC_SIZE);
rx_desc = (struct rx_desc *)rx_data;
len_used = (int)(rx_data - (u8 *)agg->head);
len_used += sizeof(struct rx_desc);
@@ -1336,7 +1493,7 @@ static void rtl8152_tx_timeout(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int i;
- netif_warn(tp, tx_err, netdev, "Tx timeout.\n");
+ netif_warn(tp, tx_err, netdev, "Tx timeout\n");
for (i = 0; i < RTL8152_MAX_TX; i++)
usb_unlink_urb(tp->tx_info[i].urb);
}
@@ -1449,13 +1606,11 @@ static inline u8 rtl8152_get_speed(struct r8152 *tp)
return ocp_read_byte(tp, MCU_TYPE_PLA, PLA_PHYSTATUS);
}
-static int rtl8152_enable(struct r8152 *tp)
+static void rtl_set_eee_plus(struct r8152 *tp)
{
u32 ocp_data;
- int i, ret;
u8 speed;
- set_tx_qlen(tp);
speed = rtl8152_get_speed(tp);
if (speed & _10bps) {
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
@@ -1466,6 +1621,12 @@ static int rtl8152_enable(struct r8152 *tp)
ocp_data &= ~EEEP_CR_EEEP_TX;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
}
+}
+
+static int rtl_enable(struct r8152 *tp)
+{
+ u32 ocp_data;
+ int i, ret;
r8152b_reset_packet_filter(tp);
@@ -1487,6 +1648,47 @@ static int rtl8152_enable(struct r8152 *tp)
return ret;
}
+static int rtl8152_enable(struct r8152 *tp)
+{
+ set_tx_qlen(tp);
+ rtl_set_eee_plus(tp);
+
+ return rtl_enable(tp);
+}
+
+static void r8153_set_rx_agg(struct r8152 *tp)
+{
+ u8 speed;
+
+ speed = rtl8152_get_speed(tp);
+ if (speed & _1000bps) {
+ if (tp->udev->speed == USB_SPEED_SUPER) {
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH,
+ RX_THR_SUPPER);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG,
+ EARLY_AGG_SUPPER);
+ } else {
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH,
+ RX_THR_HIGH);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG,
+ EARLY_AGG_HIGH);
+ }
+ } else {
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_SLOW);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG,
+ EARLY_AGG_SLOW);
+ }
+}
+
+static int rtl8153_enable(struct r8152 *tp)
+{
+ set_tx_qlen(tp);
+ rtl_set_eee_plus(tp);
+ r8153_set_rx_agg(tp);
+
+ return rtl_enable(tp);
+}
+
static void rtl8152_disable(struct r8152 *tp)
{
struct net_device_stats *stats = rtl8152_get_stats(tp->netdev);
@@ -1596,7 +1798,7 @@ static void r8152b_exit_oob(struct r8152 *tp)
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL);
ocp_write_byte(tp, MCU_TYPE_USB, USB_TX_AGG, TX_AGG_MAX_THRESHOLD);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_BUF_THR);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_HIGH);
ocp_write_dword(tp, MCU_TYPE_USB, USB_TX_DMA,
TEST_MODE_DISABLE | TX_SIZE_ADJUST1);
@@ -1613,8 +1815,8 @@ static void r8152b_exit_oob(struct r8152 *tp)
static void r8152b_enter_oob(struct r8152 *tp)
{
- u32 ocp_data;
- int i;
+ u32 ocp_data;
+ int i;
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
ocp_data &= ~NOW_IS_OOB;
@@ -1685,15 +1887,269 @@ static inline void r8152b_enable_aldps(struct r8152 *tp)
LINKENA | DIS_SDSAVE);
}
+static void r8153_hw_phy_cfg(struct r8152 *tp)
+{
+ u32 ocp_data;
+ u16 data;
+
+ ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+ r8152_mdio_write(tp, MII_BMCR, BMCR_ANENABLE);
+
+ if (tp->version == RTL_VER_03) {
+ data = ocp_reg_read(tp, OCP_EEE_CFG);
+ data &= ~CTAP_SHORT_EN;
+ ocp_reg_write(tp, OCP_EEE_CFG, data);
+ }
+
+ data = ocp_reg_read(tp, OCP_POWER_CFG);
+ data |= EEE_CLKDIV_EN;
+ ocp_reg_write(tp, OCP_POWER_CFG, data);
+
+ data = ocp_reg_read(tp, OCP_DOWN_SPEED);
+ data |= EN_10M_BGOFF;
+ ocp_reg_write(tp, OCP_DOWN_SPEED, data);
+ data = ocp_reg_read(tp, OCP_POWER_CFG);
+ data |= EN_10M_PLLOFF;
+ ocp_reg_write(tp, OCP_POWER_CFG, data);
+ data = sram_read(tp, SRAM_IMPEDANCE);
+ data &= ~RX_DRIVING_MASK;
+ sram_write(tp, SRAM_IMPEDANCE, data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
+ ocp_data |= PFM_PWM_SWITCH;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
+
+ data = sram_read(tp, SRAM_LPF_CFG);
+ data |= LPF_AUTO_TUNE;
+ sram_write(tp, SRAM_LPF_CFG, data);
+
+ data = sram_read(tp, SRAM_10M_AMP1);
+ data |= GDAC_IB_UPALL;
+ sram_write(tp, SRAM_10M_AMP1, data);
+ data = sram_read(tp, SRAM_10M_AMP2);
+ data |= AMP_DN;
+ sram_write(tp, SRAM_10M_AMP2, data);
+}
+
+static void r8153_u1u2en(struct r8152 *tp, int enable)
+{
+ u8 u1u2[8];
+
+ if (enable)
+ memset(u1u2, 0xff, sizeof(u1u2));
+ else
+ memset(u1u2, 0x00, sizeof(u1u2));
+
+ usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
+}
+
+static void r8153_u2p3en(struct r8152 *tp, int enable)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
+ if (enable)
+ ocp_data |= U2P3_ENABLE;
+ else
+ ocp_data &= ~U2P3_ENABLE;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
+}
+
+static void r8153_power_cut_en(struct r8152 *tp, int enable)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT);
+ if (enable)
+ ocp_data |= PWR_EN | PHASE2_EN;
+ else
+ ocp_data &= ~(PWR_EN | PHASE2_EN);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+ ocp_data &= ~PCUT_STATUS;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
+}
+
+static void r8153_teredo_off(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
+ ocp_data &= ~(TEREDO_SEL | TEREDO_RS_EVENT_MASK | OOB_TEREDO_EN);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_WDT6_CTRL, WDT6_SET_MODE);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_REALWOW_TIMER, 0);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
+}
+
+static void r8153_first_init(struct r8152 *tp)
+{
+ u32 ocp_data;
+ int i;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
+ ocp_data |= RXDY_GATED_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+
+ r8153_teredo_off(tp);
+
+ ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+ ocp_data &= ~RCR_ACPT_ALL;
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+
+ r8153_hw_phy_cfg(tp);
+
+ rtl8152_nic_reset(tp);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ ocp_data &= ~NOW_IS_OOB;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ ocp_data &= ~MCU_BORW_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
+ for (i = 0; i < 1000; i++) {
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ if (ocp_data & LINK_LIST_READY)
+ break;
+ mdelay(1);
+ }
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ ocp_data |= RE_INIT_LL;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
+ for (i = 0; i < 1000; i++) {
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ if (ocp_data & LINK_LIST_READY)
+ break;
+ mdelay(1);
+ }
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
+ ocp_data &= ~CPCR_RX_VLAN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0);
+ ocp_data |= TCR0_AUTO_FIFO;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR0, ocp_data);
+
+ rtl8152_nic_reset(tp);
+
+ /* rx share fifo credit full threshold */
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_NORMAL);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_NORMAL);
+ /* TX share fifo free credit full threshold */
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL2);
+
+ // rx aggregation
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
+ ocp_data &= ~RX_AGG_DISABLE;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
+}
+
+static void r8153_enter_oob(struct r8152 *tp)
+{
+ u32 ocp_data;
+ int i;
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ ocp_data &= ~NOW_IS_OOB;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+
+ rtl8152_disable(tp);
+
+ for (i = 0; i < 1000; i++) {
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ if (ocp_data & LINK_LIST_READY)
+ break;
+ mdelay(1);
+ }
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ ocp_data |= RE_INIT_LL;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
+ for (i = 0; i < 1000; i++) {
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ if (ocp_data & LINK_LIST_READY)
+ break;
+ mdelay(1);
+ }
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
+ ocp_data |= MAGIC_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
+ ocp_data &= ~TEREDO_WAKE_MASK;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
+ ocp_data |= CPCR_RX_VLAN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PAL_BDC_CR);
+ ocp_data |= ALDPS_PROXY_MODE;
+ ocp_write_word(tp, MCU_TYPE_PLA, PAL_BDC_CR, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5, LAN_WAKE_EN);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
+ ocp_data &= ~RXDY_GATED_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+
+ ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+ ocp_data |= RCR_APM | RCR_AM | RCR_AB;
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+}
+
+static void r8153_disable_aldps(struct r8152 *tp)
+{
+ u16 data;
+
+ data = ocp_reg_read(tp, OCP_POWER_CFG);
+ data &= ~EN_ALDPS;
+ ocp_reg_write(tp, OCP_POWER_CFG, data);
+ msleep(20);
+}
+
+static void r8153_enable_aldps(struct r8152 *tp)
+{
+ u16 data;
+
+ data = ocp_reg_read(tp, OCP_POWER_CFG);
+ data |= EN_ALDPS;
+ ocp_reg_write(tp, OCP_POWER_CFG, data);
+}
+
static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
{
- u16 bmcr, anar;
+ u16 bmcr, anar, gbcr;
int ret = 0;
cancel_delayed_work_sync(&tp->schedule);
anar = r8152_mdio_read(tp, MII_ADVERTISE);
anar &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
ADVERTISE_100HALF | ADVERTISE_100FULL);
+ if (tp->mii.supports_gmii) {
+ gbcr = r8152_mdio_read(tp, MII_CTRL1000);
+ gbcr &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
+ } else {
+ gbcr = 0;
+ }
if (autoneg == AUTONEG_DISABLE) {
if (speed == SPEED_10) {
@@ -1702,6 +2158,9 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
} else if (speed == SPEED_100) {
bmcr = BMCR_SPEED100;
anar |= ADVERTISE_100HALF | ADVERTISE_100FULL;
+ } else if (speed == SPEED_1000 && tp->mii.supports_gmii) {
+ bmcr = BMCR_SPEED1000;
+ gbcr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
} else {
ret = -EINVAL;
goto out;
@@ -1723,6 +2182,16 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
anar |= ADVERTISE_10HALF;
anar |= ADVERTISE_100HALF;
}
+ } else if (speed == SPEED_1000 && tp->mii.supports_gmii) {
+ if (duplex == DUPLEX_FULL) {
+ anar |= ADVERTISE_10HALF | ADVERTISE_10FULL;
+ anar |= ADVERTISE_100HALF | ADVERTISE_100FULL;
+ gbcr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
+ } else {
+ anar |= ADVERTISE_10HALF;
+ anar |= ADVERTISE_100HALF;
+ gbcr |= ADVERTISE_1000HALF;
+ }
} else {
ret = -EINVAL;
goto out;
@@ -1731,6 +2200,9 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
}
+ if (tp->mii.supports_gmii)
+ r8152_mdio_write(tp, MII_CTRL1000, gbcr);
+
r8152_mdio_write(tp, MII_ADVERTISE, anar);
r8152_mdio_write(tp, MII_BMCR, bmcr);
@@ -1752,6 +2224,15 @@ static void rtl8152_down(struct r8152 *tp)
r8152b_enable_aldps(tp);
}
+static void rtl8153_down(struct r8152 *tp)
+{
+ r8153_u1u2en(tp, 0);
+ r8153_power_cut_en(tp, 0);
+ r8153_disable_aldps(tp);
+ r8153_enter_oob(tp);
+ r8153_enable_aldps(tp);
+}
+
static void set_carrier(struct r8152 *tp)
{
struct net_device *netdev = tp->netdev;
@@ -1762,7 +2243,7 @@ static void set_carrier(struct r8152 *tp)
if (speed & LINK_STATUS) {
if (!(tp->speed & LINK_STATUS)) {
- rtl8152_enable(tp);
+ tp->rtl_ops.enable(tp);
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
netif_carrier_on(netdev);
}
@@ -1770,7 +2251,7 @@ static void set_carrier(struct r8152 *tp)
if (tp->speed & LINK_STATUS) {
netif_carrier_off(netdev);
tasklet_disable(&tp->tl);
- rtl8152_disable(tp);
+ tp->rtl_ops.disable(tp);
tasklet_enable(&tp->tl);
}
}
@@ -1806,12 +2287,14 @@ static int rtl8152_open(struct net_device *netdev)
if (res) {
if (res == -ENODEV)
netif_device_detach(tp->netdev);
- netif_warn(tp, ifup, netdev,
- "intr_urb submit failed: %d\n", res);
+ netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
+ res);
return res;
}
- rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL);
+ rtl8152_set_speed(tp, AUTONEG_ENABLE,
+ tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
+ DUPLEX_FULL);
tp->speed = 0;
netif_carrier_off(netdev);
netif_start_queue(netdev);
@@ -1830,7 +2313,7 @@ static int rtl8152_close(struct net_device *netdev)
cancel_delayed_work_sync(&tp->schedule);
netif_stop_queue(netdev);
tasklet_disable(&tp->tl);
- rtl8152_disable(tp);
+ tp->rtl_ops.disable(tp);
tasklet_enable(&tp->tl);
return res;
@@ -1851,9 +2334,16 @@ static void rtl_clear_bp(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0);
}
+static void r8153_clear_bp(struct r8152 *tp)
+{
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0);
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0);
+ rtl_clear_bp(tp);
+}
+
static void r8152b_enable_eee(struct r8152 *tp)
{
- u32 ocp_data;
+ u32 ocp_data;
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
ocp_data |= EEE_RX_EN | EEE_TX_EN;
@@ -1874,6 +2364,22 @@ static void r8152b_enable_eee(struct r8152 *tp)
ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
}
+static void r8153_enable_eee(struct r8152 *tp)
+{
+ u32 ocp_data;
+ u16 data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
+ ocp_data |= EEE_RX_EN | EEE_TX_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
+ data = ocp_reg_read(tp, OCP_EEE_CFG);
+ data |= EEE10_EN;
+ ocp_reg_write(tp, OCP_EEE_CFG, data);
+ data = ocp_reg_read(tp, OCP_EEE_CFG2);
+ data |= MY1000_EEE | MY100_EEE;
+ ocp_reg_write(tp, OCP_EEE_CFG2, data);
+}
+
static void r8152b_enable_fc(struct r8152 *tp)
{
u16 anar;
@@ -1909,7 +2415,7 @@ static void r8152b_init(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
- ocp_data &= ~RWSUME_INDICATE;
+ ocp_data &= ~RESUME_INDICATE;
ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
r8152b_exit_oob(tp);
@@ -1943,6 +2449,75 @@ static void r8152b_init(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
}
+static void r8153_init(struct r8152 *tp)
+{
+ u32 ocp_data;
+ int i;
+
+ r8153_u1u2en(tp, 0);
+
+ for (i = 0; i < 500; i++) {
+ if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
+ AUTOLOAD_DONE)
+ break;
+ msleep(20);
+ }
+
+ for (i = 0; i < 500; i++) {
+ ocp_data = ocp_reg_read(tp, OCP_PHY_STATUS) & PHY_STAT_MASK;
+ if (ocp_data == PHY_STAT_LAN_ON || ocp_data == PHY_STAT_PWRDN)
+ break;
+ msleep(20);
+ }
+
+ r8153_u2p3en(tp, 0);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL);
+ ocp_data &= ~TIMER11_EN;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL, ocp_data);
+
+ r8153_clear_bp(tp);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
+ ocp_data &= ~LED_MODE_MASK;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_LPM_CTRL);
+ ocp_data &= ~LPM_TIMER_MASK;
+ if (tp->udev->speed == USB_SPEED_SUPER)
+ ocp_data |= LPM_TIMER_500US;
+ else
+ ocp_data |= LPM_TIMER_500MS;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_LPM_CTRL, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2);
+ ocp_data &= ~SEN_VAL_MASK;
+ ocp_data |= SEN_VAL_NORMAL | SEL_RXIDLE;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2, ocp_data);
+
+ r8153_power_cut_en(tp, 0);
+ r8153_u1u2en(tp, 1);
+
+ r8153_first_init(tp);
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ALDPS_SPDWN_RATIO);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, EEE_SPDWN_RATIO);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3,
+ PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN |
+ U1U2_SPDWN_EN | L1_SPDWN_EN);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4,
+ PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN |
+ TP100_SPDWN_EN | TP500_SPDWN_EN | TP1000_SPDWN_EN |
+ EEE_SPDWN_EN);
+
+ r8153_enable_eee(tp);
+ r8153_enable_aldps(tp);
+ r8152b_enable_fc(tp);
+
+ r8152_mdio_write(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE |
+ BMCR_ANRESTART);
+}
+
static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
{
struct r8152 *tp = usb_get_intfdata(intf);
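
r8153_init() above leans twice on a bounded poll: re-read a status register until a ready bit appears or the retry budget runs out, sleeping between attempts. A generic sketch of that shape, with a simulated status source in place of the OCP reads:

    #include <stdio.h>

    static int polls;

    /* Simulated status register: reports ready after a few reads. */
    static unsigned int read_status(void)
    {
        return ++polls >= 4 ? 0x2 /* AUTOLOAD_DONE-style bit */ : 0;
    }

    /* Bounded poll, same shape as the r8153_init() loops: up to
     * 'tries' reads, bailing out early once 'mask' is set. */
    static int poll_ready(unsigned int mask, int tries)
    {
        int i;

        for (i = 0; i < tries; i++) {
            if (read_status() & mask)
                return 0;
            /* a real driver would msleep(20) here */
        }
        return -1;    /* timed out */
    }

    int main(void)
    {
        printf("poll: %s after %d reads\n",
               poll_ready(0x2, 500) ? "timeout" : "ready", polls);
        return 0;
    }
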
@@ -1956,7 +2531,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
tasklet_disable(&tp->tl);
}
- rtl8152_down(tp);
+ tp->rtl_ops.down(tp);
return 0;
}
@@ -1965,10 +2540,12 @@ static int rtl8152_resume(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
- r8152b_init(tp);
+ tp->rtl_ops.init(tp);
netif_device_attach(tp->netdev);
if (netif_running(tp->netdev)) {
- rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL);
+ rtl8152_set_speed(tp, AUTONEG_ENABLE,
+ tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
+ DUPLEX_FULL);
tp->speed = 0;
netif_carrier_off(tp->netdev);
set_bit(WORK_ENABLE, &tp->flags);
@@ -2072,6 +2649,18 @@ static void r8152b_get_version(struct r8152 *tp)
case 0x4c10:
tp->version = RTL_VER_02;
break;
+ case 0x5c00:
+ tp->version = RTL_VER_03;
+ tp->mii.supports_gmii = 1;
+ break;
+ case 0x5c10:
+ tp->version = RTL_VER_04;
+ tp->mii.supports_gmii = 1;
+ break;
+ case 0x5c20:
+ tp->version = RTL_VER_05;
+ tp->mii.supports_gmii = 1;
+ break;
default:
netif_info(tp, probe, tp->netdev,
"Unknown version 0x%04x\n", version);
@@ -2079,6 +2668,80 @@ static void r8152b_get_version(struct r8152 *tp)
}
}
+static void rtl8152_unload(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ if (tp->version != RTL_VER_01) {
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
+ ocp_data |= POWER_CUT;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
+ }
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
+ ocp_data &= ~RESUME_INDICATE;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
+}
+
+static void rtl8153_unload(struct r8152 *tp)
+{
+ r8153_power_cut_en(tp, 1);
+}
+
+static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
+{
+ struct rtl_ops *ops = &tp->rtl_ops;
+ int ret = -ENODEV;
+
+ switch (id->idVendor) {
+ case VENDOR_ID_REALTEK:
+ switch (id->idProduct) {
+ case PRODUCT_ID_RTL8152:
+ ops->init = r8152b_init;
+ ops->enable = rtl8152_enable;
+ ops->disable = rtl8152_disable;
+ ops->down = rtl8152_down;
+ ops->unload = rtl8152_unload;
+ ret = 0;
+ break;
+ case PRODUCT_ID_RTL8153:
+ ops->init = r8153_init;
+ ops->enable = rtl8153_enable;
+ ops->disable = rtl8152_disable;
+ ops->down = rtl8153_down;
+ ops->unload = rtl8153_unload;
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case VENDOR_ID_SAMSUNG:
+ switch (id->idProduct) {
+ case PRODUCT_ID_SAMSUNG:
+ ops->init = r8153_init;
+ ops->enable = rtl8153_enable;
+ ops->disable = rtl8152_disable;
+ ops->down = rtl8153_down;
+ ops->unload = rtl8153_unload;
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ if (ret)
+ netif_err(tp, probe, tp->netdev, "Unknown Device\n");
+
+ return ret;
+}
+
static int rtl8152_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -2094,7 +2757,7 @@ static int rtl8152_probe(struct usb_interface *intf,
netdev = alloc_etherdev(sizeof(struct r8152));
if (!netdev) {
- dev_err(&intf->dev, "Out of memory");
+ dev_err(&intf->dev, "Out of memory\n");
return -ENOMEM;
}
@@ -2102,12 +2765,17 @@ static int rtl8152_probe(struct usb_interface *intf,
tp = netdev_priv(netdev);
tp->msg_enable = 0x7FFF;
- tasklet_init(&tp->tl, bottom_half, (unsigned long)tp);
- INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
-
tp->udev = udev;
tp->netdev = netdev;
tp->intf = intf;
+
+ ret = rtl_ops_init(tp, id);
+ if (ret)
+ goto out;
+
+ tasklet_init(&tp->tl, bottom_half, (unsigned long)tp);
+ INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
+
netdev->netdev_ops = &rtl8152_netdev_ops;
netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
@@ -2124,7 +2792,7 @@ static int rtl8152_probe(struct usb_interface *intf,
tp->mii.supports_gmii = 0;
r8152b_get_version(tp);
- r8152b_init(tp);
+ tp->rtl_ops.init(tp);
set_ethernet_addr(tp);
ret = alloc_all_mem(tp);
@@ -2135,11 +2803,11 @@ static int rtl8152_probe(struct usb_interface *intf,
ret = register_netdev(netdev);
if (ret != 0) {
- netif_err(tp, probe, netdev, "couldn't register the device");
+ netif_err(tp, probe, netdev, "couldn't register the device\n");
goto out1;
}
- netif_info(tp, probe, netdev, "%s", DRIVER_VERSION);
+ netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
return 0;
@@ -2150,21 +2818,6 @@ out:
return ret;
}
-static void rtl8152_unload(struct r8152 *tp)
-{
- u32 ocp_data;
-
- if (tp->version != RTL_VER_01) {
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
- ocp_data |= POWER_CUT;
- ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
- }
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
- ocp_data &= ~RWSUME_INDICATE;
- ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
-}
-
static void rtl8152_disconnect(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
@@ -2174,7 +2827,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
set_bit(RTL8152_UNPLUG, &tp->flags);
tasklet_kill(&tp->tl);
unregister_netdev(tp->netdev);
- rtl8152_unload(tp);
+ tp->rtl_ops.unload(tp);
free_all_mem(tp);
free_netdev(tp->netdev);
}
@@ -2183,6 +2836,8 @@ static void rtl8152_disconnect(struct usb_interface *intf)
/* table of devices that work with this driver */
static struct usb_device_id rtl8152_table[] = {
{USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)},
+ {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)},
+ {USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)},
{}
};
diff --git a/drivers/net/usb/r815x.c b/drivers/net/usb/r815x.c
index 2df2f4fb42a7..5fd2ca61d1e2 100644
--- a/drivers/net/usb/r815x.c
+++ b/drivers/net/usb/r815x.c
@@ -226,7 +226,7 @@ static const struct usb_device_id products[] = {
{
USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8153, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
-#if defined(CONFIG_USB_RTL8153) || defined(CONFIG_USB_RTL8153_MODULE)
+#if defined(CONFIG_USB_RTL8152) || defined(CONFIG_USB_RTL8152_MODULE)
.driver_info = 0,
#else
.driver_info = (unsigned long) &r8153_info,
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index cc49aac70224..72aee8516ca8 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index a79e9d334928..a251588762ec 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -21,8 +21,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#define DRIVER_VERSION "v.2.0"
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 66ebbacf066f..0217b282e7ec 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*****************************************************************************/
diff --git a/drivers/net/usb/smsc75xx.h b/drivers/net/usb/smsc75xx.h
index 67eba39e6ee2..2c7ea8fd184f 100644
--- a/drivers/net/usb/smsc75xx.h
+++ b/drivers/net/usb/smsc75xx.h
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*****************************************************************************/
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 3f38ba868f61..96f5eee20fa4 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*****************************************************************************/
diff --git a/drivers/net/usb/smsc95xx.h b/drivers/net/usb/smsc95xx.h
index f360ee372554..526faa0c44e6 100644
--- a/drivers/net/usb/smsc95xx.h
+++ b/drivers/net/usb/smsc95xx.h
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*****************************************************************************/
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 8494bb53ebdc..56c175ebae3c 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
index 35c90307d473..25d1e667a061 100644
--- a/drivers/net/usb/zaurus.c
+++ b/drivers/net/usb/zaurus.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
// #define DEBUG // error path messages, extra info
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7bab4de658a9..7b172408cff0 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
//#define DEBUG
#include <linux/netdevice.h>
@@ -299,35 +298,76 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
return skb;
}
-static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
+static struct sk_buff *receive_small(void *buf, unsigned int len)
{
- struct skb_vnet_hdr *hdr = skb_vnet_hdr(head_skb);
+ struct sk_buff * skb = buf;
+
+ len -= sizeof(struct virtio_net_hdr);
+ skb_trim(skb, len);
+
+ return skb;
+}
+
+static struct sk_buff *receive_big(struct net_device *dev,
+ struct receive_queue *rq,
+ void *buf,
+ unsigned int len)
+{
+ struct page *page = buf;
+ struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
+
+ if (unlikely(!skb))
+ goto err;
+
+ return skb;
+
+err:
+ dev->stats.rx_dropped++;
+ give_pages(rq, page);
+ return NULL;
+}
+
+static struct sk_buff *receive_mergeable(struct net_device *dev,
+ struct receive_queue *rq,
+ void *buf,
+ unsigned int len)
+{
+ struct skb_vnet_hdr *hdr = buf;
+ int num_buf = hdr->mhdr.num_buffers;
+ struct page *page = virt_to_head_page(buf);
+ int offset = buf - page_address(page);
+ struct sk_buff *head_skb = page_to_skb(rq, page, offset, len,
+ MERGE_BUFFER_LEN);
struct sk_buff *curr_skb = head_skb;
- char *buf;
- struct page *page;
- int num_buf, len, offset;
- num_buf = hdr->mhdr.num_buffers;
+ if (unlikely(!curr_skb))
+ goto err_skb;
+
while (--num_buf) {
- int num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
+ int num_skb_frags;
+
buf = virtqueue_get_buf(rq->vq, &len);
if (unlikely(!buf)) {
- pr_debug("%s: rx error: %d buffers missing\n",
- head_skb->dev->name, hdr->mhdr.num_buffers);
- head_skb->dev->stats.rx_length_errors++;
- return -EINVAL;
+ pr_debug("%s: rx error: %d buffers out of %d missing\n",
+ dev->name, num_buf, hdr->mhdr.num_buffers);
+ dev->stats.rx_length_errors++;
+ goto err_buf;
}
if (unlikely(len > MERGE_BUFFER_LEN)) {
pr_debug("%s: rx error: merge buffer too long\n",
- head_skb->dev->name);
+ dev->name);
len = MERGE_BUFFER_LEN;
}
+
+ page = virt_to_head_page(buf);
+ --rq->num;
+
+ num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
- if (unlikely(!nskb)) {
- head_skb->dev->stats.rx_dropped++;
- return -ENOMEM;
- }
+
+ if (unlikely(!nskb))
+ goto err_skb;
if (curr_skb == head_skb)
skb_shinfo(curr_skb)->frag_list = nskb;
else
@@ -341,8 +381,7 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
head_skb->len += len;
head_skb->truesize += MERGE_BUFFER_LEN;
}
- page = virt_to_head_page(buf);
- offset = buf - (char *)page_address(page);
+ offset = buf - page_address(page);
if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
put_page(page);
skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
@@ -351,9 +390,28 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
skb_add_rx_frag(curr_skb, num_skb_frags, page,
offset, len, MERGE_BUFFER_LEN);
}
+ }
+
+ return head_skb;
+
+err_skb:
+ put_page(page);
+ while (--num_buf) {
+ buf = virtqueue_get_buf(rq->vq, &len);
+ if (unlikely(!buf)) {
+ pr_debug("%s: rx error: %d buffers missing\n",
+ dev->name, num_buf);
+ dev->stats.rx_length_errors++;
+ break;
+ }
+ page = virt_to_head_page(buf);
+ put_page(page);
--rq->num;
}
- return 0;
+err_buf:
+ dev->stats.rx_dropped++;
+ dev_kfree_skb(head_skb);
+ return NULL;
}
static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
@@ -362,48 +420,29 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
struct net_device *dev = vi->dev;
struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
struct sk_buff *skb;
- struct page *page;
struct skb_vnet_hdr *hdr;
if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
dev->stats.rx_length_errors++;
- if (vi->big_packets)
- give_pages(rq, buf);
- else if (vi->mergeable_rx_bufs)
+ if (vi->mergeable_rx_bufs)
put_page(virt_to_head_page(buf));
+ else if (vi->big_packets)
+ give_pages(rq, buf);
else
dev_kfree_skb(buf);
return;
}
- if (!vi->mergeable_rx_bufs && !vi->big_packets) {
- skb = buf;
- len -= sizeof(struct virtio_net_hdr);
- skb_trim(skb, len);
- } else if (vi->mergeable_rx_bufs) {
- struct page *page = virt_to_head_page(buf);
- skb = page_to_skb(rq, page,
- (char *)buf - (char *)page_address(page),
- len, MERGE_BUFFER_LEN);
- if (unlikely(!skb)) {
- dev->stats.rx_dropped++;
- put_page(page);
- return;
- }
- if (receive_mergeable(rq, skb)) {
- dev_kfree_skb(skb);
- return;
- }
- } else {
- page = buf;
- skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
- if (unlikely(!skb)) {
- dev->stats.rx_dropped++;
- give_pages(rq, page);
- return;
- }
- }
+ if (vi->mergeable_rx_bufs)
+ skb = receive_mergeable(dev, rq, buf, len);
+ else if (vi->big_packets)
+ skb = receive_big(dev, rq, buf, len);
+ else
+ skb = receive_small(buf, len);
+
+ if (unlikely(!skb))
+ return;
hdr = skb_vnet_hdr(skb);
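
receive_buf() now just dispatches on the buffer scheme (mergeable, big, small), and receive_mergeable() owns its whole error path: once one chunk of a multi-buffer packet cannot be merged, the chunks the device already posted must still be popped from the virtqueue and released, otherwise rq->num and the ring drift out of sync. A toy model of that drain-on-error rule, with malloc()ed blocks standing in for ring buffers; everything here is simplified, and unlike this model the real code only drains the current packet's num_buf chunks.

#include <stdio.h>
#include <stdlib.h>

#define CHUNKS 4

/* Stand-in for virtqueue_get_buf(): pop one posted buffer, if any. */
static void *get_buf(int *posted)
{
        if (*posted <= 0)
                return NULL;
        (*posted)--;
        return malloc(32);
}

/* Merge CHUNKS buffers into one packet; on failure, keep popping and
 * freeing so nothing posted for this packet is leaked. */
static const char *assemble(int *posted, int fail_at)
{
        void *chunk = NULL;
        int i;

        for (i = 0; i < CHUNKS && (chunk = get_buf(posted)); i++) {
                if (i == fail_at)
                        goto err;       /* e.g. alloc_skb() failed */
                free(chunk);            /* merged into the packet  */
        }
        return "packet";
err:
        free(chunk);
        while ((chunk = get_buf(posted)))       /* drain the rest */
                free(chunk);
        return NULL;
}

int main(void)
{
        int posted = CHUNKS;

        if (!assemble(&posted, 2))
                printf("dropped; %d buffers left posted\n", posted);
        return 0;
}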
@@ -834,16 +873,15 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
/*
* Send command via the control virtqueue and check status. Commands
* supported by the hypervisor, as indicated by feature bits, should
- * never fail unless improperly formated.
+ * never fail unless improperly formatted.
*/
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
- struct scatterlist *out,
- struct scatterlist *in)
+ struct scatterlist *out)
{
struct scatterlist *sgs[4], hdr, stat;
struct virtio_net_ctrl_hdr ctrl;
virtio_net_ctrl_ack status = ~0;
- unsigned out_num = 0, in_num = 0, tmp;
+ unsigned out_num = 0, tmp;
/* Caller should know better */
BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
@@ -856,16 +894,13 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
if (out)
sgs[out_num++] = out;
- if (in)
- sgs[out_num + in_num++] = in;
/* Add return status. */
sg_init_one(&stat, &status, sizeof(status));
- sgs[out_num + in_num++] = &stat;
+ sgs[out_num] = &stat;
- BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
- BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
- < 0);
+ BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
+ BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC) < 0);
if (unlikely(!virtqueue_kick(vi->cvq)))
return status == VIRTIO_NET_OK;
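
With the unused "in" scatterlist dropped, a control command is always laid out as a device-readable header, an optional device-readable payload, and exactly one device-writable ack byte, so in_num is the constant 1. A small sketch of that layout under invented types; the real sg and virtqueue types differ.

#include <assert.h>
#include <stddef.h>

struct sg { const void *addr; size_t len; };

/* Layout: [header][optional payload][1-byte ack]; returns the total
 * number of sg entries.  out_num is that total minus the ack slot. */
static size_t build_cmd(struct sg *sgs, const void *hdr, size_t hlen,
                        const void *out, size_t olen, unsigned char *ack)
{
        size_t n = 0;

        sgs[n++] = (struct sg){ hdr, hlen };    /* device-readable */
        if (out)
                sgs[n++] = (struct sg){ out, olen };
        sgs[n++] = (struct sg){ ack, 1 };       /* device-writable */
        return n;
}

int main(void)
{
        struct sg sgs[3];
        unsigned char ack = 0xff;
        unsigned char hdr[2] = { 0, 0 };        /* class, command */

        assert(build_cmd(sgs, hdr, sizeof(hdr), NULL, 0, &ack) == 2);
        assert(build_cmd(sgs, hdr, sizeof(hdr), "x", 1, &ack) == 3);
        return 0;
}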
@@ -895,8 +930,7 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
sg_init_one(&sg, addr->sa_data, dev->addr_len);
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
- VIRTIO_NET_CTRL_MAC_ADDR_SET,
- &sg, NULL)) {
+ VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
dev_warn(&vdev->dev,
"Failed to set mac address by vq command.\n");
return -EINVAL;
@@ -969,7 +1003,7 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
rtnl_lock();
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
- VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))
+ VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
rtnl_unlock();
}
@@ -987,7 +1021,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
sg_init_one(&sg, &s, sizeof(s));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
- VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) {
+ VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
queue_pairs);
return -EINVAL;
@@ -1027,7 +1061,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
void *buf;
int i;
- /* We can't dynamicaly set ndo_set_rx_mode, so return gracefully */
+ /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
return;
@@ -1037,16 +1071,14 @@ static void virtnet_set_rx_mode(struct net_device *dev)
sg_init_one(sg, &promisc, sizeof(promisc));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
- VIRTIO_NET_CTRL_RX_PROMISC,
- sg, NULL))
+ VIRTIO_NET_CTRL_RX_PROMISC, sg))
dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
promisc ? "en" : "dis");
sg_init_one(sg, &allmulti, sizeof(allmulti));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
- VIRTIO_NET_CTRL_RX_ALLMULTI,
- sg, NULL))
+ VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
allmulti ? "en" : "dis");
@@ -1082,9 +1114,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
- VIRTIO_NET_CTRL_MAC_TABLE_SET,
- sg, NULL))
- dev_warn(&dev->dev, "Failed to set MAC fitler table.\n");
+ VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
+ dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
kfree(buf);
}
@@ -1098,7 +1129,7 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
sg_init_one(&sg, &vid, sizeof(vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
- VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL))
+ VIRTIO_NET_CTRL_VLAN_ADD, &sg))
dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
return 0;
}
@@ -1112,7 +1143,7 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
sg_init_one(&sg, &vid, sizeof(vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
- VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL))
+ VIRTIO_NET_CTRL_VLAN_DEL, &sg))
dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
return 0;
}
@@ -1327,6 +1358,11 @@ static void virtnet_config_changed(struct virtio_device *vdev)
static void virtnet_free_queues(struct virtnet_info *vi)
{
+ int i;
+
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ netif_napi_del(&vi->rq[i].napi);
+
kfree(vi->rq);
kfree(vi->sq);
}
@@ -1356,10 +1392,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
struct virtqueue *vq = vi->rq[i].vq;
while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (vi->big_packets)
- give_pages(&vi->rq[i], buf);
- else if (vi->mergeable_rx_bufs)
+ if (vi->mergeable_rx_bufs)
put_page(virt_to_head_page(buf));
+ else if (vi->big_packets)
+ give_pages(&vi->rq[i], buf);
else
dev_kfree_skb(buf);
--vi->rq[i].num;
@@ -1752,16 +1788,17 @@ static int virtnet_restore(struct virtio_device *vdev)
if (err)
return err;
- if (netif_running(vi->dev))
+ if (netif_running(vi->dev)) {
+ for (i = 0; i < vi->curr_queue_pairs; i++)
+ if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+ schedule_delayed_work(&vi->refill, 0);
+
for (i = 0; i < vi->max_queue_pairs; i++)
virtnet_napi_enable(&vi->rq[i]);
+ }
netif_device_attach(vi->dev);
- for (i = 0; i < vi->curr_queue_pairs; i++)
- if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
- schedule_delayed_work(&vi->refill, 0);
-
mutex_lock(&vi->config_lock);
vi->config_enable = true;
mutex_unlock(&vi->config_lock);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 7e2788c488ed..3be786faaaec 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1235,7 +1235,9 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
#ifdef VMXNET3_RSS
if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
(adapter->netdev->features & NETIF_F_RXHASH))
- ctx->skb->rxhash = le32_to_cpu(rcd->rssHash);
+ skb_set_hash(ctx->skb,
+ le32_to_cpu(rcd->rssHash),
+ PKT_HASH_TYPE_L3);
#endif
skb_put(ctx->skb, rcd->len);
@@ -3132,7 +3134,6 @@ err_alloc_queue_desc:
err_alloc_shared:
dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
return err;
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 0358c07f7669..481f85d604a4 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -916,17 +916,32 @@ static bool vxlan_snoop(struct net_device *dev,
}
/* See if multicast group is already in use by other ID */
-static bool vxlan_group_used(struct vxlan_net *vn, union vxlan_addr *remote_ip)
+static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
{
struct vxlan_dev *vxlan;
+ /* The vxlan_sock is only used by dev; leaving the group has
+ * no effect on other vxlan devices.
+ */
+ if (atomic_read(&dev->vn_sock->refcnt) == 1)
+ return false;
+
list_for_each_entry(vxlan, &vn->vxlan_list, next) {
- if (!netif_running(vxlan->dev))
+ if (!netif_running(vxlan->dev) || vxlan == dev)
continue;
- if (vxlan_addr_equal(&vxlan->default_dst.remote_ip,
- remote_ip))
- return true;
+ if (vxlan->vn_sock != dev->vn_sock)
+ continue;
+
+ if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
+ &dev->default_dst.remote_ip))
+ continue;
+
+ if (vxlan->default_dst.remote_ifindex !=
+ dev->default_dst.remote_ifindex)
+ continue;
+
+ return true;
}
return false;
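
The rewritten predicate only reports the multicast group as shared when another running device uses the same socket, the same remote address and the same lower interface; a socket with refcount 1 short-circuits to false because nobody else can be affected by leaving. A compact userspace model of that test, with field and function names simplified:

#include <stdbool.h>
#include <stdio.h>

struct vdev {
        const void *sock;
        unsigned int remote_ip;
        int ifindex;
        bool running;
};

static bool group_used(const struct vdev *list, int n,
                       const struct vdev *dev, int sock_refs)
{
        int i;

        /* Socket private to dev: leaving cannot affect anyone else. */
        if (sock_refs == 1)
                return false;

        for (i = 0; i < n; i++) {
                const struct vdev *v = &list[i];

                if (!v->running || v == dev)
                        continue;
                if (v->sock == dev->sock &&
                    v->remote_ip == dev->remote_ip &&
                    v->ifindex == dev->ifindex)
                        return true;
        }
        return false;
}

int main(void)
{
        int sock;
        struct vdev devs[2] = {
                { &sock, 0xe0000001, 2, true },
                { &sock, 0xe0000001, 2, true },
        };

        printf("%d\n", group_used(devs, 2, &devs[0], 2)); /* 1: twin exists */
        printf("%d\n", group_used(devs, 2, &devs[0], 1)); /* 0: sole user   */
        return 0;
}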
@@ -1066,7 +1081,7 @@ static void vxlan_rcv(struct vxlan_sock *vs,
struct iphdr *oip = NULL;
struct ipv6hdr *oip6 = NULL;
struct vxlan_dev *vxlan;
- struct pcpu_tstats *stats;
+ struct pcpu_sw_netstats *stats;
union vxlan_addr saddr;
__u32 vni;
int err = 0;
@@ -1366,20 +1381,6 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
return false;
}
-static void vxlan_sock_put(struct sk_buff *skb)
-{
- sock_put(skb->sk);
-}
-
-/* On transmit, associate with the tunnel socket */
-static void vxlan_set_owner(struct sock *sk, struct sk_buff *skb)
-{
- skb_orphan(skb);
- sock_hold(sk);
- skb->sk = sk;
- skb->destructor = vxlan_sock_put;
-}
-
/* Compute source port for outgoing packet
* first choice to use L4 flow hash since it will spread
* better and maybe available from hardware
@@ -1390,7 +1391,7 @@ __be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
unsigned int range = (port_max - port_min) + 1;
u32 hash;
- hash = skb_get_rxhash(skb);
+ hash = skb_get_hash(skb);
if (!hash)
hash = jhash(skb->data, 2 * ETH_ALEN,
(__force u32) skb->protocol);
@@ -1499,8 +1500,6 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
ip6h->daddr = *daddr;
ip6h->saddr = *saddr;
- vxlan_set_owner(vs->sock->sk, skb);
-
err = handle_offloads(skb);
if (err)
return err;
@@ -1557,8 +1556,6 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
uh->len = htons(skb->len);
uh->check = 0;
- vxlan_set_owner(vs->sock->sk, skb);
-
err = handle_offloads(skb);
if (err)
return err;
@@ -1572,11 +1569,12 @@ EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
struct vxlan_dev *dst_vxlan)
{
- struct pcpu_tstats *tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
- struct pcpu_tstats *rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
+ struct pcpu_sw_netstats *tx_stats, *rx_stats;
union vxlan_addr loopback;
union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
+ tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
+ rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
skb->pkt_type = PACKET_HOST;
skb->encapsulation = 0;
skb->dev = dst_vxlan->dev;
@@ -1668,7 +1666,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
netdev_dbg(dev, "circular route to %pI4\n",
&dst->sin.sin_addr.s_addr);
dev->stats.collisions++;
- goto tx_error;
+ goto rt_tx_error;
}
/* Bypass encapsulation if the destination is local */
@@ -1770,7 +1768,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct ethhdr *eth;
bool did_rsc = false;
- struct vxlan_rdst *rdst;
+ struct vxlan_rdst *rdst, *fdst = NULL;
struct vxlan_fdb *f;
skb_reset_mac_header(skb);
@@ -1812,7 +1810,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
vxlan_fdb_miss(vxlan, eth->h_dest);
dev->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
}
@@ -1820,12 +1818,19 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
list_for_each_entry_rcu(rdst, &f->remotes, list) {
struct sk_buff *skb1;
+ if (!fdst) {
+ fdst = rdst;
+ continue;
+ }
skb1 = skb_clone(skb, GFP_ATOMIC);
if (skb1)
vxlan_xmit_one(skb1, dev, rdst, did_rsc);
}
- dev_kfree_skb(skb);
+ if (fdst)
+ vxlan_xmit_one(skb, dev, fdst, did_rsc);
+ else
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
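
The fdst change saves one clone per transmitted packet: every destination after the first gets a clone, and the original skb is consumed by the first destination at the end, or freed if the fdb entry carried no remotes at all. A sketch of that pattern with strdup() standing in for skb_clone():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void xmit_one(char *pkt, int dst)
{
        printf("to %d: %s\n", dst, pkt);
        free(pkt);              /* transmit consumes the buffer */
}

static void xmit_all(char *pkt, const int *dsts, int n)
{
        int i, first = -1;

        for (i = 0; i < n; i++) {
                char *copy;

                if (first < 0) {        /* remember, don't send yet */
                        first = dsts[i];
                        continue;
                }
                copy = strdup(pkt);     /* skb_clone() stand-in */
                if (copy)
                        xmit_one(copy, dsts[i]);
        }
        if (first >= 0)
                xmit_one(pkt, first);   /* original goes to the first dst */
        else
                free(pkt);              /* no destination: drop */
}

int main(void)
{
        int dsts[3] = { 1, 2, 3 };

        xmit_all(strdup("payload"), dsts, 3);
        return 0;
}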
@@ -1882,12 +1887,12 @@ static int vxlan_init(struct net_device *dev)
struct vxlan_sock *vs;
int i;
- dev->tstats = alloc_percpu(struct pcpu_tstats);
+ dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
for_each_possible_cpu(i) {
- struct pcpu_tstats *vxlan_stats;
+ struct pcpu_sw_netstats *vxlan_stats;
vxlan_stats = per_cpu_ptr(dev->tstats, i);
u64_stats_init(&vxlan_stats->syncp);
}
@@ -1935,7 +1940,6 @@ static void vxlan_uninit(struct net_device *dev)
/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
- struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_sock *vs = vxlan->vn_sock;
@@ -1943,8 +1947,7 @@ static int vxlan_open(struct net_device *dev)
if (!vs)
return -ENOTCONN;
- if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
- vxlan_group_used(vn, &vxlan->default_dst.remote_ip)) {
+ if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
vxlan_sock_hold(vs);
dev_hold(dev);
queue_work(vxlan_wq, &vxlan->igmp_join);
@@ -1983,7 +1986,7 @@ static int vxlan_stop(struct net_device *dev)
struct vxlan_sock *vs = vxlan->vn_sock;
if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
- ! vxlan_group_used(vn, &vxlan->default_dst.remote_ip)) {
+ !vxlan_group_used(vn, vxlan)) {
vxlan_sock_hold(vs);
dev_hold(dev);
queue_work(vxlan_wq, &vxlan->igmp_leave);
@@ -2001,6 +2004,29 @@ static void vxlan_set_multicast_list(struct net_device *dev)
{
}
+static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_rdst *dst = &vxlan->default_dst;
+ struct net_device *lowerdev;
+ int max_mtu;
+
+ lowerdev = __dev_get_by_index(dev_net(dev), dst->remote_ifindex);
+ if (lowerdev == NULL)
+ return eth_change_mtu(dev, new_mtu);
+
+ if (dst->remote_ip.sa.sa_family == AF_INET6)
+ max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
+ else
+ max_mtu = lowerdev->mtu - VXLAN_HEADROOM;
+
+ if (new_mtu < 68 || new_mtu > max_mtu)
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+
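
vxlan_change_mtu() clamps against the lower device's MTU minus the encapsulation headroom. A worked example, assuming the usual headroom arithmetic (IPv4 VXLAN adds 50 bytes: 14 Ethernet + 20 IP + 8 UDP + 8 VXLAN; IPv6 adds 70); the constants here are illustrative:

#include <stdio.h>

#define VXLAN_HEADROOM  50
#define VXLAN6_HEADROOM 70

static int clamp_mtu(int lower_mtu, int ipv6, int new_mtu)
{
        int max = lower_mtu - (ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);

        if (new_mtu < 68 || new_mtu > max)
                return -1;              /* -EINVAL in the driver */
        return new_mtu;
}

int main(void)
{
        printf("%d\n", clamp_mtu(1500, 0, 1450));  /* ok: 1450        */
        printf("%d\n", clamp_mtu(1500, 0, 1451));  /* -1: too big     */
        printf("%d\n", clamp_mtu(1500, 1, 1431));  /* -1: over v6 cap */
        return 0;
}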
static const struct net_device_ops vxlan_netdev_ops = {
.ndo_init = vxlan_init,
.ndo_uninit = vxlan_uninit,
@@ -2009,7 +2035,7 @@ static const struct net_device_ops vxlan_netdev_ops = {
.ndo_start_xmit = vxlan_xmit,
.ndo_get_stats64 = ip_tunnel_get_stats64,
.ndo_set_rx_mode = vxlan_set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
+ .ndo_change_mtu = vxlan_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_fdb_add = vxlan_fdb_add,
@@ -2440,7 +2466,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
/* update header length based on lower device */
dev->hard_header_len = lowerdev->hard_header_len +
(use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
- }
+ } else if (use_ipv6)
+ vxlan->flags |= VXLAN_F_IPV6;
if (data[IFLA_VXLAN_TOS])
vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 851dc7b7e8b0..288610df205c 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -699,8 +699,6 @@ static void dscc4_free1(struct pci_dev *pdev)
for (i = 0; i < dev_per_card; i++)
unregister_hdlc_device(dscc4_to_dev(root + i));
- pci_set_drvdata(pdev, NULL);
-
for (i = 0; i < dev_per_card; i++)
free_netdev(root[i].dev);
kfree(root);
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 7ef435bab425..c59b91f03660 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -973,7 +973,6 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_hdlcdev:
- pci_set_drvdata(pdev, NULL);
kfree(sc);
err_kzalloc:
pci_release_regions(pdev);
@@ -995,7 +994,6 @@ static void lmc_remove_one(struct pci_dev *pdev)
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index 53efc57fcace..5b72f7f8c516 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -281,7 +281,6 @@ static void pc300_pci_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
if (card->ports[0].netdev)
free_netdev(card->ports[0].netdev);
if (card->ports[1].netdev)
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index ddbce54040e2..fe4e3ece3c42 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -260,7 +260,6 @@ static void pci200_pci_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
if (card->ports[0].netdev)
free_netdev(card->ports[0].netdev);
if (card->ports[1].netdev)
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 388ddf60a66d..1b89ecf0959e 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -57,6 +57,7 @@
#include <net/net_namespace.h>
#include <net/arp.h>
+#include <net/Space.h>
#include <asm/io.h>
#include <asm/types.h>
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 4c0a69779b89..f76aa9081585 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -542,7 +542,6 @@ static void wanxl_pci_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
kfree(card);
}
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index e888f1893179..55eda7afc041 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1313,7 +1313,7 @@ static void adm8211_bss_info_changed(struct ieee80211_hw *dev,
if (!(changes & BSS_CHANGED_BSSID))
return;
- if (memcmp(conf->bssid, priv->bssid, ETH_ALEN)) {
+ if (!ether_addr_equal(conf->bssid, priv->bssid)) {
adm8211_set_bssid(dev, conf->bssid);
memcpy(priv->bssid, conf->bssid, ETH_ALEN);
}
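
This and the wireless conversions that follow swap memcmp() for ether_addr_equal(), which inverts the truth value: memcmp() yields 0 on a match while the helper yields true, so every call site flips its negation. A portable stand-in showing the equivalence; the real helper in <linux/etherdevice.h> is optimized and assumes 2-byte alignment, which is why some callers below use the _unaligned variant instead.

#include <assert.h>
#include <stdbool.h>
#include <string.h>

static bool ether_addr_equal(const unsigned char *a, const unsigned char *b)
{
        return memcmp(a, b, 6) == 0;    /* simplified reference version */
}

int main(void)
{
        unsigned char x[6] = { 0, 1, 2, 3, 4, 5 };
        unsigned char y[6] = { 0, 1, 2, 3, 4, 5 };

        assert(memcmp(x, y, 6) == 0);       /* old style: 0 means equal     */
        assert(ether_addr_equal(x, y));     /* new style: true means equal  */
        return 0;
}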
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index e23c869bfe33..fc4f98b275d7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -1243,7 +1243,7 @@ bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
IEEE80211_P2P_ATTR_DEVICE_ID,
p2p_dev_addr, sizeof(p2p_dev_addr));
if ((err >= 0) &&
- (!memcmp(p2p_dev_addr, afx_hdl->tx_dst_addr, ETH_ALEN))) {
+ (ether_addr_equal(p2p_dev_addr, afx_hdl->tx_dst_addr))) {
if (!bi->ctl_ch) {
ch.chspec = le16_to_cpu(bi->chanspec);
cfg->d11inf.decchspec(&ch);
@@ -1380,8 +1380,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
(brcmf_p2p_gon_req_collision(p2p, (u8 *)e->addr))) {
if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
&p2p->status) &&
- (memcmp(afx_hdl->tx_dst_addr, e->addr,
- ETH_ALEN) == 0)) {
+ (ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) {
afx_hdl->peer_chan = ch.chnum;
brcmf_dbg(INFO, "GON request: Peer found, channel=%d\n",
afx_hdl->peer_chan);
@@ -1865,7 +1864,7 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
cfg->d11inf.decchspec(&ch);
if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status) &&
- (memcmp(afx_hdl->tx_dst_addr, e->addr, ETH_ALEN) == 0)) {
+ (ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) {
afx_hdl->peer_chan = ch.chnum;
brcmf_dbg(INFO, "PROBE REQUEST: Peer found, channel=%d\n",
afx_hdl->peer_chan);
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
index 010b252be584..103f7bce8932 100644
--- a/drivers/net/wireless/cw1200/sta.c
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -13,6 +13,7 @@
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/module.h>
+#include <linux/etherdevice.h>
#include "cw1200.h"
#include "sta.h"
@@ -555,8 +556,8 @@ u64 cw1200_prepare_multicast(struct ieee80211_hw *hw,
pr_debug("[STA] multicast: %pM\n", ha->addr);
memcpy(&priv->multicast_filter.macaddrs[count],
ha->addr, ETH_ALEN);
- if (memcmp(ha->addr, broadcast_ipv4, ETH_ALEN) &&
- memcmp(ha->addr, broadcast_ipv6, ETH_ALEN))
+ if (!ether_addr_equal(ha->addr, broadcast_ipv4) &&
+ !ether_addr_equal(ha->addr, broadcast_ipv6))
priv->has_multicast_subscription = true;
count++;
}
diff --git a/drivers/net/wireless/cw1200/txrx.c b/drivers/net/wireless/cw1200/txrx.c
index e824d4d4a18d..0bd541175ecd 100644
--- a/drivers/net/wireless/cw1200/txrx.c
+++ b/drivers/net/wireless/cw1200/txrx.c
@@ -1166,8 +1166,7 @@ void cw1200_rx_cb(struct cw1200_common *priv,
return;
} else if (ieee80211_is_beacon(frame->frame_control) &&
!arg->status && priv->vif &&
- !memcmp(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid,
- ETH_ALEN)) {
+ ether_addr_equal(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid)) {
const u8 *tim_ie;
u8 *ies = ((struct ieee80211_mgmt *)
(skb->data))->u.beacon.variable;
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
index d39e3e24077b..599f30f22841 100644
--- a/drivers/net/wireless/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -563,7 +563,7 @@ hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr *hdr, u16 fc,
/* Possible WDS frame: either IEEE 802.11 compliant (if FromDS)
* or own non-standard frame with 4th address after payload */
- if (memcmp(hdr->addr1, local->dev->dev_addr, ETH_ALEN) != 0 &&
+ if (!ether_addr_equal(hdr->addr1, local->dev->dev_addr) &&
(hdr->addr1[0] != 0xff || hdr->addr1[1] != 0xff ||
hdr->addr1[2] != 0xff || hdr->addr1[3] != 0xff ||
hdr->addr1[4] != 0xff || hdr->addr1[5] != 0xff)) {
@@ -622,12 +622,12 @@ static int hostap_is_eapol_frame(local_info_t *local, struct sk_buff *skb)
/* check that the frame is unicast frame to us */
if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
IEEE80211_FCTL_TODS &&
- memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0 &&
- memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) {
+ ether_addr_equal(hdr->addr1, dev->dev_addr) &&
+ ether_addr_equal(hdr->addr3, dev->dev_addr)) {
/* ToDS frame with own addr BSSID and DA */
} else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
IEEE80211_FCTL_FROMDS &&
- memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
+ ether_addr_equal(hdr->addr1, dev->dev_addr)) {
/* FromDS frame with own addr as DA */
} else
return 0;
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c
index 344a981a052e..8bde77689469 100644
--- a/drivers/net/wireless/hostap/hostap_80211_tx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_tx.c
@@ -1,5 +1,6 @@
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/etherdevice.h>
#include "hostap_80211.h"
#include "hostap_common.h"
@@ -103,8 +104,7 @@ netdev_tx_t hostap_data_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
} else if (local->iw_mode == IW_MODE_INFRA &&
(local->wds_type & HOSTAP_WDS_AP_CLIENT) &&
- memcmp(skb->data + ETH_ALEN, dev->dev_addr,
- ETH_ALEN) != 0) {
+ !ether_addr_equal(skb->data + ETH_ALEN, dev->dev_addr)) {
/* AP client mode: send frames with foreign src addr
* using 4-addr WDS frames */
use_wds = WDS_COMPLIANT_FRAME;
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index d6033a8e5dea..d36e252d2ccb 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
+#include <linux/etherdevice.h>
#include "hostap_wlan.h"
#include "hostap.h"
@@ -106,13 +107,12 @@ static void ap_sta_hash_del(struct ap_data *ap, struct sta_info *sta)
s = ap->sta_hash[STA_HASH(sta->addr)];
if (s == NULL) return;
- if (memcmp(s->addr, sta->addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(s->addr, sta->addr)) {
ap->sta_hash[STA_HASH(sta->addr)] = s->hnext;
return;
}
- while (s->hnext != NULL && memcmp(s->hnext->addr, sta->addr, ETH_ALEN)
- != 0)
+ while (s->hnext != NULL && !ether_addr_equal(s->hnext->addr, sta->addr))
s = s->hnext;
if (s->hnext != NULL)
s->hnext = s->hnext->hnext;
@@ -435,7 +435,7 @@ int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
ptr != &mac_restrictions->mac_list; ptr = ptr->next) {
entry = list_entry(ptr, struct mac_entry, list);
- if (memcmp(entry->addr, mac, ETH_ALEN) == 0) {
+ if (ether_addr_equal(entry->addr, mac)) {
list_del(ptr);
kfree(entry);
mac_restrictions->entries--;
@@ -459,7 +459,7 @@ static int ap_control_mac_deny(struct mac_restrictions *mac_restrictions,
spin_lock_bh(&mac_restrictions->lock);
list_for_each_entry(entry, &mac_restrictions->mac_list, list) {
- if (memcmp(entry->addr, mac, ETH_ALEN) == 0) {
+ if (ether_addr_equal(entry->addr, mac)) {
found = 1;
break;
}
@@ -957,7 +957,7 @@ static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta)
struct sta_info *s;
s = ap->sta_hash[STA_HASH(sta)];
- while (s != NULL && memcmp(s->addr, sta, ETH_ALEN) != 0)
+ while (s != NULL && !ether_addr_equal(s->addr, sta))
s = s->hnext;
return s;
}
@@ -1391,7 +1391,7 @@ static void handle_authen(local_info_t *local, struct sk_buff *skb,
status_code = __le16_to_cpu(*pos);
pos++;
- if (memcmp(dev->dev_addr, hdr->addr2, ETH_ALEN) == 0 ||
+ if (ether_addr_equal(dev->dev_addr, hdr->addr2) ||
ap_control_mac_deny(&ap->mac_restrictions, hdr->addr2)) {
txt = "authentication denied";
resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
@@ -1935,7 +1935,7 @@ static void handle_pspoll(local_info_t *local,
PDEBUG(DEBUG_PS2, "handle_pspoll: BSSID=%pM, TA=%pM PWRMGT=%d\n",
hdr->addr1, hdr->addr2, !!ieee80211_has_pm(hdr->frame_control));
- if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) {
+ if (!ether_addr_equal(hdr->addr1, dev->dev_addr)) {
PDEBUG(DEBUG_AP,
"handle_pspoll - addr1(BSSID)=%pM not own MAC\n",
hdr->addr1);
@@ -2230,7 +2230,7 @@ static void handle_ap_item(local_info_t *local, struct sk_buff *skb,
goto done;
}
- if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) {
+ if (!ether_addr_equal(hdr->addr1, dev->dev_addr)) {
PDEBUG(DEBUG_AP, "handle_ap_item - addr1(BSSID)=%pM"
" not own MAC\n", hdr->addr1);
goto done;
@@ -2267,13 +2267,13 @@ static void handle_ap_item(local_info_t *local, struct sk_buff *skb,
goto done;
}
- if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) {
+ if (!ether_addr_equal(hdr->addr1, dev->dev_addr)) {
PDEBUG(DEBUG_AP, "handle_ap_item - addr1(DA)=%pM"
" not own MAC\n", hdr->addr1);
goto done;
}
- if (memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN)) {
+ if (!ether_addr_equal(hdr->addr3, dev->dev_addr)) {
PDEBUG(DEBUG_AP, "handle_ap_item - addr3(BSSID)=%pM"
" not own MAC\n", hdr->addr3);
goto done;
@@ -3035,7 +3035,7 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
if (!wds) {
/* FromDS frame - not for us; probably
* broadcast/multicast in another BSS - drop */
- if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(hdr->addr1, dev->dev_addr)) {
printk(KERN_DEBUG "Odd.. FromDS packet "
"received with own BSSID\n");
hostap_dump_rx_80211(dev->name, skb, rx_stats);
@@ -3044,7 +3044,7 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
goto out;
}
} else if (stype == IEEE80211_STYPE_NULLFUNC && sta == NULL &&
- memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
+ ether_addr_equal(hdr->addr1, dev->dev_addr)) {
if (local->hostapd) {
prism2_rx_80211(local->apdev, skb, rx_stats,
@@ -3073,7 +3073,7 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
/* If BSSID (Addr3) is foreign, this frame is a normal
* broadcast frame from an IBSS network. Drop it silently.
* If BSSID is own, report the dropping of this frame. */
- if (memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(hdr->addr3, dev->dev_addr)) {
printk(KERN_DEBUG "%s: dropped received packet from %pM"
" with no ToDS flag "
"(type=0x%02x, subtype=0x%02x)\n", dev->name,
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index c275dc1623fe..6df3ee561d52 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -2175,7 +2175,7 @@ static void hostap_tx_callback(local_info_t *local,
struct hostap_tx_callback_info *cb;
/* Make sure that frame was from us. */
- if (memcmp(txdesc->addr2, local->dev->dev_addr, ETH_ALEN)) {
+ if (!ether_addr_equal(txdesc->addr2, local->dev->dev_addr)) {
printk(KERN_DEBUG "%s: TX callback - foreign frame\n",
local->dev->name);
return;
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 63e350affc7e..3e5fa7872b64 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -655,7 +655,7 @@ static int hostap_join_ap(struct net_device *dev)
if (!local->last_scan_results)
break;
entry = &local->last_scan_results[i];
- if (memcmp(local->preferred_ap, entry->bssid, ETH_ALEN) == 0) {
+ if (ether_addr_equal(local->preferred_ap, entry->bssid)) {
req.channel = entry->chid;
break;
}
@@ -1978,7 +1978,7 @@ static inline int prism2_translate_scan(local_info_t *local,
list_for_each(ptr, &local->bss_list) {
struct hostap_bss_info *bss;
bss = list_entry(ptr, struct hostap_bss_info, list);
- if (memcmp(bss->bssid, scan->bssid, ETH_ALEN) == 0) {
+ if (ether_addr_equal(bss->bssid, scan->bssid)) {
bss->included = 1;
current_ev = __prism2_translate_scan(
local, info, scan, bss, current_ev,
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index a1257c92afc4..67db34e56d7e 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -155,8 +155,7 @@ int prism2_wds_add(local_info_t *local, u8 *remote_addr,
if (prism2_wds_special_addr(iface->u.wds.remote_addr))
empty = iface;
- else if (memcmp(iface->u.wds.remote_addr, remote_addr,
- ETH_ALEN) == 0) {
+ else if (ether_addr_equal(iface->u.wds.remote_addr, remote_addr)) {
match = iface;
break;
}
@@ -214,8 +213,7 @@ int prism2_wds_del(local_info_t *local, u8 *remote_addr,
if (iface->type != HOSTAP_INTERFACE_WDS)
continue;
- if (memcmp(iface->u.wds.remote_addr, remote_addr,
- ETH_ALEN) == 0) {
+ if (ether_addr_equal(iface->u.wds.remote_addr, remote_addr)) {
selected = iface;
break;
}
@@ -1085,7 +1083,7 @@ int prism2_sta_deauth(local_info_t *local, u16 reason)
if (local->iw_mode != IW_MODE_INFRA ||
is_zero_ether_addr(local->bssid) ||
- memcmp(local->bssid, "\x44\x44\x44\x44\x44\x44", ETH_ALEN) == 0)
+ ether_addr_equal(local->bssid, "\x44\x44\x44\x44\x44\x44"))
return 0;
ret = prism2_sta_send_mgmt(local, local->bssid, IEEE80211_STYPE_DEAUTH,
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 9244b3661d34..139326065bd9 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -3012,7 +3012,7 @@ static void ipw_remove_current_network(struct ipw_priv *priv)
spin_lock_irqsave(&priv->ieee->lock, flags);
list_for_each_safe(element, safe, &priv->ieee->network_list) {
network = list_entry(element, struct libipw_network, list);
- if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
+ if (ether_addr_equal(network->bssid, priv->bssid)) {
list_del(element);
list_add_tail(&network->list,
&priv->ieee->network_free_list);
@@ -3921,7 +3921,7 @@ static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
int i;
for (i = 0; i < priv->num_stations; i++) {
- if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
+ if (ether_addr_equal(priv->stations[i], bssid)) {
/* Another node is active in network */
priv->missed_adhoc_beacons = 0;
if (!(priv->config & CFG_STATIC_CHANNEL))
@@ -3953,7 +3953,7 @@ static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
int i;
for (i = 0; i < priv->num_stations; i++)
- if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
+ if (ether_addr_equal(priv->stations[i], bssid))
return i;
return IPW_INVALID_STATION;
@@ -5622,7 +5622,7 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv,
return 0;
}
- if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
+ if (ether_addr_equal(network->bssid, priv->bssid)) {
IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
"because of the same BSSID match: %pM"
".\n", print_ssid(ssid, network->ssid,
@@ -5849,7 +5849,7 @@ static int ipw_best_network(struct ipw_priv *priv,
}
if ((priv->config & CFG_STATIC_BSSID) &&
- memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
+ !ether_addr_equal(network->bssid, priv->bssid)) {
IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
"because of BSSID mismatch: %pM.\n",
print_ssid(ssid, network->ssid,
@@ -6988,7 +6988,7 @@ static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
}
if ((priv->status & STATUS_ASSOCIATED) &&
(priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
- if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
+ if (!ether_addr_equal(network->bssid, priv->bssid))
if (network->capability & WLAN_CAPABILITY_IBSS)
if ((network->ssid_len ==
priv->assoc_network->ssid_len) &&
@@ -8210,29 +8210,29 @@ static int is_network_packet(struct ipw_priv *priv,
switch (priv->ieee->iw_mode) {
case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
/* packets from our adapter are dropped (echo) */
- if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
+ if (ether_addr_equal(header->addr2, priv->net_dev->dev_addr))
return 0;
/* {broad,multi}cast packets to our BSSID go through */
if (is_multicast_ether_addr(header->addr1))
- return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
+ return ether_addr_equal(header->addr3, priv->bssid);
/* packets to our adapter go through */
- return !memcmp(header->addr1, priv->net_dev->dev_addr,
- ETH_ALEN);
+ return ether_addr_equal(header->addr1,
+ priv->net_dev->dev_addr);
case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
/* packets from our adapter are dropped (echo) */
- if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
+ if (ether_addr_equal(header->addr3, priv->net_dev->dev_addr))
return 0;
/* {broad,multi}cast packets to our BSS go through */
if (is_multicast_ether_addr(header->addr1))
- return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
+ return ether_addr_equal(header->addr2, priv->bssid);
/* packets to our adapter go through */
- return !memcmp(header->addr1, priv->net_dev->dev_addr,
- ETH_ALEN);
+ return ether_addr_equal(header->addr1,
+ priv->net_dev->dev_addr);
}
return 1;
@@ -8260,7 +8260,7 @@ static int is_duplicate_packet(struct ipw_priv *priv,
list_for_each(p, &priv->ibss_mac_hash[index]) {
entry =
list_entry(p, struct ipw_ibss_seq, list);
- if (!memcmp(entry->mac, mac, ETH_ALEN))
+ if (ether_addr_equal(entry->mac, mac))
break;
}
if (p == &priv->ibss_mac_hash[index]) {
@@ -8329,7 +8329,7 @@ static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
IEEE80211_STYPE_PROBE_RESP) ||
(WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
IEEE80211_STYPE_BEACON))) {
- if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
+ if (ether_addr_equal(header->addr3, priv->bssid))
ipw_add_station(priv, header->addr2);
}
@@ -9045,7 +9045,7 @@ static int ipw_wx_set_wap(struct net_device *dev,
}
priv->config |= CFG_STATIC_BSSID;
- if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
+ if (ether_addr_equal(priv->bssid, wrqu->ap_addr.sa_data)) {
IPW_DEBUG_WX("BSSID set to current BSSID.\n");
mutex_unlock(&priv->mutex);
return 0;
diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
index ce2785948be1..a586a85bfcfe 100644
--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
@@ -874,13 +874,13 @@ void libipw_rx_any(struct libipw_device *ieee,
switch (ieee->iw_mode) {
case IW_MODE_ADHOC:
/* our BSS and not from/to DS */
- if (memcmp(hdr->addr3, ieee->bssid, ETH_ALEN) == 0)
+ if (ether_addr_equal(hdr->addr3, ieee->bssid))
if ((fc & (IEEE80211_FCTL_TODS+IEEE80211_FCTL_FROMDS)) == 0) {
/* promisc: get all */
if (ieee->dev->flags & IFF_PROMISC)
is_packet_for_us = 1;
/* to us */
- else if (memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN) == 0)
+ else if (ether_addr_equal(hdr->addr1, ieee->dev->dev_addr))
is_packet_for_us = 1;
/* mcast */
else if (is_multicast_ether_addr(hdr->addr1))
@@ -889,18 +889,18 @@ void libipw_rx_any(struct libipw_device *ieee,
break;
case IW_MODE_INFRA:
/* our BSS (== from our AP) and from DS */
- if (memcmp(hdr->addr2, ieee->bssid, ETH_ALEN) == 0)
+ if (ether_addr_equal(hdr->addr2, ieee->bssid))
if ((fc & (IEEE80211_FCTL_TODS+IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS) {
/* promisc: get all */
if (ieee->dev->flags & IFF_PROMISC)
is_packet_for_us = 1;
/* to us */
- else if (memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN) == 0)
+ else if (ether_addr_equal(hdr->addr1, ieee->dev->dev_addr))
is_packet_for_us = 1;
/* mcast */
else if (is_multicast_ether_addr(hdr->addr1)) {
/* not our own packet bcasted from AP */
- if (memcmp(hdr->addr3, ieee->dev->dev_addr, ETH_ALEN))
+ if (!ether_addr_equal(hdr->addr3, ieee->dev->dev_addr))
is_packet_for_us = 1;
}
}
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 0b803c05cab3..6261f8c53d44 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -483,7 +483,7 @@ mwifiex_get_ba_tbl(struct mwifiex_private *priv, int tid, u8 *ra)
spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
- if (!memcmp(tx_ba_tsr_tbl->ra, ra, ETH_ALEN) &&
+ if (ether_addr_equal_unaligned(tx_ba_tsr_tbl->ra, ra) &&
tx_ba_tsr_tbl->tid == tid) {
spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
flags);
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 551194605aa7..24523e4015cb 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -782,8 +782,7 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
}
/* If BSSID is diff, modify current BSS parameters */
- if (memcmp(priv->curr_bss_params.bss_descriptor.mac_address,
- ibss_coal_resp->bssid, ETH_ALEN)) {
+ if (!ether_addr_equal(priv->curr_bss_params.bss_descriptor.mac_address, ibss_coal_resp->bssid)) {
/* BSSID */
memcpy(priv->curr_bss_params.bss_descriptor.mac_address,
ibss_coal_resp->bssid, ETH_ALEN);
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index 0bb510de8071..4651d676df38 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -224,7 +224,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
* directly to os. Don't pass thru rx reordering
*/
if (!IS_11N_ENABLED(priv) ||
- memcmp(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN)) {
+ !ether_addr_equal_unaligned(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest)) {
mwifiex_process_rx_packet(priv, skb);
return ret;
}
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index df784fefb8e3..78fa64d3f223 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -24,6 +24,7 @@
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/pci.h>
+#include <linux/etherdevice.h>
#include <asm/uaccess.h>
@@ -1860,7 +1861,7 @@ prism54_del_mac(struct net_device *ndev, struct iw_request_info *info,
if (mutex_lock_interruptible(&acl->lock))
return -ERESTARTSYS;
list_for_each_entry(entry, &acl->mac_list, _list) {
- if (memcmp(entry->addr, addr->sa_data, ETH_ALEN) == 0) {
+ if (ether_addr_equal(entry->addr, addr->sa_data)) {
list_del(&entry->_list);
acl->size--;
kfree(entry);
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/rtlwifi/cam.c
index 0e510f73041a..0276153c72cc 100644
--- a/drivers/net/wireless/rtlwifi/cam.c
+++ b/drivers/net/wireless/rtlwifi/cam.c
@@ -295,7 +295,7 @@ u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr)
/* Does STA already exist? */
for (i = 4; i < TOTAL_CAM_ENTRY; i++) {
addr = rtlpriv->sec.hwsec_cam_sta_addr[i];
- if (memcmp(addr, sta_addr, ETH_ALEN) == 0)
+ if (ether_addr_equal_unaligned(addr, sta_addr))
return i;
}
/* Get a free CAM entry. */
@@ -335,7 +335,7 @@ void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
addr = rtlpriv->sec.hwsec_cam_sta_addr[i];
bitmap = (rtlpriv->sec.hwsec_cam_bitmap) >> i;
if (((bitmap & BIT(0)) == BIT(0)) &&
- (memcmp(addr, sta_addr, ETH_ALEN) == 0)) {
+ (ether_addr_equal_unaligned(addr, sta_addr))) {
/* Remove from HW Security CAM */
eth_zero_addr(rtlpriv->sec.hwsec_cam_sta_addr[i]);
rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i);
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 80f92110a3bf..119c148f7740 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -525,7 +525,7 @@ static int wl1251_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
- if (memcmp(wl->mac_addr, vif->addr, ETH_ALEN)) {
+ if (!ether_addr_equal_unaligned(wl->mac_addr, vif->addr)) {
memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
ret = wl1251_acx_station_id(wl);
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 1f5987d142c1..d24d4a958c67 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -43,6 +43,7 @@
#include <linux/string.h>
#include <linux/wireless.h>
#include <linux/ieee80211.h>
+#include <linux/etherdevice.h>
#include <net/iw_handler.h>
@@ -672,8 +673,7 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr)
matchflag = 1;
if (matchflag) {
for (i = 0; i < this->bss_cnt; i++) {
- if (!memcmp(this->bss_set[i].bssid,
- sig.bssid, ETH_ALEN)) {
+ if (ether_addr_equal_unaligned(this->bss_set[i].bssid, sig.bssid)) {
matchflag = 0;
break;
}
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index eff79a37bc2a..e7af261e9198 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -532,9 +532,8 @@ void zd_mac_tx_failed(struct urb *urb)
tx_hdr = (struct ieee80211_hdr *)skb->data;
/* we skip all frames not matching the reported destination */
- if (unlikely(memcmp(tx_hdr->addr1, tx_status->mac, ETH_ALEN))) {
+ if (unlikely(!ether_addr_equal(tx_hdr->addr1, tx_status->mac)))
continue;
- }
/* we skip all frames not matching the reported final rate */
@@ -997,7 +996,7 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
continue;
tx_hdr = (struct ieee80211_hdr *)skb->data;
- if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN)))
+ if (likely(ether_addr_equal(tx_hdr->addr2, rx_hdr->addr1)))
{
found = 1;
break;
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 08ae01b41c83..4c76bcb9a879 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -101,6 +101,13 @@ struct xenvif_rx_meta {
#define MAX_PENDING_REQS 256
+/* It's possible for an skb to have a maximal number of frags
+ * but still be less than MAX_BUFFER_OFFSET in size. Thus the
+ * worst-case number of copy operations is MAX_SKB_FRAGS per
+ * ring slot.
+ */
+#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
+
struct xenvif {
/* Unique identifier for this interface. */
domid_t domid;
@@ -136,20 +143,19 @@ struct xenvif {
char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
struct xen_netif_rx_back_ring rx;
struct sk_buff_head rx_queue;
-
- /* Allow xenvif_start_xmit() to peek ahead in the rx request
- * ring. This is a prediction of what rx_req_cons will be
- * once all queued skbs are put on the ring.
+ bool rx_queue_stopped;
+ /* Set when the RX interrupt is triggered by the frontend.
+ * The worker thread may need to wake the queue.
*/
- RING_IDX rx_req_cons_peek;
+ bool rx_event;
- /* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
- * head/fragment page uses 2 copy operations because it
- * straddles two buffers in the frontend.
- */
- struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
- struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
+ /* This array is allocated separately as it is large */
+ struct gnttab_copy *grant_copy_op;
+ /* We create one meta structure per ring request we consume, so
+ * the maximum number is the same as the ring size.
+ */
+ struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
u8 fe_dev_addr[6];
@@ -198,8 +204,6 @@ void xenvif_xenbus_fini(void);
int xenvif_schedulable(struct xenvif *vif);
-int xenvif_rx_ring_full(struct xenvif *vif);
-
int xenvif_must_stop_queue(struct xenvif *vif);
/* (Un)Map communication rings. */
@@ -211,21 +215,20 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
/* Check for SKBs from frontend and schedule backend processing */
void xenvif_check_rx_xenvif(struct xenvif *vif);
-/* Queue an SKB for transmission to the frontend */
-void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
-/* Notify xenvif that ring now has space to send an skb to the frontend */
-void xenvif_notify_tx_completion(struct xenvif *vif);
-
/* Prevent the device from generating any further traffic. */
void xenvif_carrier_off(struct xenvif *vif);
-/* Returns number of ring slots required to send an skb to the frontend */
-unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
-
int xenvif_tx_action(struct xenvif *vif, int budget);
-void xenvif_rx_action(struct xenvif *vif);
int xenvif_kthread(void *data);
+void xenvif_kick_thread(struct xenvif *vif);
+
+/* Determine whether the needed number of slots (req) are available,
+ * and set req_event if not.
+ */
+bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
+
+void xenvif_stop_queue(struct xenvif *vif);
extern bool separate_tx_rx_irq;
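
Back-of-envelope for why grant_copy_op moved out of struct xenvif into its own allocation: with MAX_SKB_FRAGS at 17 and a 256-entry RX ring, the worst case is 4352 copy ops, well over a hundred kilobytes, too large to embed in the struct or to kmalloc() reliably. The numbers below assume a typical 4K-page 3.13 configuration and an illustrative gnttab_copy size.

#include <stdio.h>

#define MAX_SKB_FRAGS          17
#define XEN_NETIF_RX_RING_SIZE 256
#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)

struct gnttab_copy { char opaque[40]; };        /* illustrative size */

int main(void)
{
        printf("%d ops, %zu bytes\n", MAX_GRANT_COPY_OPS,
               MAX_GRANT_COPY_OPS * sizeof(struct gnttab_copy));
        return 0;
}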
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 2329cccf1fa6..b9de31ea7fc4 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -34,6 +34,7 @@
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
#include <xen/events.h>
#include <asm/xen/hypercall.h>
@@ -46,11 +47,6 @@ int xenvif_schedulable(struct xenvif *vif)
return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
}
-static int xenvif_rx_schedulable(struct xenvif *vif)
-{
- return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif);
-}
-
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
struct xenvif *vif = dev_id;
@@ -104,8 +100,8 @@ static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
struct xenvif *vif = dev_id;
- if (xenvif_rx_schedulable(vif))
- netif_wake_queue(vif->dev);
+ vif->rx_event = true;
+ xenvif_kick_thread(vif);
return IRQ_HANDLED;
}
@@ -121,24 +117,35 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
+ int min_slots_needed;
BUG_ON(skb->dev != dev);
/* Drop the packet if vif is not ready */
- if (vif->task == NULL)
+ if (vif->task == NULL || !xenvif_schedulable(vif))
goto drop;
- /* Drop the packet if the target domain has no receive buffers. */
- if (!xenvif_rx_schedulable(vif))
- goto drop;
+ /* At best we'll need one slot for the header and one for each
+ * frag.
+ */
+ min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
- /* Reserve ring slots for the worst-case number of fragments. */
- vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb);
+ /* If the skb is GSO then we'll also need an extra slot for the
+ * metadata.
+ */
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
+ skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ min_slots_needed++;
- if (vif->can_queue && xenvif_must_stop_queue(vif))
- netif_stop_queue(dev);
+ /* If the skb can't possibly fit in the remaining slots
+ * then turn off the queue to give the ring a chance to
+ * drain.
+ */
+ if (!xenvif_rx_ring_slots_available(vif, min_slots_needed))
+ xenvif_stop_queue(vif);
- xenvif_queue_tx_skb(vif, skb);
+ skb_queue_tail(&vif->rx_queue, skb);
+ xenvif_kick_thread(vif);
return NETDEV_TX_OK;
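
The new admission test replaces the exact-count xenvif_count_skb_slots() with a cheap lower bound: one slot for the linear area, one per frag, plus one when GSO metadata needs an extra_info slot. A sketch of that estimate; simplified, since the real code tests gso_type against SKB_GSO_TCPV4/TCPV6.

#include <stdbool.h>
#include <stdio.h>

static int min_slots_needed(int nr_frags, bool gso)
{
        int slots = 1 + nr_frags;       /* header + one per frag */

        if (gso)
                slots++;                /* extra_info slot for GSO metadata */
        return slots;
}

int main(void)
{
        printf("%d\n", min_slots_needed(0, false));  /* small packet: 1 */
        printf("%d\n", min_slots_needed(17, true));  /* worst case: 19  */
        return 0;
}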
@@ -148,12 +155,6 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-void xenvif_notify_tx_completion(struct xenvif *vif)
-{
- if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
- netif_wake_queue(vif->dev);
-}
-
static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
@@ -307,6 +308,15 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
SET_NETDEV_DEV(dev, parent);
vif = netdev_priv(dev);
+
+ vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
+ MAX_GRANT_COPY_OPS);
+ if (vif->grant_copy_op == NULL) {
+ pr_warn("Could not allocate grant copy space for %s\n", name);
+ free_netdev(dev);
+ return ERR_PTR(-ENOMEM);
+ }
+
vif->domid = domid;
vif->handle = handle;
vif->can_sg = 1;
@@ -368,16 +378,18 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
unsigned long rx_ring_ref, unsigned int tx_evtchn,
unsigned int rx_evtchn)
{
+ struct task_struct *task;
int err = -ENOMEM;
- /* Already connected through? */
- if (vif->tx_irq)
- return 0;
+ BUG_ON(vif->tx_irq);
+ BUG_ON(vif->task);
err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
if (err < 0)
goto err;
+ init_waitqueue_head(&vif->wq);
+
if (tx_evtchn == rx_evtchn) {
/* feature-split-event-channels == 0 */
err = bind_interdomain_evtchn_to_irqhandler(
@@ -410,15 +422,16 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
disable_irq(vif->rx_irq);
}
- init_waitqueue_head(&vif->wq);
- vif->task = kthread_create(xenvif_kthread,
- (void *)vif, "%s", vif->dev->name);
- if (IS_ERR(vif->task)) {
+ task = kthread_create(xenvif_kthread,
+ (void *)vif, "%s", vif->dev->name);
+ if (IS_ERR(task)) {
pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
- err = PTR_ERR(vif->task);
+ err = PTR_ERR(task);
goto err_rx_unbind;
}
+ vif->task = task;
+
rtnl_lock();
if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
dev_set_mtu(vif->dev, ETH_DATA_LEN);
@@ -461,8 +474,10 @@ void xenvif_disconnect(struct xenvif *vif)
if (netif_carrier_ok(vif->dev))
xenvif_carrier_off(vif);
- if (vif->task)
+ if (vif->task) {
kthread_stop(vif->task);
+ vif->task = NULL;
+ }
if (vif->tx_irq) {
if (vif->tx_irq == vif->rx_irq)
@@ -483,6 +498,7 @@ void xenvif_free(struct xenvif *vif)
unregister_netdev(vif->dev);
+ vfree(vif->grant_copy_op);
free_netdev(vif->dev);
module_put(THIS_MODULE);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 919b6509455c..27385639b6e5 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -39,6 +39,7 @@
#include <linux/udp.h>
#include <net/tcp.h>
+#include <net/ip6_checksum.h>
#include <xen/xen.h>
#include <xen/events.h>
@@ -137,36 +138,26 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
vif->pending_prod + vif->pending_cons;
}
-static int max_required_rx_slots(struct xenvif *vif)
+bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
{
- int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
+ RING_IDX prod, cons;
- /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
- if (vif->can_sg || vif->gso_mask || vif->gso_prefix_mask)
- max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
-
- return max;
-}
-
-int xenvif_rx_ring_full(struct xenvif *vif)
-{
- RING_IDX peek = vif->rx_req_cons_peek;
- RING_IDX needed = max_required_rx_slots(vif);
+ do {
+ prod = vif->rx.sring->req_prod;
+ cons = vif->rx.req_cons;
- return ((vif->rx.sring->req_prod - peek) < needed) ||
- ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
-}
+ if (prod - cons >= needed)
+ return true;
-int xenvif_must_stop_queue(struct xenvif *vif)
-{
- if (!xenvif_rx_ring_full(vif))
- return 0;
+ vif->rx.sring->req_event = prod + 1;
- vif->rx.sring->req_event = vif->rx_req_cons_peek +
- max_required_rx_slots(vif);
- mb(); /* request notification /then/ check the queue */
+ /* Make sure event is visible before we check prod
+ * again.
+ */
+ mb();
+ } while (vif->rx.sring->req_prod != prod);
- return xenvif_rx_ring_full(vif);
+ return false;
}
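
The loop above is the classic race-free wait: publish req_event = prod + 1, execute a full barrier, then re-read req_prod. If the frontend produced a request in that window, the check is simply retried instead of sleeping on a notification that may never come. A minimal userspace rendering of the same pattern with C11 atomics (illustrative names, single producer assumed):

/* Userspace sketch of the re-arm-then-re-check pattern used by
 * xenvif_rx_ring_slots_available() above. Names are illustrative only.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned int prod;   /* advanced by the producer */
static _Atomic unsigned int event;  /* producer notifies when prod reaches this */

static bool slots_available(unsigned int cons, unsigned int needed)
{
        unsigned int p;

        do {
                p = atomic_load(&prod);
                if (p - cons >= needed)
                        return true;

                /* Ask to be notified on the next request... */
                atomic_store(&event, p + 1);

                /* ...and make that visible before re-checking prod, so a
                 * request that raced with the store cannot be missed.
                 */
                atomic_thread_fence(memory_order_seq_cst);
        } while (atomic_load(&prod) != p);

        return false;   /* caller will be woken via the event instead */
}

int main(void)
{
        atomic_store(&prod, 8);
        printf("%d\n", slots_available(0, 4));  /* 1: 8 slots >= 4 needed */
        return 0;
}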
/*
@@ -209,93 +200,6 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
return false;
}
-struct xenvif_count_slot_state {
- unsigned long copy_off;
- bool head;
-};
-
-unsigned int xenvif_count_frag_slots(struct xenvif *vif,
- unsigned long offset, unsigned long size,
- struct xenvif_count_slot_state *state)
-{
- unsigned count = 0;
-
- offset &= ~PAGE_MASK;
-
- while (size > 0) {
- unsigned long bytes;
-
- bytes = PAGE_SIZE - offset;
-
- if (bytes > size)
- bytes = size;
-
- if (start_new_rx_buffer(state->copy_off, bytes, state->head)) {
- count++;
- state->copy_off = 0;
- }
-
- if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
- bytes = MAX_BUFFER_OFFSET - state->copy_off;
-
- state->copy_off += bytes;
-
- offset += bytes;
- size -= bytes;
-
- if (offset == PAGE_SIZE)
- offset = 0;
-
- state->head = false;
- }
-
- return count;
-}
-
-/*
- * Figure out how many ring slots we're going to need to send @skb to
- * the guest. This function is essentially a dry run of
- * xenvif_gop_frag_copy.
- */
-unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
-{
- struct xenvif_count_slot_state state;
- unsigned int count;
- unsigned char *data;
- unsigned i;
-
- state.head = true;
- state.copy_off = 0;
-
- /* Slot for the first (partial) page of data. */
- count = 1;
-
- /* Need a slot for the GSO prefix for GSO extra data? */
- if (skb_shinfo(skb)->gso_size)
- count++;
-
- data = skb->data;
- while (data < skb_tail_pointer(skb)) {
- unsigned long offset = offset_in_page(data);
- unsigned long size = PAGE_SIZE - offset;
-
- if (data + size > skb_tail_pointer(skb))
- size = skb_tail_pointer(skb) - data;
-
- count += xenvif_count_frag_slots(vif, offset, size, &state);
-
- data += size;
- }
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
- unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
-
- count += xenvif_count_frag_slots(vif, offset, size, &state);
- }
- return count;
-}
-
struct netrx_pending_operations {
unsigned copy_prod, copy_cons;
unsigned meta_prod, meta_cons;
@@ -451,7 +355,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
}
/* Set up a GSO prefix descriptor, if necessary */
- if ((1 << skb_shinfo(skb)->gso_type) & vif->gso_prefix_mask) {
+ if ((1 << gso_type) & vif->gso_prefix_mask) {
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
meta->gso_type = gso_type;
@@ -556,12 +460,12 @@ struct skb_cb_overlay {
int meta_slots_used;
};
-static void xenvif_kick_thread(struct xenvif *vif)
+void xenvif_kick_thread(struct xenvif *vif)
{
wake_up(&vif->wq);
}
-void xenvif_rx_action(struct xenvif *vif)
+static void xenvif_rx_action(struct xenvif *vif)
{
s8 status;
u16 flags;
@@ -570,11 +474,10 @@ void xenvif_rx_action(struct xenvif *vif)
struct sk_buff *skb;
LIST_HEAD(notify);
int ret;
- int nr_frags;
- int count;
unsigned long offset;
struct skb_cb_overlay *sco;
- int need_to_notify = 0;
+ bool need_to_notify = false;
+ bool ring_full = false;
struct netrx_pending_operations npo = {
.copy = vif->grant_copy_op,
@@ -583,38 +486,54 @@ void xenvif_rx_action(struct xenvif *vif)
skb_queue_head_init(&rxq);
- count = 0;
-
while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
- vif = netdev_priv(skb->dev);
- nr_frags = skb_shinfo(skb)->nr_frags;
+ int max_slots_needed;
+ int i;
+
+ /* We need a cheap worst-case estimate for the number of
+ * slots we'll use.
+ */
+
+ max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
+ skb_headlen(skb),
+ PAGE_SIZE);
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ unsigned int size;
+ size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
+ }
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
+ skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ max_slots_needed++;
+
+ /* If the skb may not fit then bail out now */
+ if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
+ skb_queue_head(&vif->rx_queue, skb);
+ need_to_notify = true;
+ ring_full = true;
+ break;
+ }
sco = (struct skb_cb_overlay *)skb->cb;
sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
-
- count += nr_frags + 1;
+ BUG_ON(sco->meta_slots_used > max_slots_needed);
__skb_queue_tail(&rxq, skb);
-
- /* Filled the batch queue? */
- /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
- if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
- break;
}
BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
+ vif->rx_queue_stopped = !npo.copy_prod && ring_full;
+
if (!npo.copy_prod)
- return;
+ goto done;
- BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op));
+ BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
while ((skb = __skb_dequeue(&rxq)) != NULL) {
sco = (struct skb_cb_overlay *)skb->cb;
- vif = netdev_priv(skb->dev);
-
if ((1 << vif->meta[npo.meta_cons].gso_type) &
vif->gso_prefix_mask) {
resp = RING_GET_RESPONSE(&vif->rx,
@@ -677,28 +596,15 @@ void xenvif_rx_action(struct xenvif *vif)
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
- if (ret)
- need_to_notify = 1;
-
- xenvif_notify_tx_completion(vif);
+ need_to_notify |= !!ret;
npo.meta_cons += sco->meta_slots_used;
dev_kfree_skb(skb);
}
+done:
if (need_to_notify)
notify_remote_via_irq(vif->rx_irq);
-
- /* More work to do? */
- if (!skb_queue_empty(&vif->rx_queue))
- xenvif_kick_thread(vif);
-}
-
-void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
-{
- skb_queue_tail(&vif->rx_queue, skb);
-
- xenvif_kick_thread(vif);
}
void xenvif_check_rx_xenvif(struct xenvif *vif)
@@ -1140,83 +1046,104 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
}
skb_shinfo(skb)->gso_size = gso->u.gso.size;
-
- /* Header must be checked, and gso_segs computed. */
- skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
- skb_shinfo(skb)->gso_segs = 0;
+ /* gso_segs will be calculated later */
return 0;
}
-static inline void maybe_pull_tail(struct sk_buff *skb, unsigned int len)
+static inline int maybe_pull_tail(struct sk_buff *skb, unsigned int len,
+ unsigned int max)
{
- if (skb_is_nonlinear(skb) && skb_headlen(skb) < len) {
- /* If we need to pullup then pullup to the max, so we
- * won't need to do it again.
- */
- int target = min_t(int, skb->len, MAX_TCP_HEADER);
- __pskb_pull_tail(skb, target - skb_headlen(skb));
- }
+ if (skb_headlen(skb) >= len)
+ return 0;
+
+ /* If we need to pullup then pullup to the max, so we
+ * won't need to do it again.
+ */
+ if (max > skb->len)
+ max = skb->len;
+
+ if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
+ return -ENOMEM;
+
+ if (skb_headlen(skb) < len)
+ return -EPROTO;
+
+ return 0;
}
+/* This value should be large enough to cover a tagged ethernet header plus
+ * maximally sized IP and TCP or UDP headers.
+ */
+#define MAX_IP_HDR_LEN 128
+
static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
int recalculate_partial_csum)
{
- struct iphdr *iph = (void *)skb->data;
- unsigned int header_size;
unsigned int off;
- int err = -EPROTO;
+ bool fragment;
+ int err;
- off = sizeof(struct iphdr);
+ fragment = false;
- header_size = skb->network_header + off + MAX_IPOPTLEN;
- maybe_pull_tail(skb, header_size);
+ err = maybe_pull_tail(skb,
+ sizeof(struct iphdr),
+ MAX_IP_HDR_LEN);
+ if (err < 0)
+ goto out;
- off = iph->ihl * 4;
+ if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
+ fragment = true;
- switch (iph->protocol) {
- case IPPROTO_TCP:
- if (!skb_partial_csum_set(skb, off,
- offsetof(struct tcphdr, check)))
- goto out;
+ off = ip_hdrlen(skb);
+
+ err = -EPROTO;
- if (recalculate_partial_csum) {
- struct tcphdr *tcph = tcp_hdr(skb);
+ if (fragment)
+ goto out;
- header_size = skb->network_header +
- off +
- sizeof(struct tcphdr);
- maybe_pull_tail(skb, header_size);
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_TCP:
+ err = maybe_pull_tail(skb,
+ off + sizeof(struct tcphdr),
+ MAX_IP_HDR_LEN);
+ if (err < 0)
+ goto out;
- tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
- skb->len - off,
- IPPROTO_TCP, 0);
+ if (!skb_partial_csum_set(skb, off,
+ offsetof(struct tcphdr, check))) {
+ err = -EPROTO;
+ goto out;
}
+
+ if (recalculate_partial_csum)
+ tcp_hdr(skb)->check =
+ ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ skb->len - off,
+ IPPROTO_TCP, 0);
break;
case IPPROTO_UDP:
- if (!skb_partial_csum_set(skb, off,
- offsetof(struct udphdr, check)))
+ err = maybe_pull_tail(skb,
+ off + sizeof(struct udphdr),
+ MAX_IP_HDR_LEN);
+ if (err < 0)
goto out;
- if (recalculate_partial_csum) {
- struct udphdr *udph = udp_hdr(skb);
-
- header_size = skb->network_header +
- off +
- sizeof(struct udphdr);
- maybe_pull_tail(skb, header_size);
-
- udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
- skb->len - off,
- IPPROTO_UDP, 0);
+ if (!skb_partial_csum_set(skb, off,
+ offsetof(struct udphdr, check))) {
+ err = -EPROTO;
+ goto out;
}
+
+ if (recalculate_partial_csum)
+ udp_hdr(skb)->check =
+ ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ skb->len - off,
+ IPPROTO_UDP, 0);
break;
default:
- if (net_ratelimit())
- netdev_err(vif->dev,
- "Attempting to checksum a non-TCP/UDP packet, "
- "dropping a protocol %d packet\n",
- iph->protocol);
goto out;
}
@@ -1226,121 +1153,142 @@ out:
return err;
}
+/* This value should be large enough to cover a tagged ethernet header plus
+ * an IPv6 header, all options, and a maximal TCP or UDP header.
+ */
+#define MAX_IPV6_HDR_LEN 256
+
+#define OPT_HDR(type, skb, off) \
+ (type *)(skb_network_header(skb) + (off))
+
static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
int recalculate_partial_csum)
{
- int err = -EPROTO;
- struct ipv6hdr *ipv6h = (void *)skb->data;
+ int err;
u8 nexthdr;
- unsigned int header_size;
unsigned int off;
+ unsigned int len;
bool fragment;
bool done;
+ fragment = false;
done = false;
off = sizeof(struct ipv6hdr);
- header_size = skb->network_header + off;
- maybe_pull_tail(skb, header_size);
+ err = maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
+ if (err < 0)
+ goto out;
- nexthdr = ipv6h->nexthdr;
+ nexthdr = ipv6_hdr(skb)->nexthdr;
- while ((off <= sizeof(struct ipv6hdr) + ntohs(ipv6h->payload_len)) &&
- !done) {
+ len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
+ while (off <= len && !done) {
switch (nexthdr) {
case IPPROTO_DSTOPTS:
case IPPROTO_HOPOPTS:
case IPPROTO_ROUTING: {
- struct ipv6_opt_hdr *hp = (void *)(skb->data + off);
+ struct ipv6_opt_hdr *hp;
- header_size = skb->network_header +
- off +
- sizeof(struct ipv6_opt_hdr);
- maybe_pull_tail(skb, header_size);
+ err = maybe_pull_tail(skb,
+ off +
+ sizeof(struct ipv6_opt_hdr),
+ MAX_IPV6_HDR_LEN);
+ if (err < 0)
+ goto out;
+ hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
nexthdr = hp->nexthdr;
off += ipv6_optlen(hp);
break;
}
case IPPROTO_AH: {
- struct ip_auth_hdr *hp = (void *)(skb->data + off);
+ struct ip_auth_hdr *hp;
- header_size = skb->network_header +
- off +
- sizeof(struct ip_auth_hdr);
- maybe_pull_tail(skb, header_size);
+ err = maybe_pull_tail(skb,
+ off +
+ sizeof(struct ip_auth_hdr),
+ MAX_IPV6_HDR_LEN);
+ if (err < 0)
+ goto out;
+ hp = OPT_HDR(struct ip_auth_hdr, skb, off);
nexthdr = hp->nexthdr;
- off += (hp->hdrlen+2)<<2;
+ off += ipv6_authlen(hp);
+ break;
+ }
+ case IPPROTO_FRAGMENT: {
+ struct frag_hdr *hp;
+
+ err = maybe_pull_tail(skb,
+ off +
+ sizeof(struct frag_hdr),
+ MAX_IPV6_HDR_LEN);
+ if (err < 0)
+ goto out;
+
+ hp = OPT_HDR(struct frag_hdr, skb, off);
+
+ if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
+ fragment = true;
+
+ nexthdr = hp->nexthdr;
+ off += sizeof(struct frag_hdr);
break;
}
- case IPPROTO_FRAGMENT:
- fragment = true;
- /* fall through */
default:
done = true;
break;
}
}
- if (!done) {
- if (net_ratelimit())
- netdev_err(vif->dev, "Failed to parse packet header\n");
- goto out;
- }
+ err = -EPROTO;
- if (fragment) {
- if (net_ratelimit())
- netdev_err(vif->dev, "Packet is a fragment!\n");
+ if (!done || fragment)
goto out;
- }
switch (nexthdr) {
case IPPROTO_TCP:
- if (!skb_partial_csum_set(skb, off,
- offsetof(struct tcphdr, check)))
+ err = maybe_pull_tail(skb,
+ off + sizeof(struct tcphdr),
+ MAX_IPV6_HDR_LEN);
+ if (err < 0)
goto out;
- if (recalculate_partial_csum) {
- struct tcphdr *tcph = tcp_hdr(skb);
-
- header_size = skb->network_header +
- off +
- sizeof(struct tcphdr);
- maybe_pull_tail(skb, header_size);
-
- tcph->check = ~csum_ipv6_magic(&ipv6h->saddr,
- &ipv6h->daddr,
- skb->len - off,
- IPPROTO_TCP, 0);
+ if (!skb_partial_csum_set(skb, off,
+ offsetof(struct tcphdr, check))) {
+ err = -EPROTO;
+ goto out;
}
+
+ if (recalculate_partial_csum)
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ skb->len - off,
+ IPPROTO_TCP, 0);
break;
case IPPROTO_UDP:
- if (!skb_partial_csum_set(skb, off,
- offsetof(struct udphdr, check)))
+ err = maybe_pull_tail(skb,
+ off + sizeof(struct udphdr),
+ MAX_IPV6_HDR_LEN);
+ if (err < 0)
goto out;
- if (recalculate_partial_csum) {
- struct udphdr *udph = udp_hdr(skb);
-
- header_size = skb->network_header +
- off +
- sizeof(struct udphdr);
- maybe_pull_tail(skb, header_size);
-
- udph->check = ~csum_ipv6_magic(&ipv6h->saddr,
- &ipv6h->daddr,
- skb->len - off,
- IPPROTO_UDP, 0);
+ if (!skb_partial_csum_set(skb, off,
+ offsetof(struct udphdr, check))) {
+ err = -EPROTO;
+ goto out;
}
+
+ if (recalculate_partial_csum)
+ udp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ skb->len - off,
+ IPPROTO_UDP, 0);
break;
default:
- if (net_ratelimit())
- netdev_err(vif->dev,
- "Attempting to checksum a non-TCP/UDP packet, "
- "dropping a protocol %d packet\n",
- nexthdr);
goto out;
}
@@ -1410,14 +1358,15 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
return false;
}
-static unsigned xenvif_tx_build_gops(struct xenvif *vif)
+static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
{
struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
struct sk_buff *skb;
int ret;
while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
- < MAX_PENDING_REQS)) {
+ < MAX_PENDING_REQS) &&
+ (skb_queue_len(&vif->tx_queue) < budget)) {
struct xen_netif_tx_request txreq;
struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
struct page *page;
@@ -1439,7 +1388,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif)
continue;
}
- RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
+ work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
if (!work_to_do)
break;
@@ -1579,14 +1528,13 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif)
}
-static int xenvif_tx_submit(struct xenvif *vif, int budget)
+static int xenvif_tx_submit(struct xenvif *vif)
{
struct gnttab_copy *gop = vif->tx_copy_ops;
struct sk_buff *skb;
int work_done = 0;
- while (work_done < budget &&
- (skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
+ while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
struct xen_netif_tx_request *txp;
u16 pending_idx;
unsigned data_len;
@@ -1641,6 +1589,20 @@ static int xenvif_tx_submit(struct xenvif *vif, int budget)
skb_probe_transport_header(skb, 0);
+ /* If the packet is GSO then we will have just set up the
+ * transport header offset in checksum_setup so it's now
+ * straightforward to calculate gso_segs.
+ */
+ if (skb_is_gso(skb)) {
+ int mss = skb_shinfo(skb)->gso_size;
+ int hdrlen = skb_transport_header(skb) -
+ skb_mac_header(skb) +
+ tcp_hdrlen(skb);
+
+ skb_shinfo(skb)->gso_segs =
+ DIV_ROUND_UP(skb->len - hdrlen, mss);
+ }
+
vif->dev->stats.rx_bytes += skb->len;
vif->dev->stats.rx_packets++;
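
Since checksum_setup has already set the transport header, the gso_segs computation above reduces to a round-up division of the TCP payload by the MSS. A quick standalone check with hypothetical sizes:

/* Standalone check of the gso_segs arithmetic above (hypothetical sizes). */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        int skb_len = 3014;             /* total packet length */
        int hdrlen = 14 + 20 + 20;      /* Ethernet + IPv4 + TCP headers */
        int mss = 1480;

        /* 2960 bytes of payload at 1480 per segment -> 2 segments */
        printf("gso_segs = %d\n", DIV_ROUND_UP(skb_len - hdrlen, mss));
        return 0;
}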
@@ -1661,14 +1623,14 @@ int xenvif_tx_action(struct xenvif *vif, int budget)
if (unlikely(!tx_work_todo(vif)))
return 0;
- nr_gops = xenvif_tx_build_gops(vif);
+ nr_gops = xenvif_tx_build_gops(vif, budget);
if (nr_gops == 0)
return 0;
gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
- work_done = xenvif_tx_submit(vif, nr_gops);
+ work_done = xenvif_tx_submit(vif);
return work_done;
}
@@ -1765,7 +1727,8 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
static inline int rx_work_todo(struct xenvif *vif)
{
- return !skb_queue_empty(&vif->rx_queue);
+ return (!skb_queue_empty(&vif->rx_queue) && !vif->rx_queue_stopped) ||
+ vif->rx_event;
}
static inline int tx_work_todo(struct xenvif *vif)
@@ -1815,8 +1778,6 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
rxs = (struct xen_netif_rx_sring *)addr;
BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
- vif->rx_req_cons_peek = 0;
-
return 0;
err:
@@ -1824,9 +1785,24 @@ err:
return err;
}
+void xenvif_stop_queue(struct xenvif *vif)
+{
+ if (!vif->can_queue)
+ return;
+
+ netif_stop_queue(vif->dev);
+}
+
+static void xenvif_start_queue(struct xenvif *vif)
+{
+ if (xenvif_schedulable(vif))
+ netif_wake_queue(vif->dev);
+}
+
int xenvif_kthread(void *data)
{
struct xenvif *vif = data;
+ struct sk_buff *skb;
while (!kthread_should_stop()) {
wait_event_interruptible(vif->wq,
@@ -1835,12 +1811,22 @@ int xenvif_kthread(void *data)
if (kthread_should_stop())
break;
- if (rx_work_todo(vif))
+ if (!skb_queue_empty(&vif->rx_queue))
xenvif_rx_action(vif);
+ vif->rx_event = false;
+
+ if (skb_queue_empty(&vif->rx_queue) &&
+ netif_queue_stopped(vif->dev))
+ xenvif_start_queue(vif);
+
cond_resched();
}
+ /* Bin any remaining skbs */
+ while ((skb = skb_dequeue(&vif->rx_queue)) != NULL)
+ dev_kfree_skb(skb);
+
return 0;
}
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index f0358992b04f..7a206cffb062 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -15,8 +15,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "common.h"
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
index 1cb6e51e6bda..170e8e60cdb7 100644
--- a/drivers/ntb/ntb_hw.c
+++ b/drivers/ntb/ntb_hw.c
@@ -141,6 +141,24 @@ void ntb_unregister_event_callback(struct ntb_device *ndev)
ndev->event_cb = NULL;
}
+static void ntb_irq_work(unsigned long data)
+{
+ struct ntb_db_cb *db_cb = (struct ntb_db_cb *)data;
+ int rc;
+
+ rc = db_cb->callback(db_cb->data, db_cb->db_num);
+ if (rc)
+ tasklet_schedule(&db_cb->irq_work);
+ else {
+ struct ntb_device *ndev = db_cb->ndev;
+ unsigned long mask;
+
+ mask = readw(ndev->reg_ofs.ldb_mask);
+ clear_bit(db_cb->db_num * ndev->bits_per_vector, &mask);
+ writew(mask, ndev->reg_ofs.ldb_mask);
+ }
+}
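
The tasklet above implements a mask-until-drained discipline: the hard-IRQ handlers (further down) mask the doorbell bit and schedule the tasklet, which re-runs the client callback while it returns nonzero and only unmasks the bit once the callback reports no remaining work. A condensed userspace sketch of that discipline (illustrative names, not the driver API; the loop stands in for tasklet rescheduling):

/* Userspace sketch of the mask-in-IRQ / unmask-in-tasklet pattern above. */
#include <stdbool.h>
#include <stdio.h>

static bool masked;

static int poll(void)          /* client callback: nonzero while work remains */
{
        static int pending = 3;
        return pending ? pending-- : 0;
}

static void irq_work(void)
{
        while (poll())          /* tasklet_schedule() would re-run us instead */
                ;
        masked = false;         /* all drained: unmask, allow the next IRQ */
}

static void hardirq(void)
{
        masked = true;          /* silence further doorbells... */
        irq_work();             /* ...and defer the real work (tasklet) */
}

int main(void)
{
        hardirq();
        printf("masked=%d\n", masked);
        return 0;
}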
+
/**
* ntb_register_db_callback() - register a callback for doorbell interrupt
* @ndev: pointer to ntb_device instance
@@ -155,7 +173,7 @@ void ntb_unregister_event_callback(struct ntb_device *ndev)
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
- void *data, void (*func)(void *data, int db_num))
+ void *data, int (*func)(void *data, int db_num))
{
unsigned long mask;
@@ -166,6 +184,10 @@ int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
ndev->db_cb[idx].callback = func;
ndev->db_cb[idx].data = data;
+ ndev->db_cb[idx].ndev = ndev;
+
+ tasklet_init(&ndev->db_cb[idx].irq_work, ntb_irq_work,
+ (unsigned long) &ndev->db_cb[idx]);
/* unmask interrupt */
mask = readw(ndev->reg_ofs.ldb_mask);
@@ -194,6 +216,8 @@ void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx)
set_bit(idx * ndev->bits_per_vector, &mask);
writew(mask, ndev->reg_ofs.ldb_mask);
+ tasklet_disable(&ndev->db_cb[idx].irq_work);
+
ndev->db_cb[idx].callback = NULL;
}
@@ -678,6 +702,7 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
return -EINVAL;
ndev->limits.max_mw = SNB_ERRATA_MAX_MW;
+ ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
ndev->reg_ofs.spad_write = ndev->mw[1].vbase +
SNB_SPAD_OFFSET;
ndev->reg_ofs.rdb = ndev->mw[1].vbase +
@@ -688,8 +713,21 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
*/
writeq(ndev->mw[1].bar_sz + 0x1000, ndev->reg_base +
SNB_PBAR4LMT_OFFSET);
+ /* HW errata on the Limit registers. They can only be
+ * written when the base register is 4GB aligned and
+ * < 32bit. This should already be the case based on the
+ * driver defaults, but write the Limit registers first
+ * just in case.
+ */
} else {
ndev->limits.max_mw = SNB_MAX_MW;
+
+ /* HW Errata on bit 14 of b2bdoorbell register. Writes
+ * will not be mirrored to the remote system. Shrink
+ * the number of bits by one, since bit 14 is the last
+ * bit.
+ */
+ ndev->limits.max_db_bits = SNB_MAX_DB_BITS - 1;
ndev->reg_ofs.spad_write = ndev->reg_base +
SNB_B2B_SPAD_OFFSET;
ndev->reg_ofs.rdb = ndev->reg_base +
@@ -699,6 +737,12 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
* something silly
*/
writeq(0, ndev->reg_base + SNB_PBAR4LMT_OFFSET);
+ /* HW errata on the Limit registers. They can only be
+ * written when the base register is 4GB aligned and
+ * < 32bit. This should already be the case based on the
+ * driver defaults, but write the Limit registers first
+ * just in case.
+ */
}
/* The Xeon errata workaround requires setting SBAR Base
@@ -769,6 +813,7 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
* have an equal amount.
*/
ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS / 2;
+ ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
/* Note: The SDOORBELL is the cause of the errata. You REALLY
* don't want to touch it.
*/
@@ -793,6 +838,7 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
* have an equal amount.
*/
ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS / 2;
+ ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
ndev->reg_ofs.rdb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
ndev->reg_ofs.ldb = ndev->reg_base + SNB_SDOORBELL_OFFSET;
ndev->reg_ofs.ldb_mask = ndev->reg_base + SNB_SDBMSK_OFFSET;
@@ -819,7 +865,6 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
ndev->reg_ofs.lnk_stat = ndev->reg_base + SNB_SLINK_STATUS_OFFSET;
ndev->reg_ofs.spci_cmd = ndev->reg_base + SNB_PCICMD_OFFSET;
- ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
ndev->limits.msix_cnt = SNB_MSIX_CNT;
ndev->bits_per_vector = SNB_DB_BITS_PER_VEC;
@@ -934,12 +979,16 @@ static irqreturn_t bwd_callback_msix_irq(int irq, void *data)
{
struct ntb_db_cb *db_cb = data;
struct ntb_device *ndev = db_cb->ndev;
+ unsigned long mask;
dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq,
db_cb->db_num);
- if (db_cb->callback)
- db_cb->callback(db_cb->data, db_cb->db_num);
+ mask = readw(ndev->reg_ofs.ldb_mask);
+ set_bit(db_cb->db_num * ndev->bits_per_vector, &mask);
+ writew(mask, ndev->reg_ofs.ldb_mask);
+
+ tasklet_schedule(&db_cb->irq_work);
/* No need to check for the specific HB irq, any interrupt means
* we're connected.
@@ -955,12 +1004,16 @@ static irqreturn_t xeon_callback_msix_irq(int irq, void *data)
{
struct ntb_db_cb *db_cb = data;
struct ntb_device *ndev = db_cb->ndev;
+ unsigned long mask;
dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq,
db_cb->db_num);
- if (db_cb->callback)
- db_cb->callback(db_cb->data, db_cb->db_num);
+ mask = readw(ndev->reg_ofs.ldb_mask);
+ set_bit(db_cb->db_num * ndev->bits_per_vector, &mask);
+ writew(mask, ndev->reg_ofs.ldb_mask);
+
+ tasklet_schedule(&db_cb->irq_work);
/* On Sandybridge, there are 16 bits in the interrupt register
* but only 4 vectors. So, 5 bits are assigned to the first 3
@@ -986,7 +1039,7 @@ static irqreturn_t xeon_event_msix_irq(int irq, void *dev)
dev_err(&ndev->pdev->dev, "Error determining link status\n");
/* bit 15 is always the link bit */
- writew(1 << ndev->limits.max_db_bits, ndev->reg_ofs.ldb);
+ writew(1 << SNB_LINK_DB, ndev->reg_ofs.ldb);
return IRQ_HANDLED;
}
@@ -1075,6 +1128,10 @@ static int ntb_setup_msix(struct ntb_device *ndev)
"Only %d MSI-X vectors. Limiting the number of queues to that number.\n",
rc);
msix_entries = rc;
+
+ rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries);
+ if (rc)
+ goto err1;
}
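
pci_enable_msix() of this vintage returned a positive count when fewer vectors were available than requested; the added call retries the allocation with that advertised count rather than proceeding as if the first attempt had succeeded. A standalone sketch of the retry idiom (enable() is a stand-in mirroring the old pci_enable_msix() contract):

/* Sketch of the retry-with-advertised-count idiom above: enable()
 * returns 0 on success or the number of vectors actually available.
 */
#include <stdio.h>

static int enable(int want)
{
        return want > 4 ? 4 : 0;        /* hardware only has 4 vectors */
}

int main(void)
{
        int want = 16;
        int rc = enable(want);

        if (rc > 0) {
                want = rc;              /* shrink to what is available... */
                rc = enable(want);      /* ...and retry once */
        }
        printf("rc=%d want=%d\n", rc, want);
        return 0;
}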
for (i = 0; i < msix_entries; i++) {
@@ -1176,9 +1233,10 @@ static int ntb_setup_interrupts(struct ntb_device *ndev)
*/
if (ndev->hw_type == BWD_HW)
writeq(~0, ndev->reg_ofs.ldb_mask);
- else
- writew(~(1 << ndev->limits.max_db_bits),
- ndev->reg_ofs.ldb_mask);
+ else {
+ u16 var = 1 << SNB_LINK_DB;
+ writew(~var, ndev->reg_ofs.ldb_mask);
+ }
rc = ntb_setup_msix(ndev);
if (!rc)
@@ -1286,6 +1344,39 @@ static void ntb_free_debugfs(struct ntb_device *ndev)
}
}
+static void ntb_hw_link_up(struct ntb_device *ndev)
+{
+ if (ndev->conn_type == NTB_CONN_TRANSPARENT)
+ ntb_link_event(ndev, NTB_LINK_UP);
+ else {
+ u32 ntb_cntl;
+
+ /* Let's bring the NTB link up */
+ ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
+ ntb_cntl &= ~(NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK);
+ ntb_cntl |= NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP;
+ ntb_cntl |= NTB_CNTL_P2S_BAR45_SNOOP | NTB_CNTL_S2P_BAR45_SNOOP;
+ writel(ntb_cntl, ndev->reg_ofs.lnk_cntl);
+ }
+}
+
+static void ntb_hw_link_down(struct ntb_device *ndev)
+{
+ u32 ntb_cntl;
+
+ if (ndev->conn_type == NTB_CONN_TRANSPARENT) {
+ ntb_link_event(ndev, NTB_LINK_DOWN);
+ return;
+ }
+
+ /* Bring NTB link down */
+ ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
+ ntb_cntl &= ~(NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP);
+ ntb_cntl &= ~(NTB_CNTL_P2S_BAR45_SNOOP | NTB_CNTL_S2P_BAR45_SNOOP);
+ ntb_cntl |= NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK;
+ writel(ntb_cntl, ndev->reg_ofs.lnk_cntl);
+}
+
static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ntb_device *ndev;
@@ -1374,9 +1465,7 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
goto err6;
- /* Let's bring the NTB link up */
- writel(NTB_CNTL_BAR23_SNOOP | NTB_CNTL_BAR45_SNOOP,
- ndev->reg_ofs.lnk_cntl);
+ ntb_hw_link_up(ndev);
return 0;
@@ -1406,12 +1495,8 @@ static void ntb_pci_remove(struct pci_dev *pdev)
{
struct ntb_device *ndev = pci_get_drvdata(pdev);
int i;
- u32 ntb_cntl;
- /* Bring NTB link down */
- ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
- ntb_cntl |= NTB_CNTL_LINK_DISABLE;
- writel(ntb_cntl, ndev->reg_ofs.lnk_cntl);
+ ntb_hw_link_down(ndev);
ntb_transport_free(ndev->ntb_transport);
diff --git a/drivers/ntb/ntb_hw.h b/drivers/ntb/ntb_hw.h
index 0a31cedae7d4..bbdb7edca10c 100644
--- a/drivers/ntb/ntb_hw.h
+++ b/drivers/ntb/ntb_hw.h
@@ -106,10 +106,11 @@ struct ntb_mw {
};
struct ntb_db_cb {
- void (*callback) (void *data, int db_num);
+ int (*callback)(void *data, int db_num);
unsigned int db_num;
void *data;
struct ntb_device *ndev;
+ struct tasklet_struct irq_work;
};
struct ntb_device {
@@ -228,8 +229,8 @@ struct ntb_device *ntb_register_transport(struct pci_dev *pdev,
void ntb_unregister_transport(struct ntb_device *ndev);
void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr);
int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
- void *data, void (*db_cb_func) (void *data,
- int db_num));
+ void *data, int (*db_cb_func)(void *data,
+ int db_num));
void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx);
int ntb_register_event_callback(struct ntb_device *ndev,
void (*event_cb_func) (void *handle,
diff --git a/drivers/ntb/ntb_regs.h b/drivers/ntb/ntb_regs.h
index aa4bdd393c58..9774506419d7 100644
--- a/drivers/ntb/ntb_regs.h
+++ b/drivers/ntb/ntb_regs.h
@@ -55,6 +55,7 @@
#define SNB_MAX_COMPAT_SPADS 16
/* Reserve the uppermost bit for link interrupt */
#define SNB_MAX_DB_BITS 15
+#define SNB_LINK_DB 15
#define SNB_DB_BITS_PER_VEC 5
#define SNB_MAX_MW 2
#define SNB_ERRATA_MAX_MW 1
@@ -75,9 +76,6 @@
#define SNB_SBAR2XLAT_OFFSET 0x0030
#define SNB_SBAR4XLAT_OFFSET 0x0038
#define SNB_SBAR0BASE_OFFSET 0x0040
-#define SNB_SBAR0BASE_OFFSET 0x0040
-#define SNB_SBAR2BASE_OFFSET 0x0048
-#define SNB_SBAR4BASE_OFFSET 0x0050
#define SNB_SBAR2BASE_OFFSET 0x0048
#define SNB_SBAR4BASE_OFFSET 0x0050
#define SNB_NTBCNTL_OFFSET 0x0058
@@ -145,11 +143,13 @@
#define BWD_LTSSMSTATEJMP_FORCEDETECT (1 << 2)
#define BWD_IBIST_ERR_OFLOW 0x7FFF7FFF
-#define NTB_CNTL_CFG_LOCK (1 << 0)
-#define NTB_CNTL_LINK_DISABLE (1 << 1)
-#define NTB_CNTL_BAR23_SNOOP (1 << 2)
-#define NTB_CNTL_BAR45_SNOOP (1 << 6)
-#define BWD_CNTL_LINK_DOWN (1 << 16)
+#define NTB_CNTL_CFG_LOCK (1 << 0)
+#define NTB_CNTL_LINK_DISABLE (1 << 1)
+#define NTB_CNTL_S2P_BAR23_SNOOP (1 << 2)
+#define NTB_CNTL_P2S_BAR23_SNOOP (1 << 4)
+#define NTB_CNTL_S2P_BAR45_SNOOP (1 << 6)
+#define NTB_CNTL_P2S_BAR45_SNOOP (1 << 8)
+#define BWD_CNTL_LINK_DOWN (1 << 16)
#define NTB_PPD_OFFSET 0x00D4
#define SNB_PPD_CONN_TYPE 0x0003
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 12a9e83c008b..3217f394d45b 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -119,7 +119,6 @@ struct ntb_transport_qp {
void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
void *data, int len);
- struct tasklet_struct rx_work;
struct list_head rx_pend_q;
struct list_head rx_free_q;
spinlock_t ntb_rx_pend_q_lock;
@@ -584,11 +583,8 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
return 0;
}
-static void ntb_qp_link_cleanup(struct work_struct *work)
+static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
- struct ntb_transport_qp *qp = container_of(work,
- struct ntb_transport_qp,
- link_cleanup);
struct ntb_transport *nt = qp->transport;
struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
@@ -602,6 +598,16 @@ static void ntb_qp_link_cleanup(struct work_struct *work)
dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
qp->qp_link = NTB_LINK_DOWN;
+}
+
+static void ntb_qp_link_cleanup_work(struct work_struct *work)
+{
+ struct ntb_transport_qp *qp = container_of(work,
+ struct ntb_transport_qp,
+ link_cleanup);
+ struct ntb_transport *nt = qp->transport;
+
+ ntb_qp_link_cleanup(qp);
if (nt->transport_link == NTB_LINK_UP)
schedule_delayed_work(&qp->link_work,
@@ -613,22 +619,20 @@ static void ntb_qp_link_down(struct ntb_transport_qp *qp)
schedule_work(&qp->link_cleanup);
}
-static void ntb_transport_link_cleanup(struct work_struct *work)
+static void ntb_transport_link_cleanup(struct ntb_transport *nt)
{
- struct ntb_transport *nt = container_of(work, struct ntb_transport,
- link_cleanup);
int i;
+ /* Pass along the info to any clients */
+ for (i = 0; i < nt->max_qps; i++)
+ if (!test_bit(i, &nt->qp_bitmap))
+ ntb_qp_link_cleanup(&nt->qps[i]);
+
if (nt->transport_link == NTB_LINK_DOWN)
cancel_delayed_work_sync(&nt->link_work);
else
nt->transport_link = NTB_LINK_DOWN;
- /* Pass along the info to any clients */
- for (i = 0; i < nt->max_qps; i++)
- if (!test_bit(i, &nt->qp_bitmap))
- ntb_qp_link_down(&nt->qps[i]);
-
/* The scratchpad registers keep the values if the remote side
* goes down, blast them now to give them a sane value the next
* time they are accessed
@@ -637,6 +641,14 @@ static void ntb_transport_link_cleanup(struct work_struct *work)
ntb_write_local_spad(nt->ndev, i, 0);
}
+static void ntb_transport_link_cleanup_work(struct work_struct *work)
+{
+ struct ntb_transport *nt = container_of(work, struct ntb_transport,
+ link_cleanup);
+
+ ntb_transport_link_cleanup(nt);
+}
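
The refactor above splits each cleanup into a core helper plus a thin work_struct wrapper, so the same teardown can run synchronously from ntb_transport_free() as well as from the workqueue. A minimal sketch of the wrapper-plus-core split (illustrative types, userspace container_of):

/* Userspace sketch of the core-plus-work-wrapper split used above: the
 * core takes the object directly; the wrapper recovers it from the
 * embedded work item.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };

struct transport {
        int link;
        struct work_struct link_cleanup;
};

static void link_cleanup(struct transport *nt)  /* callable directly */
{
        nt->link = 0;
}

static void link_cleanup_work(struct work_struct *work) /* queued path */
{
        link_cleanup(container_of(work, struct transport, link_cleanup));
}

int main(void)
{
        struct transport nt = { .link = 1 };

        link_cleanup_work(&nt.link_cleanup);
        printf("link=%d\n", nt.link);
        return 0;
}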
+
static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
{
struct ntb_transport *nt = data;
@@ -880,7 +892,7 @@ static int ntb_transport_init_queue(struct ntb_transport *nt,
}
INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
- INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup);
+ INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
spin_lock_init(&qp->ntb_rx_pend_q_lock);
spin_lock_init(&qp->ntb_rx_free_q_lock);
@@ -936,7 +948,7 @@ int ntb_transport_init(struct pci_dev *pdev)
}
INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
- INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup);
+ INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);
rc = ntb_register_event_callback(nt->ndev,
ntb_transport_event_callback);
@@ -972,7 +984,7 @@ void ntb_transport_free(void *transport)
struct ntb_device *ndev = nt->ndev;
int i;
- nt->transport_link = NTB_LINK_DOWN;
+ ntb_transport_link_cleanup(nt);
/* verify that all the qp's are freed */
for (i = 0; i < nt->max_qps; i++) {
@@ -1034,10 +1046,9 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
struct dma_chan *chan = qp->dma_chan;
struct dma_device *device;
size_t pay_off, buff_off;
- dma_addr_t src, dest;
+ struct dmaengine_unmap_data *unmap;
dma_cookie_t cookie;
void *buf = entry->buf;
- unsigned long flags;
entry->len = len;
@@ -1045,35 +1056,49 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
goto err;
if (len < copy_bytes)
- goto err1;
+ goto err_wait;
device = chan->device;
pay_off = (size_t) offset & ~PAGE_MASK;
buff_off = (size_t) buf & ~PAGE_MASK;
if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
- goto err1;
+ goto err_wait;
- dest = dma_map_single(device->dev, buf, len, DMA_FROM_DEVICE);
- if (dma_mapping_error(device->dev, dest))
- goto err1;
+ unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
+ if (!unmap)
+ goto err_wait;
- src = dma_map_single(device->dev, offset, len, DMA_TO_DEVICE);
- if (dma_mapping_error(device->dev, src))
- goto err2;
+ unmap->len = len;
+ unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
+ pay_off, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(device->dev, unmap->addr[0]))
+ goto err_get_unmap;
+
+ unmap->to_cnt = 1;
+
+ unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
+ buff_off, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(device->dev, unmap->addr[1]))
+ goto err_get_unmap;
+
+ unmap->from_cnt = 1;
- flags = DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SRC_UNMAP_SINGLE |
- DMA_PREP_INTERRUPT;
- txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
+ txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
+ unmap->addr[0], len,
+ DMA_PREP_INTERRUPT);
if (!txd)
- goto err3;
+ goto err_get_unmap;
txd->callback = ntb_rx_copy_callback;
txd->callback_param = entry;
+ dma_set_unmap(txd, unmap);
cookie = dmaengine_submit(txd);
if (dma_submit_error(cookie))
- goto err3;
+ goto err_set_unmap;
+
+ dmaengine_unmap_put(unmap);
qp->last_cookie = cookie;
@@ -1081,11 +1106,11 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
return;
-err3:
- dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
-err2:
- dma_unmap_single(device->dev, dest, len, DMA_FROM_DEVICE);
-err1:
+err_set_unmap:
+ dmaengine_unmap_put(unmap);
+err_get_unmap:
+ dmaengine_unmap_put(unmap);
+err_wait:
/* If the callbacks come out of order, the writing of the index to the
* last completed will be out of order. This may result in the
* receive stalling forever.
@@ -1175,11 +1200,14 @@ err:
goto out;
}
-static void ntb_transport_rx(unsigned long data)
+static int ntb_transport_rxc_db(void *data, int db_num)
{
- struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
+ struct ntb_transport_qp *qp = data;
int rc, i;
+ dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
+ __func__, db_num);
+
/* Limit the number of packets processed in a single interrupt to
* provide fairness to others
*/
@@ -1191,16 +1219,8 @@ static void ntb_transport_rx(unsigned long data)
if (qp->dma_chan)
dma_async_issue_pending(qp->dma_chan);
-}
-
-static void ntb_transport_rxc_db(void *data, int db_num)
-{
- struct ntb_transport_qp *qp = data;
-
- dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
- __func__, db_num);
- tasklet_schedule(&qp->rx_work);
+ return i;
}
static void ntb_tx_copy_callback(void *data)
@@ -1245,12 +1265,12 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
struct dma_chan *chan = qp->dma_chan;
struct dma_device *device;
size_t dest_off, buff_off;
- dma_addr_t src, dest;
+ struct dmaengine_unmap_data *unmap;
+ dma_addr_t dest;
dma_cookie_t cookie;
void __iomem *offset;
size_t len = entry->len;
void *buf = entry->buf;
- unsigned long flags;
offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
@@ -1273,28 +1293,41 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
goto err;
- src = dma_map_single(device->dev, buf, len, DMA_TO_DEVICE);
- if (dma_mapping_error(device->dev, src))
+ unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
+ if (!unmap)
goto err;
- flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_PREP_INTERRUPT;
- txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
+ unmap->len = len;
+ unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
+ buff_off, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(device->dev, unmap->addr[0]))
+ goto err_get_unmap;
+
+ unmap->to_cnt = 1;
+
+ txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
+ DMA_PREP_INTERRUPT);
if (!txd)
- goto err1;
+ goto err_get_unmap;
txd->callback = ntb_tx_copy_callback;
txd->callback_param = entry;
+ dma_set_unmap(txd, unmap);
cookie = dmaengine_submit(txd);
if (dma_submit_error(cookie))
- goto err1;
+ goto err_set_unmap;
+
+ dmaengine_unmap_put(unmap);
dma_async_issue_pending(chan);
qp->tx_async++;
return;
-err1:
- dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
+err_set_unmap:
+ dmaengine_unmap_put(unmap);
+err_get_unmap:
+ dmaengine_unmap_put(unmap);
err:
ntb_memcpy_tx(entry, offset);
qp->tx_memcpy++;
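
Both DMA paths now use the refcounted dmaengine unmap data: dmaengine_get_unmap_data() returns the object holding one reference, dma_set_unmap() takes another on behalf of the descriptor, the submit path drops its local reference, and the final put (where the pages are actually unmapped) happens on completion. On the err_set_unmap path the fall-through into err_get_unmap is what drops both. A toy refcount trace of that lifecycle (illustrative, userspace):

/* Toy trace of the refcounted-unmap lifecycle used above. */
#include <stdio.h>

struct unmap { int ref; };

static void put(struct unmap *u)
{
        if (--u->ref == 0)
                printf("unmap pages now\n");    /* dma_unmap_page() point */
}

int main(void)
{
        struct unmap u = { .ref = 1 };  /* dmaengine_get_unmap_data() */

        u.ref++;                        /* dma_set_unmap(): txd owns one */
        put(&u);                        /* submit path drops its local ref */
        put(&u);                        /* descriptor completion: final put */
        return 0;
}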
@@ -1406,11 +1439,12 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
qp->tx_handler = handlers->tx_handler;
qp->event_handler = handlers->event_handler;
+ dmaengine_get();
qp->dma_chan = dma_find_channel(DMA_MEMCPY);
- if (!qp->dma_chan)
+ if (!qp->dma_chan) {
+ dmaengine_put();
dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
- else
- dmaengine_get();
+ }
for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
@@ -1432,25 +1466,23 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
&qp->tx_free_q);
}
- tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);
-
rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
ntb_transport_rxc_db);
if (rc)
- goto err3;
+ goto err2;
dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
return qp;
-err3:
- tasklet_disable(&qp->rx_work);
err2:
while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
kfree(entry);
err1:
while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
kfree(entry);
+ if (qp->dma_chan)
+ dmaengine_put();
set_bit(free_queue, &nt->qp_bitmap);
err:
return NULL;
@@ -1489,7 +1521,6 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
}
ntb_unregister_db_callback(qp->ndev, qp->qp_num);
- tasklet_disable(&qp->rx_work);
cancel_delayed_work_sync(&qp->link_work);
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index de6f8990246f..c6973f101a3e 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -20,7 +20,7 @@ config OF_SELFTEST
depends on OF_IRQ
help
This option builds in test cases for the device tree infrastructure
- that are executed one at boot time, and the results dumped to the
+ that are executed once at boot time, and the results dumped to the
console.
If unsure, say N here, but this option is safe to enable.
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 4b9317bdb81c..d3dd41c840f1 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -69,14 +69,6 @@ static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
(unsigned long long)cp, (unsigned long long)s,
(unsigned long long)da);
- /*
- * If the number of address cells is larger than 2 we assume the
- * mapping doesn't specify a physical address. Rather, the address
- * specifies an identifier that must match exactly.
- */
- if (na > 2 && memcmp(range, addr, na * 4) != 0)
- return OF_BAD_ADDR;
-
if (da < cp || da >= (cp + s))
return OF_BAD_ADDR;
return da - cp;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 2fa024b97c43..758b4f8b30b7 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -922,8 +922,16 @@ void __init unflatten_device_tree(void)
*/
void __init unflatten_and_copy_device_tree(void)
{
- int size = __be32_to_cpu(initial_boot_params->totalsize);
- void *dt = early_init_dt_alloc_memory_arch(size,
+ int size;
+ void *dt;
+
+ if (!initial_boot_params) {
+ pr_warn("No valid device tree found, continuing without\n");
+ return;
+ }
+
+ size = __be32_to_cpu(initial_boot_params->totalsize);
+ dt = early_init_dt_alloc_memory_arch(size,
__alignof__(struct boot_param_header));
if (dt) {
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 786b0b47fae4..27212402c532 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -165,7 +165,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
if (of_get_property(ipar, "interrupt-controller", NULL) !=
NULL) {
pr_debug(" -> got it !\n");
- of_node_put(old);
return 0;
}
@@ -250,8 +249,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
* Successfully parsed an interrupt-map translation; copy new
* interrupt specifier into the out_irq structure
*/
- of_node_put(out_irq->np);
- out_irq->np = of_node_get(newpar);
+ out_irq->np = newpar;
match_array = imap - newaddrsize - newintsize;
for (i = 0; i < newintsize; i++)
@@ -268,7 +266,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
}
fail:
of_node_put(ipar);
- of_node_put(out_irq->np);
of_node_put(newpar);
return -EINVAL;
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index d5a57a9e329c..a43b8523c61e 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -22,6 +22,71 @@
MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
MODULE_LICENSE("GPL");
+static void of_set_phy_supported(struct phy_device *phydev, u32 max_speed)
+{
+ phydev->supported |= PHY_DEFAULT_FEATURES;
+
+ switch (max_speed) {
+ default:
+ return;
+
+ case SPEED_1000:
+ phydev->supported |= PHY_1000BT_FEATURES;
+ case SPEED_100:
+ phydev->supported |= PHY_100BT_FEATURES;
+ case SPEED_10:
+ phydev->supported |= PHY_10BT_FEATURES;
+ }
+}
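
The missing break statements above are deliberate: a PHY capped at 1000 Mb/s also supports the 100 and 10 Mb/s feature sets, so the cases accumulate downwards. A standalone check of the fall-through (flag values are hypothetical stand-ins for the PHY_*BT_FEATURES masks):

/* Standalone check of the deliberate switch fall-through above. */
#include <stdio.h>

#define F_10    0x1
#define F_100   0x2
#define F_1000  0x4

static unsigned int supported(int max_speed)
{
        unsigned int f = 0;

        switch (max_speed) {
        case 1000:
                f |= F_1000;    /* fall through */
        case 100:
                f |= F_100;     /* fall through */
        case 10:
                f |= F_10;
        }
        return f;
}

int main(void)
{
        printf("0x%x\n", supported(1000));      /* 0x7: 1000+100+10 */
        return 0;
}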
+
+static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *child,
+ u32 addr)
+{
+ struct phy_device *phy;
+ bool is_c45;
+ int rc, prev_irq;
+ u32 max_speed = 0;
+
+ is_c45 = of_device_is_compatible(child,
+ "ethernet-phy-ieee802.3-c45");
+
+ phy = get_phy_device(mdio, addr, is_c45);
+ if (!phy || IS_ERR(phy))
+ return 1;
+
+ if (mdio->irq) {
+ prev_irq = mdio->irq[addr];
+ mdio->irq[addr] =
+ irq_of_parse_and_map(child, 0);
+ if (!mdio->irq[addr])
+ mdio->irq[addr] = prev_irq;
+ }
+
+ /* Associate the OF node with the device structure so it
+ * can be looked up later */
+ of_node_get(child);
+ phy->dev.of_node = child;
+
+ /* All data is now stored in the phy struct;
+ * register it */
+ rc = phy_device_register(phy);
+ if (rc) {
+ phy_device_free(phy);
+ of_node_put(child);
+ return 1;
+ }
+
+ /* Set phydev->supported based on the "max-speed" property
+ * if present */
+ if (!of_property_read_u32(child, "max-speed", &max_speed))
+ of_set_phy_supported(phy, max_speed);
+
+ dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
+ child->name, addr);
+
+ return 0;
+}
+
/**
* of_mdiobus_register - Register mii_bus and create PHYs from the device tree
* @mdio: pointer to mii_bus structure
@@ -32,11 +97,10 @@ MODULE_LICENSE("GPL");
*/
int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
{
- struct phy_device *phy;
struct device_node *child;
const __be32 *paddr;
u32 addr;
- bool is_c45, scanphys = false;
+ bool scanphys = false;
int rc, i, len;
/* Mask out all PHYs from auto probing. Instead the PHYs listed in
@@ -67,44 +131,15 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
}
addr = be32_to_cpup(paddr);
- if (addr >= 32) {
+ if (addr >= PHY_MAX_ADDR) {
dev_err(&mdio->dev, "%s PHY address %i is too large\n",
child->full_name, addr);
continue;
}
- if (mdio->irq) {
- mdio->irq[addr] = irq_of_parse_and_map(child, 0);
- if (!mdio->irq[addr])
- mdio->irq[addr] = PHY_POLL;
- }
-
- is_c45 = of_device_is_compatible(child,
- "ethernet-phy-ieee802.3-c45");
- phy = get_phy_device(mdio, addr, is_c45);
-
- if (!phy || IS_ERR(phy)) {
- dev_err(&mdio->dev,
- "cannot get PHY at address %i\n",
- addr);
- continue;
- }
-
- /* Associate the OF node with the device structure so it
- * can be looked up later */
- of_node_get(child);
- phy->dev.of_node = child;
-
- /* All data is now stored in the phy struct; register it */
- rc = phy_device_register(phy);
- if (rc) {
- phy_device_free(phy);
- of_node_put(child);
+ rc = of_mdiobus_register_phy(mdio, child, addr);
+ if (rc)
continue;
- }
-
- dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
- child->name, addr);
}
if (!scanphys)
@@ -117,9 +152,6 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
if (paddr)
continue;
- is_c45 = of_device_is_compatible(child,
- "ethernet-phy-ieee802.3-c45");
-
for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
/* skip already registered PHYs */
if (mdio->phy_map[addr])
@@ -129,34 +161,9 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
dev_info(&mdio->dev, "scan phy %s at address %i\n",
child->name, addr);
- phy = get_phy_device(mdio, addr, is_c45);
- if (!phy || IS_ERR(phy))
+ rc = of_mdiobus_register_phy(mdio, child, addr);
+ if (rc)
continue;
-
- if (mdio->irq) {
- mdio->irq[addr] =
- irq_of_parse_and_map(child, 0);
- if (!mdio->irq[addr])
- mdio->irq[addr] = PHY_POLL;
- }
-
- /* Associate the OF node with the device structure so it
- * can be looked up later */
- of_node_get(child);
- phy->dev.of_node = child;
-
- /* All data is now stored in the phy struct;
- * register it */
- rc = phy_device_register(phy);
- if (rc) {
- phy_device_free(phy);
- of_node_put(child);
- continue;
- }
-
- dev_info(&mdio->dev, "registered phy %s at address %i\n",
- child->name, addr);
- break;
}
}
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 95655d7c0d0b..e52d7ffa38b9 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -410,7 +410,7 @@ EXPORT_SYMBOL_GPL(pci_disable_pasid);
* Otherwise is returns a bitmask with supported features. Current
* features reported are:
* PCI_PASID_CAP_EXEC - Execute permission supported
- * PCI_PASID_CAP_PRIV - Priviledged mode supported
+ * PCI_PASID_CAP_PRIV - Privileged mode supported
*/
int pci_pasid_features(struct pci_dev *pdev)
{
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index c269e430c760..2aa7b77c7c88 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -447,6 +447,11 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
*value = 0;
break;
+ case PCI_INTERRUPT_LINE:
+ /* LINE PIN MIN_GNT MAX_LAT */
+ *value = 0;
+ break;
+
default:
*value = 0xffffffff;
return PCIBIOS_BAD_REGISTER_NUMBER;
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 7c4f38dd42ba..0afbbbc55c81 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -249,7 +249,7 @@ struct tegra_pcie {
void __iomem *afi;
int irq;
- struct list_head busses;
+ struct list_head buses;
struct resource *cs;
struct resource io;
@@ -399,14 +399,14 @@ free:
/*
* Look up a virtual address mapping for the specified bus number. If no such
- * mapping existis, try to create one.
+ * mapping exists, try to create one.
*/
static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
unsigned int busnr)
{
struct tegra_pcie_bus *bus;
- list_for_each_entry(bus, &pcie->busses, list)
+ list_for_each_entry(bus, &pcie->buses, list)
if (bus->nr == busnr)
return (void __iomem *)bus->area->addr;
@@ -414,7 +414,7 @@ static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
if (IS_ERR(bus))
return NULL;
- list_add_tail(&bus->list, &pcie->busses);
+ list_add_tail(&bus->list, &pcie->buses);
return (void __iomem *)bus->area->addr;
}
@@ -808,7 +808,7 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
afi_writel(pcie, value, AFI_FUSE);
- /* initialze internal PHY, enable up to 16 PCIE lanes */
+ /* initialize internal PHY, enable up to 16 PCIE lanes */
pads_writel(pcie, 0x0, PADS_CTL_SEL);
/* override IDDQ to 1 on all 4 lanes */
@@ -1624,7 +1624,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
if (!pcie)
return -ENOMEM;
- INIT_LIST_HEAD(&pcie->busses);
+ INIT_LIST_HEAD(&pcie->buses);
INIT_LIST_HEAD(&pcie->ports);
pcie->soc_data = match->data;
pcie->dev = &pdev->dev;
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 1e1fea4d959b..e33b68be0391 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -197,7 +197,7 @@ static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0)
return -ENOSPC;
/*
* Check if this position is at correct offset.nvec is always a
- * power of two. pos0 must be nvec bit alligned.
+ * power of two. pos0 must be nvec bit aligned.
*/
if (pos % msgvec)
pos += msgvec - (pos % msgvec);
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index 0a648af89531..df8caec59789 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -133,8 +133,8 @@ config HOTPLUG_PCI_RPA_DLPAR
To compile this driver as a module, choose M here: the
module will be called rpadlpar_io.
-
- When in doubt, say N.
+
+ When in doubt, say N.
config HOTPLUG_PCI_SGI
tristate "SGI PCI Hotplug Support"
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index 47ec8c80e16d..3e6532b945c1 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -31,7 +31,7 @@ pci_hotplug-objs += cpci_hotplug_core.o \
cpci_hotplug_pci.o
endif
ifdef CONFIG_ACPI
-pci_hotplug-objs += acpi_pcihp.o
+pci_hotplug-objs += acpi_pcihp.o
endif
cpqphp-objs := cpqphp_core.o \
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 1ce8ee054f1a..a94d850ae228 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -367,7 +367,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL };
}
- handle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ handle = ACPI_HANDLE(&pdev->dev);
if (!handle) {
/*
* This hotplug controller was not listed in the ACPI name
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 26100f510b10..1592dbe4f904 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -176,7 +176,6 @@ u8 acpiphp_get_latch_status(struct acpiphp_slot *slot);
u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot);
/* variables */
-extern bool acpiphp_debug;
extern bool acpiphp_disabled;
#endif /* _ACPIPHP_H */
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 8650d39db392..dca66bc44578 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -111,7 +111,7 @@ int acpiphp_register_attention(struct acpiphp_attention_info *info)
* @info: must match the pointer used to register
*
* Description: This is used to un-register a hardware specific acpi
- * driver that manipulates the attention LED. The pointer to the
+ * driver that manipulates the attention LED. The pointer to the
* info struct must be the same as the one used to set it.
*/
int acpiphp_unregister_attention(struct acpiphp_attention_info *info)
@@ -169,8 +169,8 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
* was registered with us. This allows hardware specific
* ACPI implementations to blink the light for us.
*/
- static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
- {
+static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
+{
int retval = -ENODEV;
pr_debug("%s - physical_slot = %s\n", __func__,
@@ -182,8 +182,8 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
} else
attention_info = NULL;
return retval;
- }
-
+}
+
/**
* get_power_status - get power status of a slot
@@ -323,7 +323,7 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot,
if (retval) {
pr_err("pci_hp_register failed with error %d\n", retval);
goto error_hpslot;
- }
+ }
pr_info("Slot [%s] registered\n", slot_name(slot));
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 5b4e9eb0e8ff..1cf605f67673 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -325,7 +325,7 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
list_add_tail(&slot->node, &bridge->slots);
- /* Register slots for ejectable funtions only. */
+ /* Register slots for ejectable functions only. */
if (acpi_pci_check_ejectable(pbus, handle) || is_dock_device(handle)) {
unsigned long long sun;
int retval;
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 0d64c414bf78..ecfac7e72d91 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -116,7 +116,7 @@ static struct bin_attribute ibm_apci_table_attr = {
.read = ibm_read_apci_table,
.write = NULL,
};
-static struct acpiphp_attention_info ibm_attention_info =
+static struct acpiphp_attention_info ibm_attention_info =
{
.set_attn = ibm_set_attention_status,
.get_attn = ibm_get_attention_status,
@@ -171,9 +171,9 @@ ibm_slot_done:
*/
static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status)
{
- union acpi_object args[2];
+ union acpi_object args[2];
struct acpi_object_list params = { .pointer = args, .count = 2 };
- acpi_status stat;
+ acpi_status stat;
unsigned long long rc;
union apci_descriptor *ibm_slot;
@@ -208,7 +208,7 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status)
*
* Description: This method is registered with the acpiphp module as a
* callback to do the device specific task of getting the LED status.
- *
+ *
* Because there is no direct method of getting the LED status directly
* from an ACPI call, we read the aPCI table and parse out our
* slot descriptor to read the status from that.
@@ -259,7 +259,7 @@ static void ibm_handle_events(acpi_handle handle, u32 event, void *context)
pr_debug("%s: Received notification %02x\n", __func__, event);
if (subevent == 0x80) {
- pr_debug("%s: generationg bus event\n", __func__);
+ pr_debug("%s: generating bus event\n", __func__);
acpi_bus_generate_netlink_event(note->device->pnp.device_class,
dev_name(&note->device->dev),
note->event, detail);
@@ -387,7 +387,7 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
u32 lvl, void *context, void **rv)
{
acpi_handle *phandle = (acpi_handle *)context;
- acpi_status status;
+ acpi_status status;
struct acpi_device_info *info;
int retval = 0;
@@ -405,7 +405,7 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
info->hardware_id.string, handle);
*phandle = handle;
/* returning non-zero causes the search to stop
- * and returns this value to the caller of
+ * and returns this value to the caller of
* acpi_walk_namespace, but it also causes some warnings
* in the acpi debug code to print...
*/
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index 2b4c412f94c3..00c81a3cefc9 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -46,7 +46,7 @@
do { \
if (cpci_debug) \
printk (KERN_DEBUG "%s: " format "\n", \
- MY_NAME , ## arg); \
+ MY_NAME , ## arg); \
} while (0)
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
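
A note on the dbg() macro touched above: the do { ... } while (0) wrapper is the standard idiom for making a multi-statement macro behave as a single statement, so it composes safely with if/else. A self-contained sketch of the same shape (illustrative, not part of the patch):

	#define dbg_example(fmt, arg...)				\
		do {							\
			if (cpci_debug)					\
				printk(KERN_DEBUG "%s: " fmt "\n",	\
				       MY_NAME, ## arg);		\
		} while (0)
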
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index d8add34177f2..d3add9819f63 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -39,7 +39,7 @@ extern int cpci_debug;
do { \
if (cpci_debug) \
printk (KERN_DEBUG "%s: " format "\n", \
- MY_NAME , ## arg); \
+ MY_NAME , ## arg); \
} while (0)
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
index a6a71c41cdf8..7536eef620b0 100644
--- a/drivers/pci/hotplug/cpcihp_generic.c
+++ b/drivers/pci/hotplug/cpcihp_generic.c
@@ -13,14 +13,14 @@
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
@@ -53,9 +53,9 @@
#define dbg(format, arg...) \
do { \
- if(debug) \
+ if (debug) \
printk (KERN_DEBUG "%s: " format "\n", \
- MY_NAME , ## arg); \
+ MY_NAME , ## arg); \
} while(0)
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
index 449b4bbc8301..e8c4a7ccf578 100644
--- a/drivers/pci/hotplug/cpcihp_zt5550.c
+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
@@ -13,14 +13,14 @@
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
@@ -48,9 +48,9 @@
#define dbg(format, arg...) \
do { \
- if(debug) \
+ if (debug) \
printk (KERN_DEBUG "%s: " format "\n", \
- MY_NAME , ## arg); \
+ MY_NAME , ## arg); \
} while(0)
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
@@ -285,7 +285,7 @@ static struct pci_device_id zt5550_hc_pci_tbl[] = {
{ 0, }
};
MODULE_DEVICE_TABLE(pci, zt5550_hc_pci_tbl);
-
+
static struct pci_driver zt5550_hc_driver = {
.name = "zt5550_hc",
.id_table = zt5550_hc_pci_tbl,
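
For reference, a struct pci_driver filled in as above is handed to the PCI core at module init; the driver's actual init path is not shown in this hunk, but the usual pattern is (sketch only):

	static int __init example_init(void)
	{
		return pci_register_driver(&zt5550_hc_driver);
	}

	static void __exit example_exit(void)
	{
		pci_unregister_driver(&zt5550_hc_driver);
	}
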
diff --git a/drivers/pci/hotplug/cpcihp_zt5550.h b/drivers/pci/hotplug/cpcihp_zt5550.h
index bebc6060a558..9a57fda5348c 100644
--- a/drivers/pci/hotplug/cpcihp_zt5550.h
+++ b/drivers/pci/hotplug/cpcihp_zt5550.h
@@ -13,14 +13,14 @@
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
@@ -55,7 +55,7 @@
#define HC_CMD_REG 0x0C
#define ARB_CONFIG_GNT_REG 0x10
#define ARB_CONFIG_CFG_REG 0x12
-#define ARB_CONFIG_REG 0x10
+#define ARB_CONFIG_REG 0x10
#define ISOL_CONFIG_REG 0x18
#define FAULT_STATUS_REG 0x20
#define FAULT_CONFIG_REG 0x24
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index c8eaeb43fa5d..31273e155e6c 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -862,10 +862,10 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_disable_device;
}
- /* Check for the proper subsystem ID's
+ /* Check for the proper subsystem IDs
* Intel uses a different SSID programming model than Compaq.
* For Intel, each SSID bit identifies a PHP capability.
- * Also Intel HPC's may have RID=0.
+ * Also Intel HPCs may have RID=0.
*/
if ((pdev->revision <= 2) && (vendor_id != PCI_VENDOR_ID_INTEL)) {
err(msg_HPC_not_supported);
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index d282019cda5f..11845b796799 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -1231,7 +1231,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
/* Only if mode change...*/
if (((bus->cur_bus_speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) ||
- ((bus->cur_bus_speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz)))
+ ((bus->cur_bus_speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz)))
set_SOGO(ctrl);
wait_for_ctrl_irq(ctrl);
@@ -1828,7 +1828,7 @@ static void interrupt_event_handler(struct controller *ctrl)
if (ctrl->event_queue[loop].event_type == INT_BUTTON_PRESS) {
dbg("button pressed\n");
- } else if (ctrl->event_queue[loop].event_type ==
+ } else if (ctrl->event_queue[loop].event_type ==
INT_BUTTON_CANCEL) {
dbg("button cancel\n");
del_timer(&p_slot->task_event);
@@ -2411,11 +2411,11 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
if (rc)
return rc;
- /* find range of busses to use */
+ /* find range of buses to use */
dbg("find ranges of buses to use\n");
bus_node = get_max_resource(&(resources->bus_head), 1);
- /* If we don't have any busses to allocate, we can't continue */
+ /* If we don't have any buses to allocate, we can't continue */
if (!bus_node)
return -ENOMEM;
@@ -2900,7 +2900,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
/* If this function needs an interrupt and we are behind
* a bridge and the pin is tied to something that's
- * alread mapped, set this one the same */
+ * already mapped, set this one the same */
if (temp_byte && resources->irqs &&
(resources->irqs->valid_INT &
(0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03)))) {
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index 09801c6945ce..6e4a12c91adb 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -291,7 +291,7 @@ int cpqhp_get_bus_dev (struct controller *ctrl, u8 * bus_num, u8 * dev_num, u8 s
*
* Reads configuration for all slots in a PCI bus and saves info.
*
- * Note: For non-hot plug busses, the slot # saved is the device #
+ * Note: For non-hot plug buses, the slot # saved is the device #
*
* returns 0 if success
*/
@@ -455,7 +455,7 @@ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug)
* cpqhp_save_slot_config
*
* Saves configuration info for all PCI devices in a given slot
- * including subordinate busses.
+ * including subordinate buses.
*
* returns 0 if success
*/
@@ -1556,4 +1556,3 @@ void cpqhp_destroy_board_resources (struct pci_func * func)
kfree(tres);
}
}
-
diff --git a/drivers/pci/hotplug/ibmphp.h b/drivers/pci/hotplug/ibmphp.h
index 8c5b25871d02..e3e46a7b3ee7 100644
--- a/drivers/pci/hotplug/ibmphp.h
+++ b/drivers/pci/hotplug/ibmphp.h
@@ -59,7 +59,7 @@ extern int ibmphp_debug;
/************************************************************
-* RESOURE TYPE *
+* RESOURCE TYPE *
************************************************************/
#define EBDA_RSRC_TYPE_MASK 0x03
@@ -103,7 +103,7 @@ extern int ibmphp_debug;
//--------------------------------------------------------------
struct rio_table_hdr {
- u8 ver_num;
+ u8 ver_num;
u8 scal_count;
u8 riodev_count;
u16 offset;
@@ -127,7 +127,7 @@ struct scal_detail {
};
//--------------------------------------------------------------
-// RIO DETAIL
+// RIO DETAIL
//--------------------------------------------------------------
struct rio_detail {
@@ -152,7 +152,7 @@ struct opt_rio {
u8 first_slot_num;
u8 middle_num;
struct list_head opt_rio_list;
-};
+};
struct opt_rio_lo {
u8 rio_type;
@@ -161,7 +161,7 @@ struct opt_rio_lo {
u8 middle_num;
u8 pack_count;
struct list_head opt_rio_lo_list;
-};
+};
/****************************************************************
* HPC DESCRIPTOR NODE *
@@ -574,7 +574,7 @@ void ibmphp_hpc_stop_poll_thread(void);
#define HPC_CTLR_IRQ_PENDG 0x80
//----------------------------------------------------------------------------
-// HPC_CTLR_WROKING status return codes
+// HPC_CTLR_WORKING status return codes
//----------------------------------------------------------------------------
#define HPC_CTLR_WORKING_NO 0x00
#define HPC_CTLR_WORKING_YES 0x01
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index cbd72d81d253..efdc13adbe41 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -58,7 +58,7 @@ MODULE_DESCRIPTION (DRIVER_DESC);
struct pci_bus *ibmphp_pci_bus;
static int max_slots;
-static int irqs[16]; /* PIC mode IRQ's we're using so far (in case MPS
+static int irqs[16]; /* PIC mode IRQs we're using so far (in case MPS
* tables don't provide default info for empty slots */
static int init_flag;
@@ -71,20 +71,20 @@ static inline int get_max_adapter_speed (struct hotplug_slot *hs, u8 *value)
return get_max_adapter_speed_1 (hs, value, 1);
}
*/
-static inline int get_cur_bus_info(struct slot **sl)
+static inline int get_cur_bus_info(struct slot **sl)
{
int rc = 1;
struct slot * slot_cur = *sl;
debug("options = %x\n", slot_cur->ctrl->options);
- debug("revision = %x\n", slot_cur->ctrl->revision);
+ debug("revision = %x\n", slot_cur->ctrl->revision);
- if (READ_BUS_STATUS(slot_cur->ctrl))
+ if (READ_BUS_STATUS(slot_cur->ctrl))
rc = ibmphp_hpc_readslot(slot_cur, READ_BUSSTATUS, NULL);
-
- if (rc)
+
+ if (rc)
return rc;
-
+
slot_cur->bus_on->current_speed = CURRENT_BUS_SPEED(slot_cur->busstatus);
if (READ_BUS_MODE(slot_cur->ctrl))
slot_cur->bus_on->current_bus_mode =
@@ -96,7 +96,7 @@ static inline int get_cur_bus_info(struct slot **sl)
slot_cur->busstatus,
slot_cur->bus_on->current_speed,
slot_cur->bus_on->current_bus_mode);
-
+
*sl = slot_cur;
return 0;
}
@@ -104,8 +104,8 @@ static inline int get_cur_bus_info(struct slot **sl)
static inline int slot_update(struct slot **sl)
{
int rc;
- rc = ibmphp_hpc_readslot(*sl, READ_ALLSTAT, NULL);
- if (rc)
+ rc = ibmphp_hpc_readslot(*sl, READ_ALLSTAT, NULL);
+ if (rc)
return rc;
if (!init_flag)
rc = get_cur_bus_info(sl);
@@ -172,7 +172,7 @@ int ibmphp_init_devno(struct slot **cur_slot)
debug("(*cur_slot)->irq[3] = %x\n",
(*cur_slot)->irq[3]);
- debug("rtable->exlusive_irqs = %x\n",
+ debug("rtable->exclusive_irqs = %x\n",
rtable->exclusive_irqs);
debug("rtable->slots[loop].irq[0].bitmap = %x\n",
rtable->slots[loop].irq[0].bitmap);
@@ -271,7 +271,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value)
else
rc = -ENODEV;
}
- } else
+ } else
rc = -ENODEV;
ibmphp_unlock_operations();
@@ -288,7 +288,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 * value)
debug("get_attention_status - Entry hotplug_slot[%lx] pvalue[%lx]\n",
(ulong) hotplug_slot, (ulong) value);
-
+
ibmphp_lock_operations();
if (hotplug_slot) {
pslot = hotplug_slot->private;
@@ -406,14 +406,14 @@ static int get_max_bus_speed(struct slot *slot)
ibmphp_lock_operations();
mode = slot->supported_bus_mode;
- speed = slot->supported_speed;
+ speed = slot->supported_speed;
ibmphp_unlock_operations();
switch (speed) {
case BUS_SPEED_33:
break;
case BUS_SPEED_66:
- if (mode == BUS_MODE_PCIX)
+ if (mode == BUS_MODE_PCIX)
speed += 0x01;
break;
case BUS_SPEED_100:
@@ -515,13 +515,13 @@ static int __init init_ops(void)
debug("BEFORE GETTING SLOT STATUS, slot # %x\n",
slot_cur->number);
- if (slot_cur->ctrl->revision == 0xFF)
+ if (slot_cur->ctrl->revision == 0xFF)
if (get_ctrl_revision(slot_cur,
&slot_cur->ctrl->revision))
return -1;
- if (slot_cur->bus_on->current_speed == 0xFF)
- if (get_cur_bus_info(&slot_cur))
+ if (slot_cur->bus_on->current_speed == 0xFF)
+ if (get_cur_bus_info(&slot_cur))
return -1;
get_max_bus_speed(slot_cur);
@@ -539,8 +539,8 @@ static int __init init_ops(void)
debug("SLOT_PRESENT = %x\n", SLOT_PRESENT(slot_cur->status));
debug("SLOT_LATCH = %x\n", SLOT_LATCH(slot_cur->status));
- if ((SLOT_PWRGD(slot_cur->status)) &&
- !(SLOT_PRESENT(slot_cur->status)) &&
+ if ((SLOT_PWRGD(slot_cur->status)) &&
+ !(SLOT_PRESENT(slot_cur->status)) &&
!(SLOT_LATCH(slot_cur->status))) {
debug("BEFORE POWER OFF COMMAND\n");
rc = power_off(slot_cur);
@@ -581,13 +581,13 @@ static int validate(struct slot *slot_cur, int opn)
switch (opn) {
case ENABLE:
- if (!(SLOT_PWRGD(slot_cur->status)) &&
- (SLOT_PRESENT(slot_cur->status)) &&
+ if (!(SLOT_PWRGD(slot_cur->status)) &&
+ (SLOT_PRESENT(slot_cur->status)) &&
!(SLOT_LATCH(slot_cur->status)))
return 0;
break;
case DISABLE:
- if ((SLOT_PWRGD(slot_cur->status)) &&
+ if ((SLOT_PWRGD(slot_cur->status)) &&
(SLOT_PRESENT(slot_cur->status)) &&
!(SLOT_LATCH(slot_cur->status)))
return 0;
@@ -617,7 +617,7 @@ int ibmphp_update_slot_info(struct slot *slot_cur)
err("out of system memory\n");
return -ENOMEM;
}
-
+
info->power_status = SLOT_PWRGD(slot_cur->status);
info->attention_status = SLOT_ATTN(slot_cur->status,
slot_cur->ext_status);
@@ -638,7 +638,7 @@ int ibmphp_update_slot_info(struct slot *slot_cur)
case BUS_SPEED_33:
break;
case BUS_SPEED_66:
- if (mode == BUS_MODE_PCIX)
+ if (mode == BUS_MODE_PCIX)
bus_speed += 0x01;
else if (mode == BUS_MODE_PCI)
;
@@ -654,8 +654,8 @@ int ibmphp_update_slot_info(struct slot *slot_cur)
}
bus->cur_bus_speed = bus_speed;
- // To do: bus_names
-
+ // To do: bus_names
+
rc = pci_hp_change_slot_info(slot_cur->hotplug_slot, info);
kfree(info);
return rc;
@@ -729,8 +729,8 @@ static void ibm_unconfigure_device(struct pci_func *func)
}
/*
- * The following function is to fix kernel bug regarding
- * getting bus entries, here we manually add those primary
+ * The following function is to fix kernel bug regarding
+ * getting bus entries, here we manually add those primary
* bus entries to kernel bus structure whenever apply
*/
static u8 bus_structure_fixup(u8 busno)
@@ -814,7 +814,7 @@ static int ibm_configure_device(struct pci_func *func)
}
/*******************************************************
- * Returns whether the bus is empty or not
+ * Returns whether the bus is empty or not
*******************************************************/
static int is_bus_empty(struct slot * slot_cur)
{
@@ -842,7 +842,7 @@ static int is_bus_empty(struct slot * slot_cur)
}
/***********************************************************
- * If the HPC permits and the bus currently empty, tries to set the
+ * If the HPC permits and the bus currently empty, tries to set the
* bus speed and mode at the maximum card and bus capability
* Parameters: slot
* Returns: bus is set (0) or error code
@@ -856,7 +856,7 @@ static int set_bus(struct slot * slot_cur)
static struct pci_device_id ciobx[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, 0x0101) },
{ },
- };
+ };
debug("%s - entry slot # %d\n", __func__, slot_cur->number);
if (SET_BUS_STATUS(slot_cur->ctrl) && is_bus_empty(slot_cur)) {
@@ -877,7 +877,7 @@ static int set_bus(struct slot * slot_cur)
else if (!SLOT_BUS_MODE(slot_cur->ext_status))
/* if max slot/bus capability is 66 pci
and there's no bus mode mismatch, then
- the adapter supports 66 pci */
+ the adapter supports 66 pci */
cmd = HPC_BUS_66CONVMODE;
else
cmd = HPC_BUS_33CONVMODE;
@@ -930,7 +930,7 @@ static int set_bus(struct slot * slot_cur)
return -EIO;
}
}
- /* This is for x440, once Brandon fixes the firmware,
+ /* This is for x440, once Brandon fixes the firmware,
will not need this delay */
msleep(1000);
debug("%s -Exit\n", __func__);
@@ -938,9 +938,9 @@ static int set_bus(struct slot * slot_cur)
}
/* This routine checks the bus limitations that the slot is on from the BIOS.
- * This is used in deciding whether or not to power up the slot.
+ * This is used in deciding whether or not to power up the slot.
* (electrical/spec limitations. For example, >1 133 MHz or >2 66 PCI cards on
- * same bus)
+ * same bus)
* Parameters: slot
* Returns: 0 = no limitations, -EINVAL = exceeded limitations on the bus
*/
@@ -986,7 +986,7 @@ static int check_limitations(struct slot *slot_cur)
static inline void print_card_capability(struct slot *slot_cur)
{
info("capability of the card is ");
- if ((slot_cur->ext_status & CARD_INFO) == PCIX133)
+ if ((slot_cur->ext_status & CARD_INFO) == PCIX133)
info(" 133 MHz PCI-X\n");
else if ((slot_cur->ext_status & CARD_INFO) == PCIX66)
info(" 66 MHz PCI-X\n");
@@ -1020,7 +1020,7 @@ static int enable_slot(struct hotplug_slot *hs)
}
attn_LED_blink(slot_cur);
-
+
rc = set_bus(slot_cur);
if (rc) {
err("was not able to set the bus\n");
@@ -1082,7 +1082,7 @@ static int enable_slot(struct hotplug_slot *hs)
rc = slot_update(&slot_cur);
if (rc)
goto error_power;
-
+
rc = -EINVAL;
if (SLOT_POWER(slot_cur->status) && !(SLOT_PWRGD(slot_cur->status))) {
err("power fault occurred trying to power up...\n");
@@ -1093,7 +1093,7 @@ static int enable_slot(struct hotplug_slot *hs)
"speed and card capability\n");
print_card_capability(slot_cur);
goto error_power;
- }
+ }
/* Don't think this case will happen after above checks...
* but just in case, for paranoia sake */
if (!(SLOT_POWER(slot_cur->status))) {
@@ -1144,7 +1144,7 @@ static int enable_slot(struct hotplug_slot *hs)
ibmphp_print_test();
rc = ibmphp_update_slot_info(slot_cur);
exit:
- ibmphp_unlock_operations();
+ ibmphp_unlock_operations();
return rc;
error_nopower:
@@ -1180,7 +1180,7 @@ static int ibmphp_disable_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
int rc;
-
+
ibmphp_lock_operations();
rc = ibmphp_do_disable_slot(slot);
ibmphp_unlock_operations();
@@ -1192,12 +1192,12 @@ int ibmphp_do_disable_slot(struct slot *slot_cur)
int rc;
u8 flag;
- debug("DISABLING SLOT...\n");
-
+ debug("DISABLING SLOT...\n");
+
if ((slot_cur == NULL) || (slot_cur->ctrl == NULL)) {
return -ENODEV;
}
-
+
flag = slot_cur->flag;
slot_cur->flag = 1;
@@ -1210,7 +1210,7 @@ int ibmphp_do_disable_slot(struct slot *slot_cur)
attn_LED_blink(slot_cur);
if (slot_cur->func == NULL) {
- /* We need this for fncs's that were there on bootup */
+ /* We need this for functions that were there on bootup */
slot_cur->func = kzalloc(sizeof(struct pci_func), GFP_KERNEL);
if (!slot_cur->func) {
err("out of system memory\n");
@@ -1222,12 +1222,13 @@ int ibmphp_do_disable_slot(struct slot *slot_cur)
}
ibm_unconfigure_device(slot_cur->func);
-
- /* If we got here from latch suddenly opening on operating card or
- a power fault, there's no power to the card, so cannot
- read from it to determine what resources it occupied. This operation
- is forbidden anyhow. The best we can do is remove it from kernel
- lists at least */
+
+ /*
+ * If we got here from latch suddenly opening on operating card or
+ * a power fault, there's no power to the card, so cannot
+ * read from it to determine what resources it occupied. This operation
+ * is forbidden anyhow. The best we can do is remove it from kernel
+ * lists at least */
if (!flag) {
attn_off(slot_cur);
@@ -1264,7 +1265,7 @@ error:
rc = -EFAULT;
goto exit;
}
- if (flag)
+ if (flag)
ibmphp_update_slot_info(slot_cur);
goto exit;
}
@@ -1339,7 +1340,7 @@ static int __init ibmphp_init(void)
debug("AFTER Resource & EBDA INITIALIZATIONS\n");
max_slots = get_max_slots();
-
+
if ((rc = ibmphp_register_pci()))
goto error;
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 9df78bc14541..bd044158b36c 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -123,7 +123,7 @@ static struct ebda_pci_rsrc *alloc_ebda_pci_rsrc (void)
static void __init print_bus_info (void)
{
struct bus_info *ptr;
-
+
list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
debug ("%s - slot_min = %x\n", __func__, ptr->slot_min);
debug ("%s - slot_max = %x\n", __func__, ptr->slot_max);
@@ -131,7 +131,7 @@ static void __init print_bus_info (void)
debug ("%s - bus# = %x\n", __func__, ptr->busno);
debug ("%s - current_speed = %x\n", __func__, ptr->current_speed);
debug ("%s - controller_id = %x\n", __func__, ptr->controller_id);
-
+
debug ("%s - slots_at_33_conv = %x\n", __func__, ptr->slots_at_33_conv);
debug ("%s - slots_at_66_conv = %x\n", __func__, ptr->slots_at_66_conv);
debug ("%s - slots_at_66_pcix = %x\n", __func__, ptr->slots_at_66_pcix);
@@ -144,7 +144,7 @@ static void __init print_bus_info (void)
static void print_lo_info (void)
{
struct rio_detail *ptr;
- debug ("print_lo_info ----\n");
+ debug ("print_lo_info ----\n");
list_for_each_entry(ptr, &rio_lo_head, rio_detail_list) {
debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id);
debug ("%s - rio_type = %x\n", __func__, ptr->rio_type);
@@ -176,7 +176,7 @@ static void __init print_ebda_pci_rsrc (void)
struct ebda_pci_rsrc *ptr;
list_for_each_entry(ptr, &ibmphp_ebda_pci_rsrc_head, ebda_pci_rsrc_list) {
- debug ("%s - rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n",
+ debug ("%s - rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n",
__func__, ptr->rsrc_type ,ptr->bus_num, ptr->dev_fun,ptr->start_addr, ptr->end_addr);
}
}
@@ -259,7 +259,7 @@ int __init ibmphp_access_ebda (void)
ebda_seg = readw (io_mem);
iounmap (io_mem);
debug ("returned ebda segment: %x\n", ebda_seg);
-
+
io_mem = ioremap(ebda_seg<<4, 1);
if (!io_mem)
return -ENOMEM;
@@ -310,7 +310,7 @@ int __init ibmphp_access_ebda (void)
re = readw (io_mem + sub_addr); /* next sub blk */
sub_addr += 2;
- rc_id = readw (io_mem + sub_addr); /* sub blk id */
+ rc_id = readw (io_mem + sub_addr); /* sub blk id */
sub_addr += 2;
if (rc_id != 0x5243)
@@ -330,7 +330,7 @@ int __init ibmphp_access_ebda (void)
debug ("info about hpc descriptor---\n");
debug ("hot blk format: %x\n", format);
debug ("num of controller: %x\n", num_ctlrs);
- debug ("offset of hpc data structure enteries: %x\n ", sub_addr);
+ debug ("offset of hpc data structure entries: %x\n ", sub_addr);
sub_addr = base + re; /* re sub blk */
/* FIXME: rc is never used/checked */
@@ -359,7 +359,7 @@ int __init ibmphp_access_ebda (void)
debug ("info about rsrc descriptor---\n");
debug ("format: %x\n", format);
debug ("num of rsrc: %x\n", num_entries);
- debug ("offset of rsrc data structure enteries: %x\n ", sub_addr);
+ debug ("offset of rsrc data structure entries: %x\n ", sub_addr);
hs_complete = 1;
} else {
@@ -376,7 +376,7 @@ int __init ibmphp_access_ebda (void)
rio_table_ptr->scal_count = readb (io_mem + offset + 1);
rio_table_ptr->riodev_count = readb (io_mem + offset + 2);
rio_table_ptr->offset = offset +3 ;
-
+
debug("info about rio table hdr ---\n");
debug("ver_num: %x\nscal_count: %x\nriodev_count: %x\noffset of rio table: %x\n ",
rio_table_ptr->ver_num, rio_table_ptr->scal_count,
@@ -440,12 +440,12 @@ static int __init ebda_rio_table (void)
rio_detail_ptr->chassis_num = readb (io_mem + offset + 14);
// debug ("rio_node_id: %x\nbbar: %x\nrio_type: %x\nowner_id: %x\nport0_node: %x\nport0_port: %x\nport1_node: %x\nport1_port: %x\nfirst_slot_num: %x\nstatus: %x\n", rio_detail_ptr->rio_node_id, rio_detail_ptr->bbar, rio_detail_ptr->rio_type, rio_detail_ptr->owner_id, rio_detail_ptr->port0_node_connect, rio_detail_ptr->port0_port_connect, rio_detail_ptr->port1_node_connect, rio_detail_ptr->port1_port_connect, rio_detail_ptr->first_slot_num, rio_detail_ptr->status);
//create linked list of chassis
- if (rio_detail_ptr->rio_type == 4 || rio_detail_ptr->rio_type == 5)
+ if (rio_detail_ptr->rio_type == 4 || rio_detail_ptr->rio_type == 5)
list_add (&rio_detail_ptr->rio_detail_list, &rio_vg_head);
- //create linked list of expansion box
- else if (rio_detail_ptr->rio_type == 6 || rio_detail_ptr->rio_type == 7)
+ //create linked list of expansion box
+ else if (rio_detail_ptr->rio_type == 6 || rio_detail_ptr->rio_type == 7)
list_add (&rio_detail_ptr->rio_detail_list, &rio_lo_head);
- else
+ else
// not in my concern
kfree (rio_detail_ptr);
offset += 15;
@@ -456,7 +456,7 @@ static int __init ebda_rio_table (void)
}
/*
- * reorganizing linked list of chassis
+ * reorganizing linked list of chassis
*/
static struct opt_rio *search_opt_vg (u8 chassis_num)
{
@@ -464,7 +464,7 @@ static struct opt_rio *search_opt_vg (u8 chassis_num)
list_for_each_entry(ptr, &opt_vg_head, opt_rio_list) {
if (ptr->chassis_num == chassis_num)
return ptr;
- }
+ }
return NULL;
}
@@ -472,7 +472,7 @@ static int __init combine_wpg_for_chassis (void)
{
struct opt_rio *opt_rio_ptr = NULL;
struct rio_detail *rio_detail_ptr = NULL;
-
+
list_for_each_entry(rio_detail_ptr, &rio_vg_head, rio_detail_list) {
opt_rio_ptr = search_opt_vg (rio_detail_ptr->chassis_num);
if (!opt_rio_ptr) {
@@ -484,14 +484,14 @@ static int __init combine_wpg_for_chassis (void)
opt_rio_ptr->first_slot_num = rio_detail_ptr->first_slot_num;
opt_rio_ptr->middle_num = rio_detail_ptr->first_slot_num;
list_add (&opt_rio_ptr->opt_rio_list, &opt_vg_head);
- } else {
+ } else {
opt_rio_ptr->first_slot_num = min (opt_rio_ptr->first_slot_num, rio_detail_ptr->first_slot_num);
opt_rio_ptr->middle_num = max (opt_rio_ptr->middle_num, rio_detail_ptr->first_slot_num);
- }
+ }
}
print_opt_vg ();
- return 0;
-}
+ return 0;
+}
/*
* reorganizing linked list of expansion box
@@ -502,7 +502,7 @@ static struct opt_rio_lo *search_opt_lo (u8 chassis_num)
list_for_each_entry(ptr, &opt_lo_head, opt_rio_lo_list) {
if (ptr->chassis_num == chassis_num)
return ptr;
- }
+ }
return NULL;
}
@@ -510,7 +510,7 @@ static int combine_wpg_for_expansion (void)
{
struct opt_rio_lo *opt_rio_lo_ptr = NULL;
struct rio_detail *rio_detail_ptr = NULL;
-
+
list_for_each_entry(rio_detail_ptr, &rio_lo_head, rio_detail_list) {
opt_rio_lo_ptr = search_opt_lo (rio_detail_ptr->chassis_num);
if (!opt_rio_lo_ptr) {
@@ -522,22 +522,22 @@ static int combine_wpg_for_expansion (void)
opt_rio_lo_ptr->first_slot_num = rio_detail_ptr->first_slot_num;
opt_rio_lo_ptr->middle_num = rio_detail_ptr->first_slot_num;
opt_rio_lo_ptr->pack_count = 1;
-
+
list_add (&opt_rio_lo_ptr->opt_rio_lo_list, &opt_lo_head);
- } else {
+ } else {
opt_rio_lo_ptr->first_slot_num = min (opt_rio_lo_ptr->first_slot_num, rio_detail_ptr->first_slot_num);
opt_rio_lo_ptr->middle_num = max (opt_rio_lo_ptr->middle_num, rio_detail_ptr->first_slot_num);
opt_rio_lo_ptr->pack_count = 2;
- }
+ }
}
- return 0;
+ return 0;
}
-
+
/* Since we don't know the max slot number per each chassis, hence go
* through the list of all chassis to find out the range
- * Arguments: slot_num, 1st slot number of the chassis we think we are on,
- * var (0 = chassis, 1 = expansion box)
+ * Arguments: slot_num, 1st slot number of the chassis we think we are on,
+ * var (0 = chassis, 1 = expansion box)
*/
static int first_slot_num (u8 slot_num, u8 first_slot, u8 var)
{
@@ -547,7 +547,7 @@ static int first_slot_num (u8 slot_num, u8 first_slot, u8 var)
if (!var) {
list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) {
- if ((first_slot < opt_vg_ptr->first_slot_num) && (slot_num >= opt_vg_ptr->first_slot_num)) {
+ if ((first_slot < opt_vg_ptr->first_slot_num) && (slot_num >= opt_vg_ptr->first_slot_num)) {
rc = -ENODEV;
break;
}
@@ -569,7 +569,7 @@ static struct opt_rio_lo * find_rxe_num (u8 slot_num)
list_for_each_entry(opt_lo_ptr, &opt_lo_head, opt_rio_lo_list) {
//check to see if this slot_num belongs to expansion box
- if ((slot_num >= opt_lo_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_lo_ptr->first_slot_num, 1)))
+ if ((slot_num >= opt_lo_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_lo_ptr->first_slot_num, 1)))
return opt_lo_ptr;
}
return NULL;
@@ -580,8 +580,8 @@ static struct opt_rio * find_chassis_num (u8 slot_num)
struct opt_rio *opt_vg_ptr;
list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) {
- //check to see if this slot_num belongs to chassis
- if ((slot_num >= opt_vg_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_vg_ptr->first_slot_num, 0)))
+ //check to see if this slot_num belongs to chassis
+ if ((slot_num >= opt_vg_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_vg_ptr->first_slot_num, 0)))
return opt_vg_ptr;
}
return NULL;
@@ -594,13 +594,13 @@ static u8 calculate_first_slot (u8 slot_num)
{
u8 first_slot = 1;
struct slot * slot_cur;
-
+
list_for_each_entry(slot_cur, &ibmphp_slot_head, ibm_slot_list) {
if (slot_cur->ctrl) {
- if ((slot_cur->ctrl->ctlr_type != 4) && (slot_cur->ctrl->ending_slot_num > first_slot) && (slot_num > slot_cur->ctrl->ending_slot_num))
+ if ((slot_cur->ctrl->ctlr_type != 4) && (slot_cur->ctrl->ending_slot_num > first_slot) && (slot_num > slot_cur->ctrl->ending_slot_num))
first_slot = slot_cur->ctrl->ending_slot_num;
}
- }
+ }
return first_slot + 1;
}
@@ -622,11 +622,11 @@ static char *create_file_name (struct slot * slot_cur)
err ("Structure passed is empty\n");
return NULL;
}
-
+
slot_num = slot_cur->number;
memset (str, 0, sizeof(str));
-
+
if (rio_table_ptr) {
if (rio_table_ptr->ver_num == 3) {
opt_vg_ptr = find_chassis_num (slot_num);
@@ -660,7 +660,7 @@ static char *create_file_name (struct slot * slot_cur)
/* if both NULL and we DO have correct RIO table in BIOS */
return NULL;
}
- }
+ }
if (!flag) {
if (slot_cur->ctrl->ctlr_type == 4) {
first_slot = calculate_first_slot (slot_num);
@@ -798,7 +798,7 @@ static int __init ebda_rsrc_controller (void)
slot_ptr->ctl_index = readb (io_mem + addr_slot + 2*slot_num);
slot_ptr->slot_cap = readb (io_mem + addr_slot + 3*slot_num);
- // create bus_info lined list --- if only one slot per bus: slot_min = slot_max
+ // create bus_info lined list --- if only one slot per bus: slot_min = slot_max
bus_info_ptr2 = ibmphp_find_same_bus_num (slot_ptr->slot_bus_num);
if (!bus_info_ptr2) {
@@ -814,9 +814,9 @@ static int __init ebda_rsrc_controller (void)
bus_info_ptr1->index = bus_index++;
bus_info_ptr1->current_speed = 0xff;
bus_info_ptr1->current_bus_mode = 0xff;
-
+
bus_info_ptr1->controller_id = hpc_ptr->ctlr_id;
-
+
list_add_tail (&bus_info_ptr1->bus_info_list, &bus_info_head);
} else {
@@ -851,7 +851,7 @@ static int __init ebda_rsrc_controller (void)
bus_info_ptr2->slots_at_66_conv = bus_ptr->slots_at_66_conv;
bus_info_ptr2->slots_at_66_pcix = bus_ptr->slots_at_66_pcix;
bus_info_ptr2->slots_at_100_pcix = bus_ptr->slots_at_100_pcix;
- bus_info_ptr2->slots_at_133_pcix = bus_ptr->slots_at_133_pcix;
+ bus_info_ptr2->slots_at_133_pcix = bus_ptr->slots_at_133_pcix;
}
bus_ptr++;
}
@@ -864,7 +864,7 @@ static int __init ebda_rsrc_controller (void)
hpc_ptr->u.pci_ctlr.dev_fun = readb (io_mem + addr + 1);
hpc_ptr->irq = readb (io_mem + addr + 2);
addr += 3;
- debug ("ctrl bus = %x, ctlr devfun = %x, irq = %x\n",
+ debug ("ctrl bus = %x, ctlr devfun = %x, irq = %x\n",
hpc_ptr->u.pci_ctlr.bus,
hpc_ptr->u.pci_ctlr.dev_fun, hpc_ptr->irq);
break;
@@ -932,7 +932,7 @@ static int __init ebda_rsrc_controller (void)
tmp_slot->supported_speed = 2;
else if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_66_MAX) == EBDA_SLOT_66_MAX)
tmp_slot->supported_speed = 1;
-
+
if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_PCIX_CAP) == EBDA_SLOT_PCIX_CAP)
tmp_slot->supported_bus_mode = 1;
else
@@ -1000,7 +1000,7 @@ error_no_hpc:
return rc;
}
-/*
+/*
* map info (bus, devfun, start addr, end addr..) of i/o, memory,
* pfm from the physical addr to a list of resource.
*/
@@ -1057,7 +1057,7 @@ static int __init ebda_rsrc_rsrc (void)
addr += 10;
debug ("rsrc from mem or pfm ---\n");
- debug ("rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n",
+ debug ("rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n",
rsrc_ptr->rsrc_type, rsrc_ptr->bus_num, rsrc_ptr->dev_fun, rsrc_ptr->start_addr, rsrc_ptr->end_addr);
list_add (&rsrc_ptr->ebda_pci_rsrc_list, &ibmphp_ebda_pci_rsrc_head);
@@ -1096,7 +1096,7 @@ struct bus_info *ibmphp_find_same_bus_num (u32 num)
struct bus_info *ptr;
list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
- if (ptr->busno == num)
+ if (ptr->busno == num)
return ptr;
}
return NULL;
@@ -1110,7 +1110,7 @@ int ibmphp_get_bus_index (u8 num)
struct bus_info *ptr;
list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
- if (ptr->busno == num)
+ if (ptr->busno == num)
return ptr->index;
}
return -ENODEV;
@@ -1168,7 +1168,7 @@ static struct pci_device_id id_table[] = {
.subdevice = HPC_SUBSYSTEM_ID,
.class = ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00),
}, {}
-};
+};
MODULE_DEVICE_TABLE(pci, id_table);
@@ -1197,7 +1197,7 @@ static int ibmphp_probe (struct pci_dev * dev, const struct pci_device_id *ids)
struct controller *ctrl;
debug ("inside ibmphp_probe\n");
-
+
list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) {
if (ctrl->ctlr_type == 1) {
if ((dev->devfn == ctrl->u.pci_ctlr.dev_fun) && (dev->bus->number == ctrl->u.pci_ctlr.bus)) {
@@ -1210,4 +1210,3 @@ static int ibmphp_probe (struct pci_dev * dev, const struct pci_device_id *ids)
}
return -ENODEV;
}
-
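
Background for the ibmphp_access_ebda() hunks above: the driver locates the Extended BIOS Data Area by reading the 16-bit segment the BIOS stores in the BIOS Data Area and mapping segment<<4 as a physical address. A condensed sketch of that sequence (the 0x40E slot is the conventional BDA location; mapping sizes here are illustrative):

	void __iomem *io_mem = ioremap(0x40E, 2);	/* BDA word holding the EBDA segment */
	u16 ebda_seg = readw(io_mem);
	iounmap(io_mem);
	io_mem = ioremap((unsigned long)ebda_seg << 4, 1024);	/* the EBDA itself */
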
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c
index f59ed30512b5..5fc7a089f532 100644
--- a/drivers/pci/hotplug/ibmphp_hpc.c
+++ b/drivers/pci/hotplug/ibmphp_hpc.c
@@ -258,7 +258,7 @@ static u8 i2c_ctrl_write (struct controller *ctlr_ptr, void __iomem *WPGBbar, u8
{
u8 rc;
void __iomem *wpg_addr; // base addr + offset
- unsigned long wpg_data; // data to/from WPG LOHI format
+ unsigned long wpg_data; // data to/from WPG LOHI format
unsigned long ultemp;
unsigned long data; // actual data HILO format
int i;
@@ -351,7 +351,7 @@ static u8 i2c_ctrl_write (struct controller *ctlr_ptr, void __iomem *WPGBbar, u8
}
//------------------------------------------------------------
-// Read from ISA type HPC
+// Read from ISA type HPC
//------------------------------------------------------------
static u8 isa_ctrl_read (struct controller *ctlr_ptr, u8 offset)
{
@@ -372,7 +372,7 @@ static void isa_ctrl_write (struct controller *ctlr_ptr, u8 offset, u8 data)
{
u16 start_address;
u16 port_address;
-
+
start_address = ctlr_ptr->u.isa_ctlr.io_start;
port_address = start_address + (u16) offset;
outb (data, port_address);
@@ -656,11 +656,11 @@ int ibmphp_hpc_readslot (struct slot * pslot, u8 cmd, u8 * pstatus)
//--------------------------------------------------------------------
// cleanup
//--------------------------------------------------------------------
-
+
// remove physical to logical address mapping
if ((ctlr_ptr->ctlr_type == 2) || (ctlr_ptr->ctlr_type == 4))
iounmap (wpg_bbar);
-
+
free_hpc_access ();
debug_polling ("%s - Exit rc[%d]\n", __func__, rc);
@@ -835,7 +835,7 @@ static int poll_hpc(void *data)
down (&semOperations);
switch (poll_state) {
- case POLL_LATCH_REGISTER:
+ case POLL_LATCH_REGISTER:
oldlatchlow = curlatchlow;
ctrl_count = 0x00;
list_for_each (pslotlist, &ibmphp_slot_head) {
@@ -892,16 +892,16 @@ static int poll_hpc(void *data)
if (kthread_should_stop())
goto out_sleep;
-
+
down (&semOperations);
-
+
if (poll_count >= POLL_LATCH_CNT) {
poll_count = 0;
poll_state = POLL_SLOTS;
} else
poll_state = POLL_LATCH_REGISTER;
break;
- }
+ }
/* give up the hardware semaphore */
up (&semOperations);
/* sleep for a short time just for good measure */
@@ -958,7 +958,7 @@ static int process_changeinstatus (struct slot *pslot, struct slot *poldslot)
// bit 5 - HPC_SLOT_PWRGD
if ((pslot->status & 0x20) != (poldslot->status & 0x20))
// OFF -> ON: ignore, ON -> OFF: disable slot
- if ((poldslot->status & 0x20) && (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT (poldslot->status)))
+ if ((poldslot->status & 0x20) && (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT (poldslot->status)))
disable = 1;
// bit 6 - HPC_SLOT_BUS_SPEED
@@ -980,7 +980,7 @@ static int process_changeinstatus (struct slot *pslot, struct slot *poldslot)
pslot->status &= ~HPC_SLOT_POWER;
}
}
- // CLOSE -> OPEN
+ // CLOSE -> OPEN
else if ((SLOT_PWRGD (poldslot->status) == HPC_SLOT_PWRGD_GOOD)
&& (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT (poldslot->status))) {
disable = 1;
@@ -1075,7 +1075,7 @@ void __exit ibmphp_hpc_stop_poll_thread (void)
debug ("before locking operations \n");
ibmphp_lock_operations ();
debug ("after locking operations \n");
-
+
// wait for poll thread to exit
debug ("before sem_exit down \n");
down (&sem_exit);
diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c
index c60f5f3e838d..639ea3a75e14 100644
--- a/drivers/pci/hotplug/ibmphp_pci.c
+++ b/drivers/pci/hotplug/ibmphp_pci.c
@@ -1,8 +1,8 @@
/*
* IBM Hot Plug Controller Driver
- *
+ *
* Written By: Irene Zubarev, IBM Corporation
- *
+ *
* Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (C) 2001,2002 IBM Corp.
*
@@ -42,7 +42,7 @@ static u8 find_sec_number (u8 primary_busno, u8 slotno);
/*
* NOTE..... If BIOS doesn't provide default routing, we assign:
- * 9 for SCSI, 10 for LAN adapters, and 11 for everything else.
+ * 9 for SCSI, 10 for LAN adapters, and 11 for everything else.
* If adapter is bridged, then we assign 11 to it and devices behind it.
* We also assign the same irq numbers for multi function devices.
* These are PIC mode, so shouldn't matter n.e.ways (hopefully)
@@ -71,11 +71,11 @@ static void assign_alt_irq (struct pci_func * cur_func, u8 class_code)
* Configures the device to be added (will allocate needed resources if it
* can), the device can be a bridge or a regular pci device, can also be
* multi-functional
- *
+ *
* Input: function to be added
- *
+ *
* TO DO: The error case with Multifunction device or multi function bridge,
- * if there is an error, will need to go through all previous functions and
+ * if there is an error, will need to go through all previous functions and
* unconfigure....or can add some code into unconfigure_card....
*/
int ibmphp_configure_card (struct pci_func *func, u8 slotno)
@@ -98,7 +98,7 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno)
cur_func = func;
/* We only get bus and device from IRQ routing table. So at this point,
- * func->busno is correct, and func->device contains only device (at the 5
+ * func->busno is correct, and func->device contains only device (at the 5
* highest bits)
*/
@@ -151,7 +151,7 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno)
cur_func->device, cur_func->busno);
cleanup_count = 6;
goto error;
- }
+ }
cur_func->next = NULL;
function = 0x8;
break;
@@ -339,7 +339,7 @@ error:
}
/*
- * This function configures the pci BARs of a single device.
+ * This function configures the pci BARs of a single device.
* Input: pointer to the pci_func
* Output: configured PCI, 0, or error
*/
@@ -371,17 +371,17 @@ static int configure_device (struct pci_func *func)
for (count = 0; address[count]; count++) { /* for 6 BARs */
- /* not sure if i need this. per scott, said maybe need smth like this
+ /* not sure if i need this. per scott, said maybe need * something like this
if devices don't adhere 100% to the spec, so don't want to write
to the reserved bits
- pcibios_read_config_byte(cur_func->busno, cur_func->device,
+ pcibios_read_config_byte(cur_func->busno, cur_func->device,
PCI_BASE_ADDRESS_0 + 4 * count, &tmp);
if (tmp & 0x01) // IO
- pcibios_write_config_dword(cur_func->busno, cur_func->device,
+ pcibios_write_config_dword(cur_func->busno, cur_func->device,
PCI_BASE_ADDRESS_0 + 4 * count, 0xFFFFFFFD);
else // Memory
- pcibios_write_config_dword(cur_func->busno, cur_func->device,
+ pcibios_write_config_dword(cur_func->busno, cur_func->device,
PCI_BASE_ADDRESS_0 + 4 * count, 0xFFFFFFFF);
*/
pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], 0xFFFFFFFF);
@@ -421,8 +421,8 @@ static int configure_device (struct pci_func *func)
return -EIO;
}
pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], func->io[count]->start);
-
- /* _______________This is for debugging purposes only_____________________ */
+
+ /* _______________This is for debugging purposes only_____________________ */
debug ("b4 writing, the IO address is %x\n", func->io[count]->start);
pci_bus_read_config_dword (ibmphp_pci_bus, devfn, address[count], &bar[count]);
debug ("after writing.... the start address is %x\n", bar[count]);
@@ -484,7 +484,7 @@ static int configure_device (struct pci_func *func)
pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], func->pfmem[count]->start);
- /*_______________This is for debugging purposes only______________________________*/
+ /*_______________This is for debugging purposes only______________________________*/
debug ("b4 writing, start address is %x\n", func->pfmem[count]->start);
pci_bus_read_config_dword (ibmphp_pci_bus, devfn, address[count], &bar[count]);
debug ("after writing, start address is %x\n", bar[count]);
@@ -559,7 +559,7 @@ static int configure_device (struct pci_func *func)
/******************************************************************************
* This routine configures a PCI-2-PCI bridge and the functions behind it
* Parameters: pci_func
- * Returns:
+ * Returns:
******************************************************************************/
static int configure_bridge (struct pci_func **func_passed, u8 slotno)
{
@@ -622,7 +622,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
debug ("AFTER FIND_SEC_NUMBER, func->busno IS %x\n", func->busno);
pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, sec_number);
-
+
/* __________________For debugging purposes only __________________________________
pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_number);
debug ("sec_number after write/read is %x\n", sec_number);
@@ -644,7 +644,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
/* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- !!!!!!!!!!!!!!!NEED TO ADD!!! FAST BACK-TO-BACK ENABLE!!!!!!!!!!!!!!!!!!!!
+ !!!!!!!!!!!!!!!NEED TO ADD!!! FAST BACK-TO-BACK ENABLE!!!!!!!!!!!!!!!!!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!*/
@@ -670,7 +670,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
debug ("len[count] in IO = %x\n", len[count]);
bus_io[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
-
+
if (!bus_io[count]) {
err ("out of system memory\n");
retval = -ENOMEM;
@@ -735,7 +735,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
ibmphp_add_pfmem_from_mem (bus_pfmem[count]);
func->pfmem[count] = bus_pfmem[count];
} else {
- err ("cannot allocate requested pfmem for bus %x, device %x, len %x\n",
+ err ("cannot allocate requested pfmem for bus %x, device %x, len %x\n",
func->busno, func->device, len[count]);
kfree (mem_tmp);
kfree (bus_pfmem[count]);
@@ -805,7 +805,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
debug ("amount_needed->mem = %x\n", amount_needed->mem);
debug ("amount_needed->pfmem = %x\n", amount_needed->pfmem);
- if (amount_needed->not_correct) {
+ if (amount_needed->not_correct) {
debug ("amount_needed is not correct\n");
for (count = 0; address[count]; count++) {
/* for 2 BARs */
@@ -830,7 +830,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
} else {
debug ("it wants %x IO behind the bridge\n", amount_needed->io);
io = kzalloc(sizeof(*io), GFP_KERNEL);
-
+
if (!io) {
err ("out of system memory\n");
retval = -ENOMEM;
@@ -959,7 +959,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
if (bus->noIORanges) {
pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_IO_BASE, 0x00 | bus->rangeIO->start >> 8);
- pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_IO_LIMIT, 0x00 | bus->rangeIO->end >> 8);
+ pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_IO_LIMIT, 0x00 | bus->rangeIO->end >> 8);
/* _______________This is for debugging purposes only ____________________
pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_IO_BASE, &temp);
@@ -980,7 +980,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
if (bus->noMemRanges) {
pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, 0x0000 | bus->rangeMem->start >> 16);
pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_LIMIT, 0x0000 | bus->rangeMem->end >> 16);
-
+
/* ____________________This is for debugging purposes only ________________________
pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, &temp);
debug ("mem_base = %x\n", (temp & PCI_MEMORY_RANGE_TYPE_MASK) << 16);
@@ -1017,7 +1017,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_INTERRUPT_PIN, &irq);
if ((irq > 0x00) && (irq < 0x05))
pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_INTERRUPT_LINE, func->irq[irq - 1]);
- /*
+ /*
pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, ctrl);
pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, PCI_BRIDGE_CTL_PARITY);
pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, PCI_BRIDGE_CTL_SERR);
@@ -1071,7 +1071,7 @@ error:
* This function adds up the amount of resources needed behind the PPB bridge
* and passes it to the configure_bridge function
* Input: bridge function
- * Ouput: amount of resources needed
+ * Output: amount of resources needed
*****************************************************************************/
static struct res_needed *scan_behind_bridge (struct pci_func * func, u8 busno)
{
@@ -1204,9 +1204,9 @@ static struct res_needed *scan_behind_bridge (struct pci_func * func, u8 busno)
return amount;
}
-/* The following 3 unconfigure_boot_ routines deal with the case when we had the card
- * upon bootup in the system, since we don't allocate func to such case, we need to read
- * the start addresses from pci config space and then find the corresponding entries in
+/* The following 3 unconfigure_boot_ routines deal with the case when we had the card
+ * upon bootup in the system, since we don't allocate func to such case, we need to read
+ * the start addresses from pci config space and then find the corresponding entries in
* our resource lists. The functions return either 0, -ENODEV, or -1 (general failure)
* Change: we also call these functions even if we configured the card ourselves (i.e., not
* the bootup case), since it should work same way
@@ -1561,8 +1561,8 @@ static int unconfigure_boot_card (struct slot *slot_cur)
* unconfiguring the device
* TO DO: will probably need to add some code in case there was some resource,
* to remove it... this is from when we have errors in the configure_card...
- * !!!!!!!!!!!!!!!!!!!!!!!!!FOR BUSES!!!!!!!!!!!!
- * Returns: 0, -1, -ENODEV
+ * !!!!!!!!!!!!!!!!!!!!!!!!!FOR BUSES!!!!!!!!!!!!
+ * Returns: 0, -1, -ENODEV
*/
int ibmphp_unconfigure_card (struct slot **slot_cur, int the_end)
{
@@ -1634,7 +1634,7 @@ int ibmphp_unconfigure_card (struct slot **slot_cur, int the_end)
* Input: bus and the amount of resources needed (we know we can assign those,
* since they've been checked already
* Output: bus added to the correct spot
- * 0, -1, error
+ * 0, -1, error
*/
static int add_new_bus (struct bus_node *bus, struct resource_node *io, struct resource_node *mem, struct resource_node *pfmem, u8 parent_busno)
{
@@ -1650,7 +1650,7 @@ static int add_new_bus (struct bus_node *bus, struct resource_node *io, struct r
err ("strange, cannot find bus which is supposed to be at the system... something is terribly wrong...\n");
return -ENODEV;
}
-
+
list_add (&bus->bus_list, &cur_bus->bus_list);
}
if (io) {
@@ -1679,7 +1679,7 @@ static int add_new_bus (struct bus_node *bus, struct resource_node *io, struct r
}
if (pfmem) {
pfmem_range = kzalloc(sizeof(*pfmem_range), GFP_KERNEL);
- if (!pfmem_range) {
+ if (!pfmem_range) {
err ("out of system memory\n");
return -ENOMEM;
}
@@ -1726,4 +1726,3 @@ static u8 find_sec_number (u8 primary_busno, u8 slotno)
return busno;
return 0xff;
}
-
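
The configure_device() hunks above use the classic BAR-sizing probe: write all-ones to the BAR, read it back, and derive the size from the writable mask. As a compact sketch (for a 32-bit memory BAR; bus and devfn are assumed valid):

	static u32 example_bar_size(struct pci_bus *bus, unsigned int devfn)
	{
		u32 bar;

		pci_bus_write_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, 0xFFFFFFFF);
		pci_bus_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, &bar);
		return ~(bar & PCI_BASE_ADDRESS_MEM_MASK) + 1;
	}
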
diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c
index e2dc289f767c..a265acb2d518 100644
--- a/drivers/pci/hotplug/ibmphp_res.c
+++ b/drivers/pci/hotplug/ibmphp_res.c
@@ -72,7 +72,7 @@ static struct bus_node * __init alloc_error_bus (struct ebda_pci_rsrc * curr, u8
static struct resource_node * __init alloc_resources (struct ebda_pci_rsrc * curr)
{
struct resource_node *rs;
-
+
if (!curr) {
err ("NULL passed to allocate\n");
return NULL;
@@ -128,7 +128,7 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node
}
newrange->start = curr->start_addr;
newrange->end = curr->end_addr;
-
+
if (first_bus || (!num_ranges))
newrange->rangeno = 1;
else {
@@ -162,7 +162,7 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node
newbus->rangePFMem = newrange;
if (first_bus)
newbus->noPFMemRanges = 1;
- else {
+ else {
debug ("1st PFMemory Primary on Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
++newbus->noPFMemRanges;
fix_resources (newbus);
@@ -190,7 +190,7 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node
* This is the Resource Management initialization function. It will go through
* the Resource list taken from EBDA and fill in this module's data structures
*
- * THIS IS NOT TAKING INTO CONSIDERATION IO RESTRICTIONS OF PRIMARY BUSES,
+ * THIS IS NOT TAKING INTO CONSIDERATION IO RESTRICTIONS OF PRIMARY BUSES,
* SINCE WE'RE GOING TO ASSUME FOR NOW WE DON'T HAVE THOSE ON OUR BUSES FOR NOW
*
* Input: ptr to the head of the resource list from EBDA
@@ -382,7 +382,7 @@ int __init ibmphp_rsrc_init (void)
* pci devices' resources for the appropriate resource
*
* Input: type of the resource, range to add, current bus
- * Output: 0 or -1, bus and range ptrs
+ * Output: 0 or -1, bus and range ptrs
********************************************************************************/
static int add_bus_range (int type, struct range_node *range, struct bus_node *bus_cur)
{
@@ -466,7 +466,7 @@ static void update_resources (struct bus_node *bus_cur, int type, int rangeno)
switch (type) {
case MEM:
- if (bus_cur->firstMem)
+ if (bus_cur->firstMem)
res = bus_cur->firstMem;
break;
case PFMEM:
@@ -583,7 +583,7 @@ static void fix_resources (struct bus_node *bus_cur)
}
/*******************************************************************************
- * This routine adds a resource to the list of resources to the appropriate bus
+ * This routine adds a resource to the list of resources to the appropriate bus
* based on their resource type and sorted by their starting addresses. It assigns
* the ptrs to next and nextRange if needed.
*
@@ -605,11 +605,11 @@ int ibmphp_add_resource (struct resource_node *res)
err ("NULL passed to add\n");
return -ENODEV;
}
-
+
bus_cur = find_bus_wprev (res->busno, NULL, 0);
-
+
if (!bus_cur) {
- /* didn't find a bus, smth's wrong!!! */
+ /* didn't find a bus, something's wrong!!! */
debug ("no bus in the system, either pci_dev's wrong or allocation failed\n");
return -ENODEV;
}
@@ -648,7 +648,7 @@ int ibmphp_add_resource (struct resource_node *res)
if (!range_cur) {
switch (res->type) {
case IO:
- ++bus_cur->needIOUpdate;
+ ++bus_cur->needIOUpdate;
break;
case MEM:
++bus_cur->needMemUpdate;
@@ -659,13 +659,13 @@ int ibmphp_add_resource (struct resource_node *res)
}
res->rangeno = -1;
}
-
+
debug ("The range is %d\n", res->rangeno);
if (!res_start) {
/* no first{IO,Mem,Pfmem} on the bus, 1st IO/Mem/Pfmem resource ever */
switch (res->type) {
case IO:
- bus_cur->firstIO = res;
+ bus_cur->firstIO = res;
break;
case MEM:
bus_cur->firstMem = res;
@@ -673,7 +673,7 @@ int ibmphp_add_resource (struct resource_node *res)
case PFMEM:
bus_cur->firstPFMem = res;
break;
- }
+ }
res->next = NULL;
res->nextRange = NULL;
} else {
@@ -770,7 +770,7 @@ int ibmphp_add_resource (struct resource_node *res)
* This routine will remove the resource from the list of resources
*
* Input: io, mem, and/or pfmem resource to be deleted
- * Ouput: modified resource list
+ * Output: modified resource list
* 0 or error code
****************************************************************************/
int ibmphp_remove_resource (struct resource_node *res)
@@ -825,7 +825,7 @@ int ibmphp_remove_resource (struct resource_node *res)
if (!res_cur) {
if (res->type == PFMEM) {
- /*
+ /*
* case where pfmem might be in the PFMemFromMem list
* so will also need to remove the corresponding mem
* entry
@@ -961,12 +961,12 @@ static struct range_node * find_range (struct bus_node *bus_cur, struct resource
}
/*****************************************************************************
- * This routine will check to make sure the io/mem/pfmem->len that the device asked for
+ * This routine will check to make sure the io/mem/pfmem->len that the device asked for
* can fit w/i our list of available IO/MEM/PFMEM resources. If cannot, returns -EINVAL,
* otherwise, returns 0
*
* Input: resource
- * Ouput: the correct start and end address are inputted into the resource node,
+ * Output: the correct start and end address are inputted into the resource node,
* 0 or -EINVAL
*****************************************************************************/
int ibmphp_check_resource (struct resource_node *res, u8 bridge)
@@ -996,7 +996,7 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
bus_cur = find_bus_wprev (res->busno, NULL, 0);
if (!bus_cur) {
- /* didn't find a bus, smth's wrong!!! */
+ /* didn't find a bus, something's wrong!!! */
debug ("no bus in the system, either pci_dev's wrong or allocation failed\n");
return -EINVAL;
}
@@ -1066,7 +1066,7 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
break;
}
}
-
+
if (flag && len_cur == res->len) {
debug ("but we are not here, right?\n");
res->start = start_cur;
@@ -1118,10 +1118,10 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
if (res_prev) {
if (res_prev->rangeno != res_cur->rangeno) {
/* 1st device on this range */
- if ((res_cur->start != range->start) &&
+ if ((res_cur->start != range->start) &&
((len_tmp = res_cur->start - 1 - range->start) >= res->len)) {
if ((len_tmp < len_cur) || (len_cur == 0)) {
- if ((range->start % tmp_divide) == 0) {
+ if ((range->start % tmp_divide) == 0) {
/* just perfect, starting address is divisible by length */
flag = 1;
len_cur = len_tmp;
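
The range->start % tmp_divide test above is an alignment check: a free gap is only usable if a start address divisible by the resource length fits inside it, since PCI BARs must be aligned to their (power-of-two) size. A standalone sketch of that test (helper name hypothetical):

	#include <stdbool.h>

	/*
	 * True if a block of 'len' bytes, aligned to 'len', fits in the
	 * free gap [gap_start, gap_end]. 'len' is assumed to be a nonzero
	 * power of two, as PCI BAR sizes are.
	 */
	static bool aligned_block_fits(unsigned int gap_start,
				       unsigned int gap_end, unsigned int len)
	{
		unsigned int start = (gap_start + len - 1) & ~(len - 1);

		if (start < gap_start || start > gap_end)
			return false;	/* wrapped around, or past the gap */
		return gap_end - start + 1 >= len;
	}
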
@@ -1344,7 +1344,7 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
* This routine is called from remove_card if the card contained PPB.
* It will remove all the resources on the bus as well as the bus itself
* Input: Bus
- * Ouput: 0, -ENODEV
+ * Output: 0, -ENODEV
********************************************************************************/
int ibmphp_remove_bus (struct bus_node *bus, u8 parent_busno)
{
@@ -1353,7 +1353,7 @@ int ibmphp_remove_bus (struct bus_node *bus, u8 parent_busno)
struct bus_node *prev_bus;
int rc;
- prev_bus = find_bus_wprev (parent_busno, NULL, 0);
+ prev_bus = find_bus_wprev (parent_busno, NULL, 0);
if (!prev_bus) {
debug ("something terribly wrong. Cannot find parent bus to the one to remove\n");
@@ -1424,7 +1424,7 @@ int ibmphp_remove_bus (struct bus_node *bus, u8 parent_busno)
}
/******************************************************************************
- * This routine deletes the ranges from a given bus, and the entries from the
+ * This routine deletes the ranges from a given bus, and the entries from the
* parent's bus in the resources
* Input: current bus, previous bus
* Output: 0, -EINVAL
@@ -1453,7 +1453,7 @@ static int remove_ranges (struct bus_node *bus_cur, struct bus_node *bus_prev)
if (bus_cur->noMemRanges) {
range_cur = bus_cur->rangeMem;
for (i = 0; i < bus_cur->noMemRanges; i++) {
- if (ibmphp_find_resource (bus_prev, range_cur->start, &res, MEM) < 0)
+ if (ibmphp_find_resource (bus_prev, range_cur->start, &res, MEM) < 0)
return -EINVAL;
ibmphp_remove_resource (res);
@@ -1467,7 +1467,7 @@ static int remove_ranges (struct bus_node *bus_cur, struct bus_node *bus_prev)
if (bus_cur->noPFMemRanges) {
range_cur = bus_cur->rangePFMem;
for (i = 0; i < bus_cur->noPFMemRanges; i++) {
- if (ibmphp_find_resource (bus_prev, range_cur->start, &res, PFMEM) < 0)
+ if (ibmphp_find_resource (bus_prev, range_cur->start, &res, PFMEM) < 0)
return -EINVAL;
ibmphp_remove_resource (res);
@@ -1482,7 +1482,7 @@ static int remove_ranges (struct bus_node *bus_cur, struct bus_node *bus_prev)
}
/*
- * find the resource node in the bus
+ * find the resource node in the bus
* Input: Resource needed, start address of the resource, type of resource
*/
int ibmphp_find_resource (struct bus_node *bus, u32 start_address, struct resource_node **res, int flag)
@@ -1512,7 +1512,7 @@ int ibmphp_find_resource (struct bus_node *bus, u32 start_address, struct resour
err ("wrong type of flag\n");
return -EINVAL;
}
-
+
while (res_cur) {
if (res_cur->start == start_address) {
*res = res_cur;
@@ -1718,7 +1718,7 @@ static int __init once_over (void)
} /* end for pfmem */
} /* end if */
} /* end list_for_each bus */
- return 0;
+ return 0;
}
int ibmphp_add_pfmem_from_mem (struct resource_node *pfmem)
@@ -1760,9 +1760,9 @@ static struct bus_node *find_bus_wprev (u8 bus_number, struct bus_node **prev, u
list_for_each (tmp, &gbuses) {
tmp_prev = tmp->prev;
bus_cur = list_entry (tmp, struct bus_node, bus_list);
- if (flag)
+ if (flag)
*prev = list_entry (tmp_prev, struct bus_node, bus_list);
- if (bus_cur->busno == bus_number)
+ if (bus_cur->busno == bus_number)
return bus_cur;
}
@@ -1776,7 +1776,7 @@ void ibmphp_print_test (void)
struct range_node *range;
struct resource_node *res;
struct list_head *tmp;
-
+
debug_pci ("*****************START**********************\n");
if ((!list_empty(&gbuses)) && flags) {
@@ -1906,7 +1906,7 @@ static int range_exists_already (struct range_node * range, struct bus_node * bu
return 1;
range_cur = range_cur->next;
}
-
+
return 0;
}
@@ -1920,7 +1920,7 @@ static int range_exists_already (struct range_node * range, struct bus_node * bu
* Returns: none
* Note: this function doesn't take into account IO restrictions etc,
 * so will only work for bridges with no video/ISA devices behind them. It
- * also will not work for onboard PPB's that can have more than 1 *bus
+ * also will not work for onboard PPBs that can have more than 1 bus
 * behind them. All these are TO DO.
* Also need to add more error checkings... (from fnc returns etc)
*/
@@ -1963,7 +1963,7 @@ static int __init update_bridge_ranges (struct bus_node **bus)
case PCI_HEADER_TYPE_BRIDGE:
function = 0x8;
case PCI_HEADER_TYPE_MULTIBRIDGE:
- /* We assume here that only 1 bus behind the bridge
+ /* We assume here that only 1 bus behind the bridge
TO DO: add functionality for several:
temp = secondary;
while (temp < subordinate) {
@@ -1972,7 +1972,7 @@ static int __init update_bridge_ranges (struct bus_node **bus)
}
*/
pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_busno);
- bus_sec = find_bus_wprev (sec_busno, NULL, 0);
+ bus_sec = find_bus_wprev (sec_busno, NULL, 0);
/* this bus structure doesn't exist yet, PPB was configured during previous loading of ibmphp */
if (!bus_sec) {
bus_sec = alloc_error_bus (NULL, sec_busno, 1);
@@ -2028,7 +2028,7 @@ static int __init update_bridge_ranges (struct bus_node **bus)
io->len = io->end - io->start + 1;
ibmphp_add_resource (io);
}
- }
+ }
pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, &start_mem_address);
pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_LIMIT, &end_mem_address);
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index ec20f74c8981..cfa92a984e62 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -131,7 +131,7 @@ static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf,
}
module_put(slot->ops->owner);
-exit:
+exit:
if (retval)
return retval;
return count;
@@ -177,7 +177,7 @@ static ssize_t attention_write_file(struct pci_slot *slot, const char *buf,
retval = ops->set_attention_status(slot->hotplug, attention);
module_put(ops->owner);
-exit:
+exit:
if (retval)
return retval;
return count;
@@ -247,7 +247,7 @@ static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf,
retval = slot->ops->hardware_test(slot, test);
module_put(slot->ops->owner);
-exit:
+exit:
if (retval)
return retval;
return count;
@@ -512,7 +512,7 @@ int pci_hp_deregister(struct hotplug_slot *hotplug)
* @hotplug: pointer to the slot whose info has changed
* @info: pointer to the info copy into the slot's info structure
*
- * @slot must have been registered with the pci
+ * @slot must have been registered with the pci
* hotplug subsystem previously with a call to pci_hp_register().
*
* Returns 0 if successful, anything else for an error.
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 541bbe6d5343..21e865ded1dc 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -180,5 +180,5 @@ static inline int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
{
return 0;
}
-#endif /* CONFIG_ACPI */
+#endif /* CONFIG_ACPI */
#endif /* _PCIEHP_H */
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index ead7c534095e..eddddd447d0d 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -54,7 +54,7 @@ int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
{
if (slot_detection_mode != PCIEHP_DETECT_ACPI)
return 0;
- if (acpi_pci_detect_ejectable(DEVICE_ACPI_HANDLE(&dev->dev)))
+ if (acpi_pci_detect_ejectable(ACPI_HANDLE(&dev->dev)))
return 0;
return -ENODEV;
}
@@ -78,7 +78,7 @@ static int __initdata dup_slot_id;
static int __initdata acpi_slot_detected;
static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots);
-/* Dummy driver for dumplicate name detection */
+/* Dummy driver for duplicate name detection */
static int __init dummy_probe(struct pcie_device *dev)
{
u32 slot_cap;
@@ -96,7 +96,7 @@ static int __init dummy_probe(struct pcie_device *dev)
dup_slot_id++;
}
list_add_tail(&slot->list, &dummy_slots);
- handle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ handle = ACPI_HANDLE(&pdev->dev);
if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle))
acpi_slot_detected = 1;
return -ENODEV; /* dummy driver always returns error */
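
Both hunks above replace DEVICE_ACPI_HANDLE() with ACPI_HANDLE(); the two macros do the same job of mapping a struct device to the acpi_handle of its firmware companion (NULL if it has none). A hedged sketch of the idiom as this file uses it (function name hypothetical):

	#include <linux/acpi.h>
	#include <linux/pci.h>
	#include <linux/pci-acpi.h>

	/* Resolve a PCI device's ACPI companion, then query ejectability. */
	static int check_ejectable(struct pci_dev *pdev)
	{
		acpi_handle handle = ACPI_HANDLE(&pdev->dev);

		if (!handle)
			return -ENODEV;	/* no ACPI companion for this device */
		return acpi_pci_detect_ejectable(handle) ? 0 : -ENODEV;
	}
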
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index f4a18f51a29c..bbd48bbe4e9b 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -351,8 +351,8 @@ static int __init pcied_init(void)
pciehp_firmware_init();
retval = pcie_port_service_register(&hpdriver_portdrv);
- dbg("pcie_port_service_register = %d\n", retval);
- info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
+ dbg("pcie_port_service_register = %d\n", retval);
+ info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
if (retval)
dbg("Failure to register service\n");
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 51f56ef4ab6f..3eea3fdd4b0b 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -92,7 +92,7 @@ static void start_int_poll_timer(struct controller *ctrl, int sec)
{
/* Clamp to sane value */
if ((sec <= 0) || (sec > 60))
- sec = 2;
+ sec = 2;
ctrl->poll_timer.function = &int_poll_timeout;
ctrl->poll_timer.data = (unsigned long)ctrl;
@@ -194,7 +194,7 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
ctrl_dbg(ctrl, "CMD_COMPLETED not clear after 1 sec\n");
} else if (!NO_CMD_CMPL(ctrl)) {
/*
- * This controller semms to notify of command completed
+ * This controller seems to notify of command completed
* event even though it supports none of power
* controller, attention led, power led and EMI.
*/
@@ -926,7 +926,7 @@ struct controller *pcie_init(struct pcie_device *dev)
if (pciehp_writew(ctrl, PCI_EXP_SLTSTA, 0x1f))
goto abort_ctrl;
- /* Disable sotfware notification */
+ /* Disable software notification */
pcie_disable_notification(ctrl);
ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
diff --git a/drivers/pci/hotplug/pcihp_skeleton.c b/drivers/pci/hotplug/pcihp_skeleton.c
index 1f00b937f721..ac69094e4b20 100644
--- a/drivers/pci/hotplug/pcihp_skeleton.c
+++ b/drivers/pci/hotplug/pcihp_skeleton.c
@@ -52,7 +52,7 @@ static LIST_HEAD(slot_list);
do { \
if (debug) \
printk (KERN_DEBUG "%s: " format "\n", \
- MY_NAME , ## arg); \
+ MY_NAME , ## arg); \
} while (0)
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
@@ -287,7 +287,7 @@ static int __init init_slots(void)
hotplug_slot->release = &release_slot;
make_slot_name(slot);
hotplug_slot->ops = &skel_hotplug_slot_ops;
-
+
/*
* Initialize the slot info structure with some known
* good values.
@@ -296,7 +296,7 @@ static int __init init_slots(void)
get_attention_status(hotplug_slot, &info->attention_status);
get_latch_status(hotplug_slot, &info->latch_status);
get_adapter_status(hotplug_slot, &info->adapter_status);
-
+
dbg("registering slot %d\n", i);
retval = pci_hp_register(slot->hotplug_slot);
if (retval) {
@@ -336,7 +336,7 @@ static void __exit cleanup_slots(void)
pci_hp_deregister(slot->hotplug_slot);
}
}
-
+
static int __init pcihp_skel_init(void)
{
int retval;
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index bb7af78e4eed..e9c044d15add 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -217,7 +217,7 @@ static int dlpar_remove_phb(char *drc_name, struct device_node *dn)
if (!pcibios_find_pci_bus(dn))
return -EINVAL;
- /* If pci slot is hotplugable, use hotplug to remove it */
+ /* If pci slot is hotpluggable, use hotplug to remove it */
slot = find_php_slot(dn);
if (slot && rpaphp_deregister_slot(slot)) {
printk(KERN_ERR "%s: unable to remove hotplug slot %s\n",
diff --git a/drivers/pci/hotplug/rpaphp.h b/drivers/pci/hotplug/rpaphp.h
index 3135856e5e1c..b2593e876a09 100644
--- a/drivers/pci/hotplug/rpaphp.h
+++ b/drivers/pci/hotplug/rpaphp.h
@@ -49,9 +49,9 @@
extern bool rpaphp_debug;
#define dbg(format, arg...) \
do { \
- if (rpaphp_debug) \
+ if (rpaphp_debug) \
printk(KERN_DEBUG "%s: " format, \
- MY_NAME , ## arg); \
+ MY_NAME , ## arg); \
} while (0)
#define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
@@ -99,5 +99,5 @@ void dealloc_slot_struct(struct slot *slot);
struct slot *alloc_slot_struct(struct device_node *dn, int drc_index, char *drc_name, int power_domain);
int rpaphp_register_slot(struct slot *slot);
int rpaphp_deregister_slot(struct slot *slot);
-
+
#endif /* _PPC64PHP_H */
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 127d6e600185..b7fc5c9255a5 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -226,7 +226,7 @@ int rpaphp_get_drc_props(struct device_node *dn, int *drc_index,
for (i = 0; i < indexes[0]; i++) {
if ((unsigned int) indexes[i + 1] == *my_index) {
if (drc_name)
- *drc_name = name_tmp;
+ *drc_name = name_tmp;
if (drc_type)
*drc_type = type_tmp;
if (drc_index)
@@ -289,7 +289,7 @@ static int is_php_dn(struct device_node *dn, const int **indexes,
* rpaphp_add_slot -- declare a hotplug slot to the hotplug subsystem.
* @dn: device node of slot
*
- * This subroutine will register a hotplugable slot with the
+ * This subroutine will register a hotpluggable slot with the
* PCI hotplug infrastructure. This routine is typically called
* during boot time, if the hotplug slots are present at boot time,
* or is called later, by the dlpar add code, if the slot is
@@ -328,7 +328,7 @@ int rpaphp_add_slot(struct device_node *dn)
return -ENOMEM;
slot->type = simple_strtoul(type, NULL, 10);
-
+
dbg("Found drc-index:0x%x drc-name:%s drc-type:%s\n",
indexes[i + 1], name, type);
@@ -356,7 +356,7 @@ static void __exit cleanup_slots(void)
/*
* Unregister all of our slots with the pci_hotplug subsystem,
* and free up all memory that we had allocated.
- * memory will be freed in release_slot callback.
+ * memory will be freed in release_slot callback.
*/
list_for_each_safe(tmp, n, &rpaphp_slot_head) {
diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c
index 513e1e282391..9243f3e7a1c9 100644
--- a/drivers/pci/hotplug/rpaphp_pci.c
+++ b/drivers/pci/hotplug/rpaphp_pci.c
@@ -44,7 +44,7 @@ int rpaphp_get_sensor_state(struct slot *slot, int *state)
dbg("%s: slot must be power up to get sensor-state\n",
__func__);
- /* some slots have to be powered up
+ /* some slots have to be powered up
* before get-sensor will succeed.
*/
rc = rtas_set_power_level(slot->power_domain, POWER_ON,
@@ -133,4 +133,3 @@ int rpaphp_enable_slot(struct slot *slot)
return 0;
}
-
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index b283bbea6d24..a6082cc263f7 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -1,5 +1,5 @@
/*
- * RPA Virtual I/O device functions
+ * RPA Virtual I/O device functions
* Copyright (C) 2004 Linda Xie <lxie@us.ibm.com>
*
* All rights reserved.
@@ -51,27 +51,27 @@ struct slot *alloc_slot_struct(struct device_node *dn,
int drc_index, char *drc_name, int power_domain)
{
struct slot *slot;
-
+
slot = kzalloc(sizeof(struct slot), GFP_KERNEL);
if (!slot)
goto error_nomem;
slot->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
if (!slot->hotplug_slot)
- goto error_slot;
+ goto error_slot;
slot->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
GFP_KERNEL);
if (!slot->hotplug_slot->info)
goto error_hpslot;
slot->name = kstrdup(drc_name, GFP_KERNEL);
if (!slot->name)
- goto error_info;
+ goto error_info;
slot->dn = dn;
slot->index = drc_index;
slot->power_domain = power_domain;
slot->hotplug_slot->private = slot;
slot->hotplug_slot->ops = &rpaphp_hotplug_slot_ops;
slot->hotplug_slot->release = &rpaphp_release_slot;
-
+
return (slot);
error_info:
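
alloc_slot_struct() above is a textbook example of the kernel's goto-based unwind: each allocation failure jumps to a label that releases everything acquired so far, in reverse order. The same shape in standalone C:

	#include <stdlib.h>
	#include <string.h>

	struct widget {
		char *name;
		int  *stats;
	};

	static struct widget *widget_alloc(const char *name)
	{
		struct widget *w = calloc(1, sizeof(*w));

		if (!w)
			goto err;
		w->stats = calloc(16, sizeof(*w->stats));
		if (!w->stats)
			goto err_free_w;
		w->name = strdup(name);
		if (!w->name)
			goto err_free_stats;
		return w;

	err_free_stats:		/* unwind in reverse order of acquisition */
		free(w->stats);
	err_free_w:
		free(w);
	err:
		return NULL;
	}
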
@@ -91,7 +91,7 @@ static int is_registered(struct slot *slot)
list_for_each_entry(tmp_slot, &rpaphp_slot_head, rpaphp_slot_list) {
if (!strcmp(tmp_slot->name, slot->name))
return 1;
- }
+ }
return 0;
}
@@ -104,7 +104,7 @@ int rpaphp_deregister_slot(struct slot *slot)
__func__, slot->name);
list_del(&slot->rpaphp_slot_list);
-
+
retval = pci_hp_deregister(php_slot);
if (retval)
err("Problem unregistering a slot %s\n", slot->name);
@@ -120,7 +120,7 @@ int rpaphp_register_slot(struct slot *slot)
int retval;
int slotno;
- dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n",
+ dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n",
__func__, slot->dn->full_name, slot->index, slot->name,
slot->power_domain, slot->type);
@@ -128,7 +128,7 @@ int rpaphp_register_slot(struct slot *slot)
if (is_registered(slot)) {
err("rpaphp_register_slot: slot[%s] is already registered\n", slot->name);
return -EAGAIN;
- }
+ }
if (slot->dn->child)
slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn);
@@ -145,4 +145,3 @@ int rpaphp_register_slot(struct slot *slot)
info("Slot [%s] registered\n", slot->name);
return 0;
}
-
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index b2781dfe60e9..5b05a68cca6c 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -9,6 +9,7 @@
* Work to add BIOS PROM support was completed by Mike Habeck.
*/
+#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -29,7 +30,6 @@
#include <asm/sn/sn_feature_sets.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/types.h>
-#include <linux/acpi.h>
#include <asm/sn/acpi.h>
#include "../pci.h"
@@ -414,7 +414,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
acpi_handle rethandle;
acpi_status ret;
- phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle;
+ phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion);
if (acpi_bus_get_device(phandle, &pdevice)) {
dev_dbg(&slot->pci_bus->self->dev,
@@ -495,7 +495,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
/* free the ACPI resources for the slot */
if (SN_ACPI_BASE_SUPPORT() &&
- PCI_CONTROLLER(slot->pci_bus)->acpi_handle) {
+ PCI_CONTROLLER(slot->pci_bus)->companion) {
unsigned long long adr;
struct acpi_device *device;
acpi_handle phandle;
@@ -504,7 +504,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
acpi_status ret;
/* Get the rootbus node pointer */
- phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle;
+ phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion);
acpi_scan_lock_acquire();
/*
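
The replacement of the raw acpi_handle field with a struct acpi_device *companion relies on acpi_device_handle() as a NULL-safe accessor. Roughly what that inline does (simplified, name here hypothetical):

	/* NULL-safe: a controller without a companion yields a NULL handle. */
	static inline acpi_handle companion_to_handle(struct acpi_device *adev)
	{
		return adev ? adev->handle : NULL;
	}
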
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index d876e4b3c6a9..61529097464d 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -216,13 +216,13 @@ struct ctrl_reg {
/* offsets to the controller registers based on the above structure layout */
enum ctrl_offsets {
- BASE_OFFSET = offsetof(struct ctrl_reg, base_offset),
- SLOT_AVAIL1 = offsetof(struct ctrl_reg, slot_avail1),
+ BASE_OFFSET = offsetof(struct ctrl_reg, base_offset),
+ SLOT_AVAIL1 = offsetof(struct ctrl_reg, slot_avail1),
SLOT_AVAIL2 = offsetof(struct ctrl_reg, slot_avail2),
- SLOT_CONFIG = offsetof(struct ctrl_reg, slot_config),
+ SLOT_CONFIG = offsetof(struct ctrl_reg, slot_config),
SEC_BUS_CONFIG = offsetof(struct ctrl_reg, sec_bus_config),
MSI_CTRL = offsetof(struct ctrl_reg, msi_ctrl),
- PROG_INTERFACE = offsetof(struct ctrl_reg, prog_interface),
+ PROG_INTERFACE = offsetof(struct ctrl_reg, prog_interface),
CMD = offsetof(struct ctrl_reg, cmd),
CMD_STATUS = offsetof(struct ctrl_reg, cmd_status),
INTR_LOC = offsetof(struct ctrl_reg, intr_loc),
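
Deriving the register offsets with offsetof(), as this enum does, keeps the offsets and the struct layout from drifting apart. A standalone sketch of the technique:

	#include <stddef.h>
	#include <stdint.h>

	struct regs {
		uint32_t base_offset;
		uint32_t slot_avail1;
		uint32_t slot_avail2;
		uint16_t slot_config;
	} __attribute__((packed));

	enum reg_offsets {
		REG_BASE     = offsetof(struct regs, base_offset),	/* 0 */
		REG_AVAIL1   = offsetof(struct regs, slot_avail1),	/* 4 */
		REG_AVAIL2   = offsetof(struct regs, slot_avail2),	/* 8 */
		REG_SLOTCONF = offsetof(struct regs, slot_config),	/* 12 */
	};
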
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index d3f757df691c..faf13abd5b99 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -143,11 +143,11 @@ static int init_slots(struct controller *ctrl)
snprintf(name, SLOT_NAME_SIZE, "%d", slot->number);
hotplug_slot->ops = &shpchp_hotplug_slot_ops;
- ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x "
- "hp_slot=%x sun=%x slot_device_offset=%x\n",
- pci_domain_nr(ctrl->pci_dev->subordinate),
- slot->bus, slot->device, slot->hp_slot, slot->number,
- ctrl->slot_device_offset);
+ ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x "
+ "hp_slot=%x sun=%x slot_device_offset=%x\n",
+ pci_domain_nr(ctrl->pci_dev->subordinate),
+ slot->bus, slot->device, slot->hp_slot, slot->number,
+ ctrl->slot_device_offset);
retval = pci_hp_register(slot->hotplug_slot,
ctrl->pci_dev->subordinate, slot->device, name);
if (retval) {
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 75ba2311b54f..2d7f474ca0ec 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -116,7 +116,7 @@
#define SLOT_REG_RSVDZ_MASK ((1 << 15) | (7 << 21))
/*
- * SHPC Command Code definitnions
+ * SHPC Command Code definitions
*
* Slot Operation 00h - 3Fh
* Set Bus Segment Speed/Mode A 40h - 47h
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c
index 1b90579b233a..50ce68098298 100644
--- a/drivers/pci/ioapic.c
+++ b/drivers/pci/ioapic.c
@@ -37,7 +37,7 @@ static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent)
char *type;
struct resource *res;
- handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ handle = ACPI_HANDLE(&dev->dev);
if (!handle)
return -EINVAL;
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 21a7182dccd4..1fe2d6fb19d5 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -610,7 +610,7 @@ resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
struct resource tmp;
enum pci_bar_type type;
int reg = pci_iov_resource_bar(dev, resno, &type);
-
+
if (!reg)
return 0;
diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c
index b008cf86b9c3..6684f153ab57 100644
--- a/drivers/pci/irq.c
+++ b/drivers/pci/irq.c
@@ -25,7 +25,7 @@ static void pci_note_irq_problem(struct pci_dev *pdev, const char *reason)
/**
* pci_lost_interrupt - reports a lost PCI interrupt
* @pdev: device whose interrupt is lost
- *
+ *
* The primary function of this routine is to report a lost interrupt
* in a standard way which users can recognise (instead of blaming the
* driver).
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 5e63645a7abe..3fcd67a16677 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -784,7 +784,7 @@ error:
* @nvec: how many MSIs have been requested ?
* @type: are we checking for MSI or MSI-X ?
*
- * Look at global flags, the device itself, and its parent busses
+ * Look at global flags, the device itself, and its parent buses
* to determine if MSI/-X are supported for the device. If MSI/-X is
* supported return 0, else return an error code.
**/
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index dfd1f59de729..577074efbe62 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -141,7 +141,7 @@ phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
* if (_PRW at S-state x)
* choose from highest power _SxD to lowest power _SxW
* else // no _PRW at S-state x
- * choose highest power _SxD or any lower power
+ * choose highest power _SxD or any lower power
*/
static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
@@ -173,14 +173,14 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
static bool acpi_pci_power_manageable(struct pci_dev *dev)
{
- acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ acpi_handle handle = ACPI_HANDLE(&dev->dev);
return handle ? acpi_bus_power_manageable(handle) : false;
}
static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
- acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ acpi_handle handle = ACPI_HANDLE(&dev->dev);
static const u8 state_conv[] = {
[PCI_D0] = ACPI_STATE_D0,
[PCI_D1] = ACPI_STATE_D1,
@@ -217,7 +217,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
static bool acpi_pci_can_wakeup(struct pci_dev *dev)
{
- acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ acpi_handle handle = ACPI_HANDLE(&dev->dev);
return handle ? acpi_bus_can_wakeup(handle) : false;
}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 454853507b7e..25f0bc659164 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -19,6 +19,7 @@
#include <linux/cpu.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
+#include <linux/kexec.h>
#include "pci.h"
struct pci_dynid {
@@ -288,12 +289,27 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
int error, node;
struct drv_dev_and_id ddi = { drv, dev, id };
- /* Execute driver initialization on node where the device's
- bus is attached to. This way the driver likely allocates
- its local memory on the right node without any need to
- change it. */
+ /*
+ * Execute driver initialization on node where the device is
+ * attached. This way the driver likely allocates its local memory
+ * on the right node.
+ */
node = dev_to_node(&dev->dev);
- if (node >= 0) {
+
+ /*
+ * On NUMA systems, we are likely to call a PF probe function using
+ * work_on_cpu(). If that probe calls pci_enable_sriov() (which
+ * adds the VF devices via pci_bus_add_device()), we may re-enter
+ * this function to call the VF probe function. Calling
+ * work_on_cpu() again will cause a lockdep warning. Since VFs are
+ * always on the same node as the PF, we can work around this by
+ * avoiding work_on_cpu() when we're already on the correct node.
+ *
+ * Preemption is enabled, so it's theoretically unsafe to use
+ * numa_node_id(), but even if we run the probe function on the
+ * wrong node, it should be functionally correct.
+ */
+ if (node >= 0 && node != numa_node_id()) {
int cpu;
get_online_cpus();
@@ -305,6 +321,7 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
put_online_cpus();
} else
error = local_pci_probe(&ddi);
+
return error;
}
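
The hunk shows only the edges of the node-affine path; the elided middle runs the probe on a CPU local to the device's node via work_on_cpu(). A hedged reconstruction of that shape (the cpumask_any_and() selection is an assumption, not quoted from the patch):

	node = dev_to_node(&dev->dev);
	if (node >= 0 && node != numa_node_id()) {
		int cpu;

		get_online_cpus();
		/* assumed: pick any online CPU on the device's node */
		cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
		if (cpu < nr_cpu_ids)
			error = work_on_cpu(cpu, local_pci_probe, &ddi);
		else
			error = local_pci_probe(&ddi);
		put_online_cpus();
	} else
		error = local_pci_probe(&ddi);
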
@@ -312,7 +329,7 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
* __pci_device_probe - check if a driver wants to claim a specific PCI device
* @drv: driver to call to check if it wants the PCI device
* @pci_dev: PCI device being probed
- *
+ *
* returns 0 on success, else error.
* side-effect: pci_dev->driver is set to drv when drv claims pci_dev.
*/
@@ -378,7 +395,7 @@ static int pci_device_remove(struct device * dev)
* We would love to complain here if pci_dev->is_enabled is set, that
* the driver should have called pci_disable_device(), but the
* unfortunate fact is there are too many odd BIOS and bridge setups
- * that don't like drivers doing that all of the time.
+ * that don't like drivers doing that all of the time.
* Oh well, we can dream of sane hardware when we sleep, no matter how
* horrible the crap we have to deal with is when we are awake...
*/
@@ -399,12 +416,17 @@ static void pci_device_shutdown(struct device *dev)
pci_msi_shutdown(pci_dev);
pci_msix_shutdown(pci_dev);
+#ifdef CONFIG_KEXEC
/*
- * Turn off Bus Master bit on the device to tell it to not
- * continue to do DMA. Don't touch devices in D3cold or unknown states.
+ * If this is a kexec reboot, turn off Bus Master bit on the
+ * device to tell it to not continue to do DMA. Don't touch
+ * devices in D3cold or unknown states.
+ * If it is not a kexec reboot, firmware will hit the PCI
+ * devices with big hammer and stop their DMA any way.
*/
- if (pci_dev->current_state <= PCI_D3hot)
+ if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))
pci_clear_master(pci_dev);
+#endif
}
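
pci_clear_master() used above boils down to clearing one bit in the command register (the real helper also updates dev->is_busmaster, omitted here). Approximately:

	static void clear_bus_master(struct pci_dev *dev)
	{
		u16 cmd;

		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & PCI_COMMAND_MASTER) {
			cmd &= ~PCI_COMMAND_MASTER;	/* stop initiating DMA */
			pci_write_config_word(dev, PCI_COMMAND, cmd);
		}
	}
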
#ifdef CONFIG_PM
@@ -1156,10 +1178,10 @@ static const struct dev_pm_ops pci_dev_pm_ops = {
* @drv: the driver structure to register
* @owner: owner module of drv
* @mod_name: module name string
- *
+ *
* Adds the driver structure to the list of registered drivers.
- * Returns a negative value on error, otherwise 0.
- * If no error occurred, the driver remains registered even if
+ * Returns a negative value on error, otherwise 0.
+ * If no error occurred, the driver remains registered even if
* no device was claimed during registration.
*/
int __pci_register_driver(struct pci_driver *drv, struct module *owner,
@@ -1181,7 +1203,7 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner,
/**
* pci_unregister_driver - unregister a pci driver
* @drv: the driver structure to unregister
- *
+ *
* Deletes the driver structure from the list of registered PCI drivers,
* gives it a chance to clean up by calling its remove() function for
* each device it was responsible for, and marks those devices as
@@ -1203,7 +1225,7 @@ static struct pci_driver pci_compat_driver = {
* pci_dev_driver - get the pci_driver of a device
* @dev: the device to query
*
- * Returns the appropriate pci_driver structure or %NULL if there is no
+ * Returns the appropriate pci_driver structure or %NULL if there is no
* registered driver for the device.
*/
struct pci_driver *
@@ -1224,7 +1246,7 @@ pci_dev_driver(const struct pci_dev *dev)
* pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure
* @dev: the PCI device structure to match against
* @drv: the device driver to search for matching PCI device id structures
- *
+ *
* Used by a driver to check whether a PCI device present in the
* system is in its list of supported devices. Returns the matching
* pci_device_id structure or %NULL if there is no match.
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index edaed6f4da6c..d51f45aa669e 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -263,7 +263,7 @@ device_has_dsm(struct device *dev)
acpi_handle handle;
struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
- handle = DEVICE_ACPI_HANDLE(dev);
+ handle = ACPI_HANDLE(dev);
if (!handle)
return FALSE;
@@ -295,7 +295,7 @@ acpilabel_show(struct device *dev, struct device_attribute *attr, char *buf)
acpi_handle handle;
int length;
- handle = DEVICE_ACPI_HANDLE(dev);
+ handle = ACPI_HANDLE(dev);
if (!handle)
return -1;
@@ -316,7 +316,7 @@ acpiindex_show(struct device *dev, struct device_attribute *attr, char *buf)
acpi_handle handle;
int length;
- handle = DEVICE_ACPI_HANDLE(dev);
+ handle = ACPI_HANDLE(dev);
if (!handle)
return -1;
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
index 6e47c519c510..2ff77509d8e5 100644
--- a/drivers/pci/pci-stub.c
+++ b/drivers/pci/pci-stub.c
@@ -2,13 +2,13 @@
*
* Copyright (C) 2008 Red Hat, Inc.
* Author:
- * Chris Wright
+ * Chris Wright
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
* Usage is simple, allocate a new id to the stub driver and bind the
* device to it. For example:
- *
+ *
* # echo "8086 10f5" > /sys/bus/pci/drivers/pci-stub/new_id
* # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/e1000e/unbind
* # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/pci-stub/bind
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 2aaa83c85a4e..c91e6c18debc 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -10,7 +10,7 @@
*
* File attributes for PCI devices
*
- * Modeled after usb's driverfs.c
+ * Modeled after usb's driverfs.c
*
*/
@@ -270,13 +270,17 @@ msi_bus_store(struct device *dev, struct device_attribute *attr,
if (kstrtoul(buf, 0, &val) < 0)
return -EINVAL;
- /* bad things may happen if the no_msi flag is changed
- * while some drivers are loaded */
+ /*
+ * Bad things may happen if the no_msi flag is changed
+ * while drivers are loaded.
+ */
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- /* Maybe pci devices without subordinate busses shouldn't even have this
- * attribute in the first place? */
+ /*
+ * Maybe devices without subordinate buses shouldn't have this
+ * attribute in the first place?
+ */
if (!pdev->subordinate)
return count;
@@ -670,7 +674,7 @@ pci_write_config(struct file* filp, struct kobject *kobj,
size = dev->cfg_size - off;
count = size;
}
-
+
pci_config_pm_runtime_get(dev);
if ((off & 1) && size) {
@@ -678,7 +682,7 @@ pci_write_config(struct file* filp, struct kobject *kobj,
off++;
size--;
}
-
+
if ((off & 3) && size > 2) {
u16 val = data[off - init_off];
val |= (u16) data[off - init_off + 1] << 8;
@@ -696,7 +700,7 @@ pci_write_config(struct file* filp, struct kobject *kobj,
off += 4;
size -= 4;
}
-
+
if (size >= 2) {
u16 val = data[off - init_off];
val |= (u16) data[off - init_off + 1] << 8;
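
The staged byte/word/dword writes above are the standard way to honor access-size rules: peel an unaligned head byte, then a head half-word, stream the aligned bulk as dwords, and mirror the two steps for the tail. The same pattern over a plain buffer (the out8/out16/out32 accessors are hypothetical):

	#include <stddef.h>
	#include <stdint.h>

	void out8(unsigned int off, uint8_t v);	/* size-restricted accessors */
	void out16(unsigned int off, uint16_t v);
	void out32(unsigned int off, uint32_t v);

	static void write_region(unsigned int off, const uint8_t *p, size_t n)
	{
		if ((off & 1) && n) {			/* unaligned head byte */
			out8(off, *p);
			off++; p++; n--;
		}
		if ((off & 3) && n >= 2) {		/* head half-word */
			out16(off, p[0] | (uint16_t)p[1] << 8);
			off += 2; p += 2; n -= 2;
		}
		while (n >= 4) {			/* aligned bulk */
			out32(off, p[0] | (uint32_t)p[1] << 8 |
				   (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24);
			off += 4; p += 4; n -= 4;
		}
		if (n >= 2) {				/* tail half-word */
			out16(off, p[0] | (uint16_t)p[1] << 8);
			off += 2; p += 2; n -= 2;
		}
		if (n)					/* tail byte */
			out8(off, *p);
	}
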
@@ -1229,21 +1233,21 @@ pci_read_rom(struct file *filp, struct kobject *kobj,
if (!pdev->rom_attr_enabled)
return -EINVAL;
-
+
rom = pci_map_rom(pdev, &size); /* size starts out as PCI window size */
if (!rom || !size)
return -EIO;
-
+
if (off >= size)
count = 0;
else {
if (off + count > size)
count = size - off;
-
+
memcpy_fromio(buf, rom + off, count);
}
pci_unmap_rom(pdev, rom);
-
+
return count;
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index b127fbda6fc8..07369f32e8bb 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -198,7 +198,7 @@ static int __pci_bus_find_cap_start(struct pci_bus *bus,
}
/**
- * pci_find_capability - query for devices' capabilities
+ * pci_find_capability - query for devices' capabilities
* @dev: PCI device to query
* @cap: capability code
*
@@ -207,12 +207,12 @@ static int __pci_bus_find_cap_start(struct pci_bus *bus,
* device's PCI configuration space or 0 in case the device does not
* support it. Possible values for @cap:
*
- * %PCI_CAP_ID_PM Power Management
- * %PCI_CAP_ID_AGP Accelerated Graphics Port
- * %PCI_CAP_ID_VPD Vital Product Data
- * %PCI_CAP_ID_SLOTID Slot Identification
+ * %PCI_CAP_ID_PM Power Management
+ * %PCI_CAP_ID_AGP Accelerated Graphics Port
+ * %PCI_CAP_ID_VPD Vital Product Data
+ * %PCI_CAP_ID_SLOTID Slot Identification
* %PCI_CAP_ID_MSI Message Signalled Interrupts
- * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
+ * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
* %PCI_CAP_ID_PCIX PCI-X
* %PCI_CAP_ID_EXP PCI Express
*/
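
The capability search documented here is a bounded walk of the config-space linked list: follow each next pointer, compare the capability ID, and cap the iteration count so a malformed list cannot loop forever. The core loop, sketched:

	/*
	 * 'pos' initially points at the first next-capability pointer,
	 * e.g. PCI_CAPABILITY_LIST (0x34) for an ordinary header.
	 */
	static u8 find_cap(struct pci_dev *dev, u8 want, u8 pos)
	{
		int ttl = 48;			/* enough for any legal list */
		u8 id;

		while (ttl--) {
			pci_read_config_byte(dev, pos, &pos);
			if (pos < 0x40)		/* 0, or points into the header */
				break;
			pos &= ~3;		/* pointers are dword-aligned */
			pci_read_config_byte(dev, pos + PCI_CAP_LIST_ID, &id);
			if (id == 0xff)		/* device fell off the bus */
				break;
			if (id == want)
				return pos;
			pos += PCI_CAP_LIST_NEXT;
		}
		return 0;
	}
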
@@ -228,13 +228,13 @@ int pci_find_capability(struct pci_dev *dev, int cap)
}
/**
- * pci_bus_find_capability - query for devices' capabilities
+ * pci_bus_find_capability - query for devices' capabilities
* @bus: the PCI bus to query
* @devfn: PCI device to query
* @cap: capability code
*
* Like pci_find_capability() but works for pci devices that do not have a
- * pci_dev structure set up yet.
+ * pci_dev structure set up yet.
*
* Returns the address of the requested capability structure within the
* device's PCI configuration space or 0 in case the device does not
@@ -515,7 +515,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
return -EINVAL;
/* Validate current state:
- * Can enter D0 from any state, but if we can only go deeper
+ * Can enter D0 from any state, but if we can only go deeper
* to sleep if we're already in a low power state
*/
if (state != PCI_D0 && dev->current_state <= PCI_D3cold
@@ -998,7 +998,7 @@ static void pci_restore_config_space(struct pci_dev *pdev)
}
}
-/**
+/**
* pci_restore_state - Restore the saved state of a PCI device
* @dev: - PCI device that we're dealing with
*/
@@ -1030,7 +1030,7 @@ struct pci_saved_state {
* the device saved state.
* @dev: PCI device that we're dealing with
*
- * Rerturn NULL if no state or error.
+ * Return NULL if no state or error.
*/
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
@@ -1880,7 +1880,7 @@ int pci_finish_runtime_suspend(struct pci_dev *dev)
* pci_dev_run_wake - Check if device can generate run-time wake-up events.
* @dev: Device to check.
*
- * Return true if the device itself is cabable of generating wake-up events
+ * Return true if the device itself is capable of generating wake-up events
* (through the platform or using the native PCIe PME) or if the device supports
* PME and one of its upstream bridges can generate wake-up events.
*/
@@ -2447,7 +2447,7 @@ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
switch (pci_pcie_type(pdev)) {
/*
* PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
- * but since their primary inteface is PCI/X, we conservatively
+ * but since their primary interface is PCI/X, we conservatively
* handle them as we would a non-PCIe device.
*/
case PCI_EXP_TYPE_PCIE_BRIDGE:
@@ -2471,7 +2471,7 @@ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
/*
* PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
* implemented by the remaining PCIe types to indicate peer-to-peer
- * capabilities, but only when they are part of a multifunciton
+ * capabilities, but only when they are part of a multifunction
* device. The footnote for section 6.12 indicates the specific
* PCIe types included here.
*/
@@ -2486,7 +2486,7 @@ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
}
/*
- * PCIe 3.0, 6.12.1.3 specifies no ACS capabilties are applicable
+ * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
* to single function devices with the exception of downstream ports.
*/
return true;
@@ -2622,7 +2622,7 @@ void pci_release_region(struct pci_dev *pdev, int bar)
*
* If @exclusive is set, then the region is marked so that userspace
* is explicitly not allowed to map the resource via /dev/mem or
- * sysfs MMIO access.
+ * sysfs MMIO access.
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
@@ -2634,7 +2634,7 @@ static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_n
if (pci_resource_len(pdev, bar) == 0)
return 0;
-
+
if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
if (!request_region(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar), res_name))
@@ -2694,7 +2694,7 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
*
* The key difference that _exclusive makes it that userspace is
* explicitly not allowed to map the resource via /dev/mem or
- * sysfs.
+ * sysfs.
*/
int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
{
@@ -2799,7 +2799,7 @@ int pci_request_regions(struct pci_dev *pdev, const char *res_name)
* successfully.
*
* pci_request_regions_exclusive() will mark the region so that
- * /dev/mem and the sysfs MMIO access will not be allowed.
+ * /dev/mem and the sysfs MMIO access will not be allowed.
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
@@ -2967,7 +2967,7 @@ pci_set_mwi(struct pci_dev *dev)
cmd |= PCI_COMMAND_INVALIDATE;
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
-
+
return 0;
}
@@ -3292,7 +3292,7 @@ clear:
*
* NOTE: This causes the caller to sleep for twice the device power transition
* cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
- * by devault (i.e. unless the @dev's d3_delay field has a different value).
+ * by default (i.e. unless the @dev's d3_delay field has a different value).
* Moreover, only devices in D0 can be reset by this function.
*/
static int pci_pm_reset(struct pci_dev *dev, int probe)
@@ -3341,7 +3341,7 @@ void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
/*
* PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
- * this to 2ms to ensure that we meet the minium requirement.
+ * this to 2ms to ensure that we meet the minimum requirement.
*/
msleep(2);
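
The msleep(2) above is the hold time between asserting and de-asserting the bridge's secondary-bus-reset bit; the assertion itself is outside the hunk. The full sequence, roughly (the final settle delay is an assumption):

	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
	msleep(2);				/* hold reset: 2x the 1 ms Trst */

	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
	msleep(100);				/* assumed settle time */
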
@@ -3998,7 +3998,7 @@ int pcie_set_mps(struct pci_dev *dev, int mps)
return -EINVAL;
v = ffs(mps) - 8;
- if (v > dev->pcie_mpss)
+ if (v > dev->pcie_mpss)
return -EINVAL;
v <<= 5;
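
The ffs(mps) - 8 conversion above turns a byte count into the 3-bit MPS encoding of the PCIe Device Control register (000b = 128 bytes, each step doubling), then shifts it into bits 7:5. Worked through standalone:

	/* Example: mps = 256 gives ffs(256) = 9, v = 1, field = 1 << 5 = 0x20. */
	static int encode_mps(int mps)
	{
		int v;

		if (mps < 128 || mps > 4096 || (mps & (mps - 1)))
			return -1;		/* must be a power of two in range */
		v = __builtin_ffs(mps) - 8;	/* 128 -> 0, 256 -> 1, ..., 4096 -> 5 */
		return v << 5;			/* bits 7:5 of Device Control */
	}
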
@@ -4165,6 +4165,14 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
return 0;
}
+bool pci_device_is_present(struct pci_dev *pdev)
+{
+ u32 v;
+
+ return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
+}
+EXPORT_SYMBOL_GPL(pci_device_is_present);
+
#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
static DEFINE_SPINLOCK(resource_alignment_lock);
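
pci_device_is_present(), added above, answers "is the device still responding?" by re-reading the vendor/device ID dword; a surprise-removed device reads back as all ones. A hedged usage sketch (caller name hypothetical):

	static void maybe_quiesce(struct pci_dev *pdev)
	{
		/*
		 * After a surprise removal config reads return ~0, so skip
		 * hardware access and go straight to software cleanup.
		 */
		if (!pci_device_is_present(pdev)) {
			pr_info("%s: device gone, skipping quiesce\n",
				pci_name(pdev));
			return;
		}
		pci_clear_master(pdev);		/* still present: stop its DMA */
	}
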
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 6b3a958e1be6..b2c8881da764 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -525,7 +525,7 @@ static void handle_error_source(struct pcie_device *aerdev,
if (info->severity == AER_CORRECTABLE) {
/*
- * Correctable error does not need software intevention.
+ * Correctable error does not need software intervention.
* No need to go through error recovery process.
*/
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 403a44374ed5..f1272dc54de1 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -548,7 +548,7 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
/*
* pcie_aspm_init_link_state: Initiate PCI express link state.
- * It is called after the pcie and its children devices are scaned.
+ * It is called after the pcie and its children devices are scanned.
* @pdev: the root port or switch downstream port
*/
void pcie_aspm_init_link_state(struct pci_dev *pdev)
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index e56e594ce112..bbc3bdd2b189 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -419,8 +419,8 @@ static void pcie_pme_remove(struct pcie_device *srv)
static struct pcie_port_service_driver pcie_pme_driver = {
.name = "pcie_pme",
- .port_type = PCI_EXP_TYPE_ROOT_PORT,
- .service = PCIE_PORT_SERVICE_PME,
+ .port_type = PCI_EXP_TYPE_ROOT_PORT,
+ .service = PCIE_PORT_SERVICE_PME,
.probe = pcie_pme_probe,
.suspend = pcie_pme_suspend,
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index d2eb80aab569..d525548404d6 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -14,7 +14,7 @@
#define PCIE_PORT_DEVICE_MAXSERVICES 4
/*
* According to the PCI Express Base Specification 2.0, the indices of
- * the MSI-X table entires used by port services must not exceed 31
+ * the MSI-X table entries used by port services must not exceed 31
*/
#define PCIE_PORT_MAX_MSIX_ENTRIES 32
diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c
index 67be55a7f260..87e79a6ffb5a 100644
--- a/drivers/pci/pcie/portdrv_bus.c
+++ b/drivers/pci/pcie/portdrv_bus.c
@@ -18,8 +18,8 @@
static int pcie_port_bus_match(struct device *dev, struct device_driver *drv);
struct bus_type pcie_port_bus_type = {
- .name = "pci_express",
- .match = pcie_port_bus_match,
+ .name = "pci_express",
+ .match = pcie_port_bus_match,
};
EXPORT_SYMBOL_GPL(pcie_port_bus_type);
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 08d131f7815b..0b6e76604068 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -46,7 +46,7 @@ static void release_pcie_device(struct device *dev)
* pcie_port_msix_add_entry - add entry to given array of MSI-X entries
* @entries: Array of MSI-X entries
* @new_entry: Index of the entry to add to the array
- * @nr_entries: Number of entries aleady in the array
+ * @nr_entries: Number of entries already in the array
*
* Return value: Position of the added entry in the array
*/
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 696caed5fdf5..0d8fdc48e642 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -223,7 +223,6 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
static void pcie_portdrv_remove(struct pci_dev *dev)
{
pcie_port_device_remove(dev);
- pci_disable_device(dev);
}
static int error_detected_iter(struct device *device, void *data)
@@ -390,9 +389,9 @@ static struct pci_driver pcie_portdriver = {
.probe = pcie_portdrv_probe,
.remove = pcie_portdrv_remove,
- .err_handler = &pcie_portdrv_err_handler,
+ .err_handler = &pcie_portdrv_err_handler,
- .driver.pm = PCIE_PORTDRV_PM_OPS,
+ .driver.pm = PCIE_PORTDRV_PM_OPS,
};
static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d)
@@ -412,7 +411,7 @@ static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = {
.ident = "MSI Wind U-100",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
- "MICRO-STAR INTERNATIONAL CO., LTD"),
+ "MICRO-STAR INTERNATIONAL CO., LTD"),
DMI_MATCH(DMI_PRODUCT_NAME, "U-100"),
},
},
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 5e14f5a51357..38e403dddf6e 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -582,7 +582,7 @@ static enum pci_bus_speed agp_speed(int agp3, int agpstat)
index = 1;
else
goto out;
-
+
if (agp3) {
index += 2;
if (index == 5)
@@ -789,7 +789,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
}
/* Disable MasterAbortMode during probing to avoid reporting
- of bus errors (in some architectures) */
+ of bus errors (in some architectures) */
pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
@@ -1005,7 +1005,7 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev)
* pci_setup_device - fill in class and map information of a device
* @dev: the device structure to fill
*
- * Initialize the device structure with information about the device's
+ * Initialize the device structure with information about the device's
* vendor,class,memory and IO-space addresses,IRQ lines etc.
* Called at initialisation of the PCI subsystem and by CardBus services.
* Returns 0 on success and negative if unknown type of device (not normal,
@@ -1111,7 +1111,7 @@ int pci_setup_device(struct pci_dev *dev)
goto bad;
/* The PCI-to-PCI bridge spec requires that subtractive
decoding (i.e. transparent) bridge must have programming
- interface code of 0x01. */
+ interface code of 0x01. */
pci_read_irq(dev);
dev->transparent = ((dev->class & 0xff) == 1);
pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
@@ -1570,7 +1570,7 @@ static void pcie_write_mrrs(struct pci_dev *dev)
* subsequent read will verify if the value is acceptable or not.
* If the MRRS value provided is not acceptable (e.g., too large),
* shrink the value until it is acceptable to the HW.
- */
+ */
while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
rc = pcie_set_readrq(dev, mrrs);
if (!rc)
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index cdc7836d7e3d..46d1378f2e9e 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -222,7 +222,7 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
default:
ret = -EINVAL;
break;
- };
+ }
return ret;
}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 91490453c229..3a02717473ad 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -9,10 +9,6 @@
*
* Init/reset quirks for USB host controllers should be in the
* USB quirks file, where their drivers can access reuse it.
- *
- * The bridge optimization stuff has been removed. If you really
- * have a silly BIOS which is unable to set your host bridge right,
- * use the PowerTweak utility (see http://powertweak.sourceforge.net).
*/
#include <linux/types.h>
@@ -55,7 +51,7 @@ static void quirk_mellanox_tavor(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor);
-/* Deal with broken BIOS'es that neglect to enable passive release,
+/* Deal with broken BIOSes that neglect to enable passive release,
which can cause problems in combination with the 82441FX/PPro MTRRs */
static void quirk_passive_release(struct pci_dev *dev)
{
@@ -78,11 +74,11 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_p
/* The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a workaround
but VIA don't answer queries. If you happen to have good contacts at VIA
- ask them for me please -- Alan
-
- This appears to be BIOS not version dependent. So presumably there is a
+ ask them for me please -- Alan
+
+ This appears to be BIOS not version dependent. So presumably there is a
chipset level fix */
-
+
static void quirk_isa_dma_hangs(struct pci_dev *dev)
{
if (!isa_dma_bridge_buggy) {
@@ -97,7 +93,7 @@ static void quirk_isa_dma_hangs(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
@@ -157,10 +153,10 @@ static void quirk_triton(struct pci_dev *dev)
pci_pci_problems |= PCIPCI_TRITON;
}
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton);
/*
* VIA Apollo KT133 needs PCI latency patch
@@ -171,7 +167,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quir
* the info on which Mr Breese based his work.
*
* Updated based on further information from the site and also on
- * information provided by VIA
+ * information provided by VIA
*/
static void quirk_vialatency(struct pci_dev *dev)
{
@@ -179,7 +175,7 @@ static void quirk_vialatency(struct pci_dev *dev)
u8 busarb;
/* Ok we have a potential problem chipset here. Now see if we have
a buggy southbridge */
-
+
p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL);
if (p!=NULL) {
/* 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A; thanks Dan Hollis */
@@ -194,9 +190,9 @@ static void quirk_vialatency(struct pci_dev *dev)
if (p->revision < 0x10 || p->revision > 0x12)
goto exit;
}
-
+
/*
- * Ok we have the problem. Now set the PCI master grant to
+ * Ok we have the problem. Now set the PCI master grant to
* occur every master grant. The apparent bug is that under high
* PCI load (quite common in Linux of course) you can get data
* loss when the CPU is held off the bus for 3 bus master requests
@@ -209,7 +205,7 @@ static void quirk_vialatency(struct pci_dev *dev)
*/
pci_read_config_byte(dev, 0x76, &busarb);
- /* Set bit 4 and bi 5 of byte 76 to 0x01
+ /* Set bit 4 and bit 5 of byte 76 to 0x01
"Master priority rotation on every PCI master grant */
busarb &= ~(1<<5);
busarb |= (1<<4);
@@ -252,7 +248,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx)
* that DMA to AGP space. Latency must be set to 0xA and triton
* workaround applied too
* [Info kindly provided by ALi]
- */
+ */
static void quirk_alimagik(struct pci_dev *dev)
{
if ((pci_pci_problems&PCIPCI_ALIMAGIK)==0) {
@@ -260,8 +256,8 @@ static void quirk_alimagik(struct pci_dev *dev)
pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON;
}
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik);
/*
* Natoma has some interesting boundary conditions with Zoran stuff
@@ -274,12 +270,12 @@ static void quirk_natoma(struct pci_dev *dev)
pci_pci_problems |= PCIPCI_NATOMA;
}
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma);
/*
* This chip can cause PCI parity errors if config register 0xA0 is read
@@ -400,7 +396,7 @@ static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int p
/*
* For now we only print it out. Eventually we'll want to
* reserve it (at least if it's in the 0x1000+ range), but
- * let's get enough confirmation reports first.
+ * let's get enough confirmation reports first.
*/
base &= -size;
dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base + size - 1);
@@ -425,7 +421,7 @@ static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int
}
/*
* For now we only print it out. Eventually we'll want to
- * reserve it, but let's get enough confirmation reports first.
+ * reserve it, but let's get enough confirmation reports first.
*/
base &= -size;
dev_info(&dev->dev, "%s MMIO at %04x-%04x\n", name, base, base + size - 1);
@@ -682,7 +678,7 @@ static void quirk_xio2000a(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
quirk_xio2000a);
-#ifdef CONFIG_X86_IO_APIC
+#ifdef CONFIG_X86_IO_APIC
#include <asm/io_apic.h>
@@ -696,12 +692,12 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
static void quirk_via_ioapic(struct pci_dev *dev)
{
u8 tmp;
-
+
if (nr_ioapics < 1)
tmp = 0; /* nothing routed to external APIC */
else
tmp = 0x1f; /* all known bits (4-0) routed to external APIC */
-
+
dev_info(&dev->dev, "%sbling VIA external APIC routing\n",
tmp == 0 ? "Disa" : "Ena");
@@ -712,7 +708,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_i
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
/*
- * VIA 8237: Some BIOSs don't set the 'Bypass APIC De-Assert Message' Bit.
+ * VIA 8237: Some BIOSes don't set the 'Bypass APIC De-Assert Message' Bit.
* This leads to doubled level interrupt rates.
* Set this bit to get rid of cycle wastage.
* Otherwise uncritical.
@@ -986,7 +982,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, qu
static void quirk_disable_pxb(struct pci_dev *pdev)
{
u16 config;
-
+
if (pdev->revision != 0x04) /* Only C0 requires this */
return;
pci_read_config_word(pdev, 0x40, &config);
@@ -1094,11 +1090,11 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_e
* On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge
* is not activated. The myth is that Asus said that they do not want the
* users to be irritated by just another PCI Device in the Win98 device
- * manager. (see the file prog/hotplug/README.p4b in the lm_sensors
+ * manager. (see the file prog/hotplug/README.p4b in the lm_sensors
* package 2.7.0 for details)
*
- * The SMBus PCI Device can be activated by setting a bit in the ICH LPC
- * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it
+ * The SMBus PCI Device can be activated by setting a bit in the ICH LPC
+ * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it
* becomes necessary to do this tweak in two steps -- the chosen trigger
* is either the Host bridge (preferred) or on-board VGA controller.
*
@@ -1253,7 +1249,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asu
static void asus_hides_smbus_lpc(struct pci_dev *dev)
{
u16 val;
-
+
if (likely(!asus_hides_smbus))
return;
@@ -1640,8 +1636,8 @@ static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
dev->vendor, dev->device);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
-DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
/*
* disable boot interrupts on HT-1000
@@ -1673,8 +1669,8 @@ static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
dev->vendor, dev->device);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
-DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
/*
* disable boot interrupts on AMD and ATI chipsets
@@ -1730,8 +1726,8 @@ static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
dev->vendor, dev->device);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
-DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
#endif /* CONFIG_X86_IO_APIC */
/*
@@ -2127,8 +2123,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1);
#ifdef CONFIG_PCI_MSI
/* Some chipsets do not support MSI. We cannot easily rely on setting
* PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
- * some other busses controlled by the chipset even if Linux is not
- * aware of it. Instead of setting the flag on all busses in the
+ * some other buses controlled by the chipset even if Linux is not
+ * aware of it. Instead of setting the flag on all buses in the
* machine, simply disable MSI globally.
*/
static void quirk_disable_all_msi(struct pci_dev *dev)
@@ -2288,14 +2284,14 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
nvenet_msi_disable);
/*
- * Some versions of the MCP55 bridge from nvidia have a legacy irq routing
- * config register. This register controls the routing of legacy interrupts
- * from devices that route through the MCP55. If this register is misprogramed
- * interrupts are only sent to the bsp, unlike conventional systems where the
- * irq is broadxast to all online cpus. Not having this register set
- * properly prevents kdump from booting up properly, so lets make sure that
- * we have it set correctly.
- * Note this is an undocumented register.
+ * Some versions of the MCP55 bridge from Nvidia have a legacy IRQ routing
+ * config register. This register controls the routing of legacy
+ * interrupts from devices that route through the MCP55. If this register
+ * is misprogrammed, interrupts are only sent to the BSP, unlike
+ * conventional systems where the IRQ is broadcast to all online CPUs. Not
+ * having this register set properly prevents kdump from booting up
+ * properly, so let's make sure that we have it set correctly.
+ * Note that this is an undocumented register.
*/
static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
{
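
A sketch of the kind of fixup the comment describes; since the register is undocumented, the 0x74 offset and the bit positions below are assumptions for illustration, not confirmed by this patch:

#include <linux/pci.h>

/* Illustrative only: read the (undocumented, assumed) routing register
 * and clear the bits that would restrict legacy IRQ delivery to the
 * BSP, so broadcast routing is restored for kdump. */
static void mcp55_fix_legacy_irq_routing(struct pci_dev *dev)
{
	u32 cfg;

	pci_read_config_dword(dev, 0x74, &cfg);
	if (cfg & ((1 << 2) | (1 << 15))) {
		dev_info(&dev->dev, "rewriting MCP55 IRQ routing register\n");
		cfg &= ~((1 << 2) | (1 << 15));
		pci_write_config_dword(dev, 0x74, cfg);
	}
}
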
@@ -2626,7 +2622,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091,
/* Allow manual resource allocation for PCI hotplug bridges
* via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For
* some PCI-PCI hotplug bridges, like PLX 6254 (former HINT HB6),
- * kernel fails to allocate resources when hotplug device is
+ * the kernel fails to allocate resources when a hotplug device is
* inserted and PCI bus is rescanned.
*/
static void quirk_hotplug_bridge(struct pci_dev *dev)
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 8fc54b7327bc..cc9337a71529 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -7,7 +7,7 @@ static void pci_free_resources(struct pci_dev *dev)
{
int i;
- msi_remove_pci_irq_vectors(dev);
+ msi_remove_pci_irq_vectors(dev);
pci_cleanup_rom(dev);
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
@@ -24,7 +24,7 @@ static void pci_stop_dev(struct pci_dev *dev)
if (dev->is_added) {
pci_proc_detach_device(dev);
pci_remove_sysfs_dev_files(dev);
- device_del(&dev->dev);
+ device_release_driver(&dev->dev);
dev->is_added = 0;
}
@@ -34,6 +34,8 @@ static void pci_stop_dev(struct pci_dev *dev)
static void pci_destroy_dev(struct pci_dev *dev)
{
+ device_del(&dev->dev);
+
down_write(&pci_bus_sem);
list_del(&dev->bus_list);
up_write(&pci_bus_sem);
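
Taken together, the two remove.c hunks split teardown into two phases; a consolidated sketch (not a verbatim copy) of the resulting functions:

/* After this change: pci_stop_dev() only unbinds the driver, while
 * device_del() runs in pci_destroy_dev(), just before the device is
 * unlinked from the bus list. */
static void pci_stop_dev_after(struct pci_dev *dev)
{
	if (dev->is_added) {
		pci_proc_detach_device(dev);
		pci_remove_sysfs_dev_files(dev);
		device_release_driver(&dev->dev);
		dev->is_added = 0;
	}
}

static void pci_destroy_dev_after(struct pci_dev *dev)
{
	device_del(&dev->dev);

	down_write(&pci_bus_sem);
	list_del(&dev->bus_list);
	up_write(&pci_bus_sem);
	/* ... remainder unchanged ... */
}
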
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index d0627fa9f368..3ff2ac7c14e2 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -1,5 +1,5 @@
/*
- * PCI searching functions.
+ * PCI searching functions.
*
* Copyright (C) 1993 -- 1997 Drew Eckhardt, Frederic Potter,
* David Mosberger-Tang
@@ -96,12 +96,12 @@ struct pci_bus * pci_find_bus(int domain, int busnr)
* pci_find_next_bus - begin or continue searching for a PCI bus
* @from: Previous PCI bus found, or %NULL for new search.
*
- * Iterates through the list of known PCI busses. A new search is
+ * Iterates through the list of known PCI buses. A new search is
* initiated by passing %NULL as the @from argument. Otherwise if
* @from is not %NULL, searches continue from next device on the
* global list.
*/
-struct pci_bus *
+struct pci_bus *
pci_find_next_bus(const struct pci_bus *from)
{
struct list_head *n;
@@ -119,11 +119,11 @@ pci_find_next_bus(const struct pci_bus *from)
/**
* pci_get_slot - locate PCI device for a given PCI slot
* @bus: PCI bus on which desired PCI device resides
- * @devfn: encodes number of PCI slot in which the desired PCI
- * device resides and the logical device number within that slot
+ * @devfn: encodes number of PCI slot in which the desired PCI
+ * device resides and the logical device number within that slot
* in case of multi-function devices.
*
- * Given a PCI bus and slot/function number, the desired PCI device
+ * Given a PCI bus and slot/function number, the desired PCI device
* is located in the list of PCI devices.
* If the device is found, its reference count is increased and this
* function returns a pointer to its data structure. The caller must
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 4ce83b26ae9e..219a4106480a 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -292,8 +292,8 @@ static void assign_requested_resources_sorted(struct list_head *head,
(!(res->flags & IORESOURCE_ROM_ENABLE))))
add_to_list(fail_head,
dev_res->dev, res,
- 0 /* dont care */,
- 0 /* dont care */);
+ 0 /* don't care */,
+ 0 /* don't care */);
}
reset_resource(res);
}
@@ -667,9 +667,9 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
if (!io) {
pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0);
pci_read_config_word(bridge, PCI_IO_BASE, &io);
- pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
- }
- if (io)
+ pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
+ }
+ if (io)
b_res[0].flags |= IORESOURCE_IO;
/* DECchip 21050 pass 2 errata: the bridge may miss an address
disconnect boundary by one PCI data phase.
@@ -819,7 +819,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
resource_size_t min_align, align;
if (!b_res)
- return;
+ return;
min_align = window_alignment(bus, IORESOURCE_IO);
list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -950,7 +950,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
if (realloc_head && i >= PCI_IOV_RESOURCES &&
i <= PCI_IOV_RESOURCE_END) {
r->end = r->start - 1;
- add_to_list(realloc_head, dev, r, r_size, 0/* dont' care */);
+				add_to_list(realloc_head, dev, r, r_size, 0 /* don't care */);
children_add_size += r_size;
continue;
}
@@ -1456,8 +1456,8 @@ static enum enable_type pci_realloc_detect(struct pci_bus *bus,
/*
* first try will not touch pci bridge res
- * second and later try will clear small leaf bridge res
- * will stop till to the max deepth if can not find good one
+ * second and later tries will clear small leaf bridge res
+ * and will stop at the max depth if no good assignment is found
*/
void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
{
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 07f2eddc09ce..83c4d3bc47ab 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -159,7 +159,7 @@ resource_size_t __weak pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx)
return 0;
}
-static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
+static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
int resno, resource_size_t size)
{
struct resource *root, *conflict;
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index c1e9284a677b..448ca562d1f8 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -53,7 +53,7 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf)
static const char *pci_bus_speed_strings[] = {
"33 MHz PCI", /* 0x00 */
"66 MHz PCI", /* 0x01 */
- "66 MHz PCI-X", /* 0x02 */
+ "66 MHz PCI-X", /* 0x02 */
"100 MHz PCI-X", /* 0x03 */
"133 MHz PCI-X", /* 0x04 */
NULL, /* 0x05 */
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
index e1c1ec540893..24750a1b39b6 100644
--- a/drivers/pci/syscall.c
+++ b/drivers/pci/syscall.c
@@ -44,7 +44,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
default:
err = -EINVAL;
goto error;
- };
+ }
err = -EIO;
if (cfg_ret != PCIBIOS_SUCCESSFUL)
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index a344f3d52361..330ef2d06567 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -24,8 +24,8 @@ config PHY_EXYNOS_MIPI_VIDEO
config OMAP_USB2
tristate "OMAP USB2 PHY Driver"
depends on ARCH_OMAP2PLUS
+ depends on USB_PHY
select GENERIC_PHY
- select USB_PHY
select OMAP_CONTROL_USB
help
Enable this to support the transceiver that is part of SOC. This
@@ -36,8 +36,8 @@ config OMAP_USB2
config TWL4030_USB
tristate "TWL4030 USB Transceiver Driver"
depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
+ depends on USB_PHY
select GENERIC_PHY
- select USB_PHY
help
Enable this to support the USB OTG transceiver on TWL4030
family chips (including the TWL5030 and TPS659x0 devices).
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 03cf8fb81554..58e0e9739028 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -437,23 +437,18 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
int id;
struct phy *phy;
- if (!dev) {
- dev_WARN(dev, "no device provided for PHY\n");
- ret = -EINVAL;
- goto err0;
- }
+ if (WARN_ON(!dev))
+ return ERR_PTR(-EINVAL);
phy = kzalloc(sizeof(*phy), GFP_KERNEL);
- if (!phy) {
- ret = -ENOMEM;
- goto err0;
- }
+ if (!phy)
+ return ERR_PTR(-ENOMEM);
id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL);
if (id < 0) {
dev_err(dev, "unable to get id\n");
ret = id;
- goto err0;
+ goto free_phy;
}
device_initialize(&phy->dev);
@@ -468,11 +463,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
if (ret)
- goto err1;
+ goto put_dev;
ret = device_add(&phy->dev);
if (ret)
- goto err1;
+ goto put_dev;
if (pm_runtime_enabled(dev)) {
pm_runtime_enable(&phy->dev);
@@ -481,12 +476,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
return phy;
-err1:
- ida_remove(&phy_ida, phy->id);
+put_dev:
put_device(&phy->dev);
+ ida_remove(&phy_ida, phy->id);
+free_phy:
kfree(phy);
-
-err0:
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(phy_create);
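
The relabelled error path above follows the standard reverse-order unwind idiom. A self-contained sketch with hypothetical helpers (stubs only, not the PHY API):

#include <stdlib.h>

struct thing { int id; };

/* Stubbed setup/teardown pairs for illustration. */
static int get_id(struct thing *t)    { t->id = 1; return 0; }
static void put_id(struct thing *t)   { t->id = 0; }
static int add_thing(struct thing *t) { (void)t; return -1; }

static struct thing *create_thing(void)
{
	struct thing *t = malloc(sizeof(*t));

	if (!t)
		return NULL;
	if (get_id(t))
		goto err_free;
	if (add_thing(t))
		goto err_put_id;	/* undo only what succeeded */
	return t;

err_put_id:
	put_id(t);
err_free:
	free(t);
	return NULL;
}
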
diff --git a/drivers/pinctrl/pinctrl-abx500.c b/drivers/pinctrl/pinctrl-abx500.c
index 4780959e11d4..5183e7bb8de3 100644
--- a/drivers/pinctrl/pinctrl-abx500.c
+++ b/drivers/pinctrl/pinctrl-abx500.c
@@ -418,7 +418,7 @@ static int abx500_set_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
ret = abx500_gpio_set_bits(chip,
AB8500_GPIO_ALTFUN_REG,
af.alt_bit1,
- !!(af.alta_val && BIT(0)));
+ !!(af.alta_val & BIT(0)));
if (ret < 0)
goto out;
@@ -439,7 +439,7 @@ static int abx500_set_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
goto out;
ret = abx500_gpio_set_bits(chip, AB8500_GPIO_ALTFUN_REG,
- af.alt_bit1, !!(af.altb_val && BIT(0)));
+ af.alt_bit1, !!(af.altb_val & BIT(0)));
if (ret < 0)
goto out;
@@ -462,7 +462,7 @@ static int abx500_set_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
goto out;
ret = abx500_gpio_set_bits(chip, AB8500_GPIO_ALTFUN_REG,
- af.alt_bit2, !!(af.altc_val && BIT(1)));
+ af.alt_bit2, !!(af.altc_val & BIT(1)));
break;
default:
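
The fix above swaps a logical AND for the intended bitwise AND. A runnable demonstration of the difference:

#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
	unsigned int val = 0x2;	/* bit 1 set, bit 0 clear */

	/* logical AND: true for any non-zero val, ignores BIT(0) */
	printf("!!(val && BIT(0)) = %d\n", !!(val && BIT(0)));	/* 1, wrong */
	/* bitwise AND: actually tests bit 0 */
	printf("!!(val & BIT(0))  = %d\n", !!(val & BIT(0)));	/* 0, right */
	return 0;
}
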
diff --git a/drivers/pinctrl/pinctrl-abx500.h b/drivers/pinctrl/pinctrl-abx500.h
index eeca8f973999..82293806e842 100644
--- a/drivers/pinctrl/pinctrl-abx500.h
+++ b/drivers/pinctrl/pinctrl-abx500.h
@@ -1,4 +1,4 @@
-#ifndef PINCTRL_PINCTRL_ABx5O0_H
+#ifndef PINCTRL_PINCTRL_ABx500_H
#define PINCTRL_PINCTRL_ABx500_H
/* Package definitions */
diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c
index 2832576d8b12..114f5ef4b73a 100644
--- a/drivers/pinctrl/pinctrl-baytrail.c
+++ b/drivers/pinctrl/pinctrl-baytrail.c
@@ -512,6 +512,7 @@ static const struct dev_pm_ops byt_gpio_pm_ops = {
static const struct acpi_device_id byt_gpio_acpi_match[] = {
{ "INT33B2", 0 },
+ { "INT33FC", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match);
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index e939c28cbf1f..46dddc159286 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -504,6 +504,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
data |= (3 << bit);
break;
default:
+ spin_unlock_irqrestore(&bank->slock, flags);
dev_err(info->dev, "unsupported pull setting %d\n",
pull);
return -EINVAL;
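
The added unlock plugs a lock leak on the error path. A minimal sketch of the rule being enforced (the function and range check are hypothetical):

#include <linux/spinlock.h>

static int set_pull_sketch(spinlock_t *lock, int pull)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (pull < 0 || pull > 3) {
		/* every early return must drop the lock first */
		spin_unlock_irqrestore(lock, flags);
		return -EINVAL;
	}
	/* ... program the pull setting under the lock ... */
	spin_unlock_irqrestore(lock, flags);
	return 0;
}
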
@@ -1453,8 +1454,8 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
if (ctrl->type == RK3188) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
info->reg_pull = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(info->reg_base))
- return PTR_ERR(info->reg_base);
+ if (IS_ERR(info->reg_pull))
+ return PTR_ERR(info->reg_pull);
}
ret = rockchip_gpiolib_register(pdev, info);
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
index 009174d07767..bc5eb453a45c 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
@@ -3720,7 +3720,7 @@ static void __iomem *r8a7740_pinmux_portcr(struct sh_pfc *pfc, unsigned int pin)
const struct r8a7740_portcr_group *group =
&r8a7740_portcr_offsets[i];
- if (i <= group->end_pin)
+ if (pin <= group->end_pin)
return pfc->window->virt + group->offset + pin;
}
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7372.c b/drivers/pinctrl/sh-pfc/pfc-sh7372.c
index 70b522d34821..cc097b693820 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7372.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7372.c
@@ -2584,7 +2584,7 @@ static void __iomem *sh7372_pinmux_portcr(struct sh_pfc *pfc, unsigned int pin)
const struct sh7372_portcr_group *group =
&sh7372_portcr_offsets[i];
- if (i <= group->end_pin)
+ if (pin <= group->end_pin)
return pfc->window->virt + group->offset + pin;
}
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 11bd0d970a52..e2142956a8e5 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -254,7 +254,7 @@ struct sh_pfc_soc_info {
#define PINMUX_GPIO(_pin) \
[GPIO_##_pin] = { \
.pin = (u16)-1, \
- .name = __stringify(name), \
+ .name = __stringify(GPIO_##_pin), \
.enum_id = _pin##_DATA, \
}
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index 69616aeaa966..09fde58b12e0 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -5,3 +5,4 @@ if GOLDFISH
source "drivers/platform/goldfish/Kconfig"
endif
+source "drivers/platform/chrome/Kconfig"
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
index 8a44a4cd6d1e..3656b7b17b99 100644
--- a/drivers/platform/Makefile
+++ b/drivers/platform/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_X86) += x86/
obj-$(CONFIG_OLPC) += olpc/
obj-$(CONFIG_GOLDFISH) += goldfish/
+obj-$(CONFIG_CHROME_PLATFORMS) += chrome/
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
new file mode 100644
index 000000000000..b13303e75a34
--- /dev/null
+++ b/drivers/platform/chrome/Kconfig
@@ -0,0 +1,28 @@
+#
+# Platform support for Chrome OS hardware (Chromebooks and Chromeboxes)
+#
+
+menuconfig CHROME_PLATFORMS
+ bool "Platform support for Chrome hardware"
+ depends on X86
+ ---help---
+ Say Y here to get to see options for platform support for
+ various Chromebooks and Chromeboxes. This option alone does
+ not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if CHROME_PLATFORMS
+
+config CHROMEOS_LAPTOP
+ tristate "Chrome OS Laptop"
+ depends on I2C
+ depends on DMI
+ ---help---
+ This driver instantiates i2c and smbus devices such as
+ light sensors and touchpads.
+
+ If you have a supported Chromebook, choose Y or M here.
+ The module will be called chromeos_laptop.
+
+endif # CHROME_PLATFORMS
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
new file mode 100644
index 000000000000..015e9195e226
--- /dev/null
+++ b/drivers/platform/chrome/Makefile
@@ -0,0 +1,2 @@
+
+obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
diff --git a/drivers/platform/x86/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 3e5b4497a1d0..3e5b4497a1d0 100644
--- a/drivers/platform/x86/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index b51a7460cc49..d9dcd37b5a52 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -79,17 +79,6 @@ config ASUS_LAPTOP
If you have an ACPI-compatible ASUS laptop, say Y or M here.
-config CHROMEOS_LAPTOP
- tristate "Chrome OS Laptop"
- depends on I2C
- depends on DMI
- ---help---
- This driver instantiates i2c and smbus devices such as
- light sensors and touchpads.
-
- If you have a supported Chromebook, choose Y or M here.
- The module will be called chromeos_laptop.
-
config DELL_LAPTOP
tristate "Dell Laptop Extras"
depends on X86
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 5dbe19324351..f0e6aa407ffb 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -50,7 +50,6 @@ obj-$(CONFIG_INTEL_MID_POWER_BUTTON) += intel_mid_powerbtn.o
obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o
obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o
obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o
-obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
obj-$(CONFIG_INTEL_RST) += intel-rst.o
obj-$(CONFIG_INTEL_SMARTCONNECT) += intel-smartconnect.o
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 605a9be55129..b9429fbf1cd8 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -519,7 +519,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
gmux_data->power_state = VGA_SWITCHEROO_ON;
- gmux_data->dhandle = DEVICE_ACPI_HANDLE(&pnp->dev);
+ gmux_data->dhandle = ACPI_HANDLE(&pnp->dev);
if (!gmux_data->dhandle) {
pr_err("Cannot find acpi handle for pnp device %s\n",
dev_name(&pnp->dev));
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 0e9c169b42f8..594323a926cf 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -1494,10 +1494,9 @@ static int asus_input_init(struct asus_laptop *asus)
int error;
input = input_allocate_device();
- if (!input) {
- pr_warn("Unable to allocate input device\n");
+ if (!input)
return -ENOMEM;
- }
+
input->name = "Asus Laptop extra buttons";
input->phys = ASUS_LAPTOP_FILE "/input0";
input->id.bustype = BUS_HOST;
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index bb77e18b3dd4..c608b1d33f4a 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -21,6 +21,7 @@
#include <linux/err.h>
#include <linux/dmi.h>
#include <linux/io.h>
+#include <linux/rfkill.h>
#include <linux/power_supply.h>
#include <linux/acpi.h>
#include <linux/mm.h>
@@ -89,6 +90,13 @@ static struct platform_driver platform_driver = {
static struct platform_device *platform_device;
static struct backlight_device *dell_backlight_device;
+static struct rfkill *wifi_rfkill;
+static struct rfkill *bluetooth_rfkill;
+static struct rfkill *wwan_rfkill;
+static bool force_rfkill;
+
+module_param(force_rfkill, bool, 0444);
+MODULE_PARM_DESC(force_rfkill, "enable rfkill on non-whitelisted models");
static const struct dmi_system_id dell_device_table[] __initconst = {
{
@@ -355,6 +363,108 @@ dell_send_request(struct calling_interface_buffer *buffer, int class,
return buffer;
}
+/* Derived from information in DellWirelessCtl.cpp:
+ Class 17, select 11 is radio control. It returns an array of 32-bit values.
+
+ Input byte 0 = 0: Wireless information
+
+ result[0]: return code
+ result[1]:
+ Bit 0: Hardware switch supported
+ Bit 1: Wifi locator supported
+ Bit 2: Wifi is supported
+ Bit 3: Bluetooth is supported
+ Bit 4: WWAN is supported
+ Bit 5: Wireless keyboard supported
+ Bits 6-7: Reserved
+ Bit 8: Wifi is installed
+ Bit 9: Bluetooth is installed
+ Bit 10: WWAN is installed
+ Bits 11-15: Reserved
+ Bit 16: Hardware switch is on
+ Bit 17: Wifi is blocked
+ Bit 18: Bluetooth is blocked
+ Bit 19: WWAN is blocked
+ Bits 20-31: Reserved
+ result[2]: NVRAM size in bytes
+ result[3]: NVRAM format version number
+
+ Input byte 0 = 2: Wireless switch configuration
+ result[0]: return code
+ result[1]:
+ Bit 0: Wifi controlled by switch
+ Bit 1: Bluetooth controlled by switch
+ Bit 2: WWAN controlled by switch
+ Bits 3-6: Reserved
+ Bit 7: Wireless switch config locked
+ Bit 8: Wifi locator enabled
+ Bits 9-14: Reserved
+ Bit 15: Wifi locator setting locked
+ Bits 16-31: Reserved
+*/
+
+static int dell_rfkill_set(void *data, bool blocked)
+{
+ int disable = blocked ? 1 : 0;
+ unsigned long radio = (unsigned long)data;
+ int hwswitch_bit = (unsigned long)data - 1;
+
+ get_buffer();
+ dell_send_request(buffer, 17, 11);
+
+ /* If the hardware switch controls this radio, and the hardware
+ switch is disabled, always disable the radio */
+ if ((hwswitch_state & BIT(hwswitch_bit)) &&
+ !(buffer->output[1] & BIT(16)))
+ disable = 1;
+
+ buffer->input[0] = (1 | (radio<<8) | (disable << 16));
+ dell_send_request(buffer, 17, 11);
+
+ release_buffer();
+ return 0;
+}
+
+/* Must be called with the buffer held */
+static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio,
+ int status)
+{
+ if (status & BIT(0)) {
+ /* Has hw-switch, sync sw_state to BIOS */
+ int block = rfkill_blocked(rfkill);
+ buffer->input[0] = (1 | (radio << 8) | (block << 16));
+ dell_send_request(buffer, 17, 11);
+ } else {
+ /* No hw-switch, sync BIOS state to sw_state */
+ rfkill_set_sw_state(rfkill, !!(status & BIT(radio + 16)));
+ }
+}
+
+static void dell_rfkill_update_hw_state(struct rfkill *rfkill, int radio,
+ int status)
+{
+ if (hwswitch_state & (BIT(radio - 1)))
+ rfkill_set_hw_state(rfkill, !(status & BIT(16)));
+}
+
+static void dell_rfkill_query(struct rfkill *rfkill, void *data)
+{
+ int status;
+
+ get_buffer();
+ dell_send_request(buffer, 17, 11);
+ status = buffer->output[1];
+
+ dell_rfkill_update_hw_state(rfkill, (unsigned long)data, status);
+
+ release_buffer();
+}
+
+static const struct rfkill_ops dell_rfkill_ops = {
+ .set_block = dell_rfkill_set,
+ .query = dell_rfkill_query,
+};
+
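
A runnable sketch decoding the wireless-information word per the bit layout documented above; the example status value is made up:

#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
	unsigned int status = BIT(0) | BIT(2) | BIT(8) | BIT(16);

	printf("hw switch supported:      %d\n", !!(status & BIT(0)));
	printf("wifi supported+installed: %d\n",
	       (status & (BIT(2) | BIT(8))) == (BIT(2) | BIT(8)));
	printf("hw switch on:             %d\n", !!(status & BIT(16)));
	printf("wifi blocked:             %d\n", !!(status & BIT(17)));
	return 0;
}
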
static struct dentry *dell_laptop_dir;
static int dell_debugfs_show(struct seq_file *s, void *data)
@@ -424,6 +534,136 @@ static const struct file_operations dell_debugfs_fops = {
.release = single_release,
};
+static void dell_update_rfkill(struct work_struct *ignored)
+{
+ int status;
+
+ get_buffer();
+ dell_send_request(buffer, 17, 11);
+ status = buffer->output[1];
+
+ if (wifi_rfkill) {
+ dell_rfkill_update_hw_state(wifi_rfkill, 1, status);
+ dell_rfkill_update_sw_state(wifi_rfkill, 1, status);
+ }
+ if (bluetooth_rfkill) {
+ dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status);
+ dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status);
+ }
+ if (wwan_rfkill) {
+ dell_rfkill_update_hw_state(wwan_rfkill, 3, status);
+ dell_rfkill_update_sw_state(wwan_rfkill, 3, status);
+ }
+
+ release_buffer();
+}
+static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
+
+
+static int __init dell_setup_rfkill(void)
+{
+ int status;
+ int ret;
+ const char *product;
+
+ /*
+	 * rfkill causes trouble on various non-Latitude models; according to
+	 * Dell, actual testing of the rfkill functionality is only done on
+	 * Latitudes.
+ */
+ product = dmi_get_system_info(DMI_PRODUCT_NAME);
+ if (!force_rfkill && (!product || strncmp(product, "Latitude", 8)))
+ return 0;
+
+ get_buffer();
+ dell_send_request(buffer, 17, 11);
+ status = buffer->output[1];
+ buffer->input[0] = 0x2;
+ dell_send_request(buffer, 17, 11);
+ hwswitch_state = buffer->output[1];
+ release_buffer();
+
+ if (!(status & BIT(0))) {
+ if (force_rfkill) {
+			/* No hwswitch, clear all hw-controlled bits */
+ hwswitch_state &= ~7;
+ } else {
+ /* rfkill is only tested on laptops with a hwswitch */
+ return 0;
+ }
+ }
+
+ if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) {
+ wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev,
+ RFKILL_TYPE_WLAN,
+ &dell_rfkill_ops, (void *) 1);
+ if (!wifi_rfkill) {
+ ret = -ENOMEM;
+ goto err_wifi;
+ }
+ ret = rfkill_register(wifi_rfkill);
+ if (ret)
+ goto err_wifi;
+ }
+
+ if ((status & (1<<3|1<<9)) == (1<<3|1<<9)) {
+ bluetooth_rfkill = rfkill_alloc("dell-bluetooth",
+ &platform_device->dev,
+ RFKILL_TYPE_BLUETOOTH,
+ &dell_rfkill_ops, (void *) 2);
+ if (!bluetooth_rfkill) {
+ ret = -ENOMEM;
+ goto err_bluetooth;
+ }
+ ret = rfkill_register(bluetooth_rfkill);
+ if (ret)
+ goto err_bluetooth;
+ }
+
+ if ((status & (1<<4|1<<10)) == (1<<4|1<<10)) {
+ wwan_rfkill = rfkill_alloc("dell-wwan",
+ &platform_device->dev,
+ RFKILL_TYPE_WWAN,
+ &dell_rfkill_ops, (void *) 3);
+ if (!wwan_rfkill) {
+ ret = -ENOMEM;
+ goto err_wwan;
+ }
+ ret = rfkill_register(wwan_rfkill);
+ if (ret)
+ goto err_wwan;
+ }
+
+ return 0;
+err_wwan:
+ rfkill_destroy(wwan_rfkill);
+ if (bluetooth_rfkill)
+ rfkill_unregister(bluetooth_rfkill);
+err_bluetooth:
+ rfkill_destroy(bluetooth_rfkill);
+ if (wifi_rfkill)
+ rfkill_unregister(wifi_rfkill);
+err_wifi:
+ rfkill_destroy(wifi_rfkill);
+
+ return ret;
+}
+
+static void dell_cleanup_rfkill(void)
+{
+ if (wifi_rfkill) {
+ rfkill_unregister(wifi_rfkill);
+ rfkill_destroy(wifi_rfkill);
+ }
+ if (bluetooth_rfkill) {
+ rfkill_unregister(bluetooth_rfkill);
+ rfkill_destroy(bluetooth_rfkill);
+ }
+ if (wwan_rfkill) {
+ rfkill_unregister(wwan_rfkill);
+ rfkill_destroy(wwan_rfkill);
+ }
+}
+
static int dell_send_intensity(struct backlight_device *bd)
{
int ret = 0;
@@ -515,6 +755,30 @@ static void touchpad_led_exit(void)
led_classdev_unregister(&touchpad_led);
}
+static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
+ struct serio *port)
+{
+ static bool extended;
+
+ if (str & 0x20)
+ return false;
+
+ if (unlikely(data == 0xe0)) {
+ extended = true;
+ return false;
+ } else if (unlikely(extended)) {
+ switch (data) {
+ case 0x8:
+ schedule_delayed_work(&dell_rfkill_work,
+ round_jiffies_relative(HZ / 4));
+ break;
+ }
+ extended = false;
+ }
+
+ return false;
+}
+
static int __init dell_init(void)
{
int max_intensity = 0;
@@ -557,10 +821,26 @@ static int __init dell_init(void)
}
buffer = page_address(bufferpage);
+ ret = dell_setup_rfkill();
+
+ if (ret) {
+ pr_warn("Unable to setup rfkill\n");
+ goto fail_rfkill;
+ }
+
+ ret = i8042_install_filter(dell_laptop_i8042_filter);
+ if (ret) {
+ pr_warn("Unable to install key filter\n");
+ goto fail_filter;
+ }
+
if (quirks && quirks->touchpad_led)
touchpad_led_init(&platform_device->dev);
dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
+ if (dell_laptop_dir != NULL)
+ debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
+ &dell_debugfs_fops);
#ifdef CONFIG_ACPI
/* In the event of an ACPI backlight being available, don't
@@ -603,6 +883,11 @@ static int __init dell_init(void)
return 0;
fail_backlight:
+ i8042_remove_filter(dell_laptop_i8042_filter);
+ cancel_delayed_work_sync(&dell_rfkill_work);
+fail_filter:
+ dell_cleanup_rfkill();
+fail_rfkill:
free_page((unsigned long)bufferpage);
fail_buffer:
platform_device_del(platform_device);
@@ -620,7 +905,10 @@ static void __exit dell_exit(void)
debugfs_remove_recursive(dell_laptop_dir);
if (quirks && quirks->touchpad_led)
touchpad_led_exit();
+ i8042_remove_filter(dell_laptop_i8042_filter);
+ cancel_delayed_work_sync(&dell_rfkill_work);
backlight_device_unregister(dell_backlight_device);
+ dell_cleanup_rfkill();
if (platform_device) {
platform_device_unregister(platform_device);
platform_driver_unregister(&platform_driver);
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index fa9a2171cc13..60e0900bc117 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -130,7 +130,8 @@ static const u16 bios_to_linux_keycode[256] __initconst = {
KEY_BRIGHTNESSUP, KEY_UNKNOWN, KEY_KBDILLUMTOGGLE,
KEY_UNKNOWN, KEY_SWITCHVIDEOMODE, KEY_UNKNOWN, KEY_UNKNOWN,
KEY_SWITCHVIDEOMODE, KEY_UNKNOWN, KEY_UNKNOWN, KEY_PROG2,
- KEY_UNKNOWN, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_MICMUTE,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -139,8 +140,8 @@ static const u16 bios_to_linux_keycode[256] __initconst = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- KEY_PROG3
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, KEY_PROG3
};
static struct input_dev *dell_wmi_input_dev;
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index aefcc32e5634..dec68e7a99c7 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -1203,10 +1203,8 @@ static int eeepc_input_init(struct eeepc_laptop *eeepc)
int error;
input = input_allocate_device();
- if (!input) {
- pr_info("Unable to allocate input device\n");
+ if (!input)
return -ENOMEM;
- }
input->name = "Asus EeePC extra buttons";
input->phys = EEEPC_LAPTOP_FILE "/input0";
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 1c86fa0857c8..8ba8956b5a48 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -54,6 +54,7 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
#define HPWMI_HARDWARE_QUERY 0x4
#define HPWMI_WIRELESS_QUERY 0x5
#define HPWMI_HOTKEY_QUERY 0xc
+#define HPWMI_FEATURE_QUERY 0xd
#define HPWMI_WIRELESS2_QUERY 0x1b
#define HPWMI_POSTCODEERROR_QUERY 0x2a
@@ -292,6 +293,17 @@ static int hp_wmi_tablet_state(void)
return (state & 0x4) ? 1 : 0;
}
+static int hp_wmi_bios_2009_later(void)
+{
+ int state = 0;
+ int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, 0, &state,
+ sizeof(state), sizeof(state));
+ if (ret)
+ return ret;
+
+ return (state & 0x10) ? 1 : 0;
+}
+
static int hp_wmi_set_block(void *data, bool blocked)
{
enum hp_wmi_radio r = (enum hp_wmi_radio) data;
@@ -871,7 +883,7 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
gps_rfkill = NULL;
rfkill2_count = 0;
- if (hp_wmi_rfkill_setup(device))
+ if (hp_wmi_bios_2009_later() || hp_wmi_rfkill_setup(device))
hp_wmi_rfkill2_setup(device);
err = device_create_file(&device->dev, &dev_attr_display);
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 6788acc22ab9..19ec95147f69 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -570,10 +570,8 @@ static int ideapad_input_init(struct ideapad_private *priv)
int error;
inputdev = input_allocate_device();
- if (!inputdev) {
- pr_info("Unable to allocate input device\n");
+ if (!inputdev)
return -ENOMEM;
- }
inputdev->name = "Ideapad extra buttons";
inputdev->phys = "ideapad/input0";
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 6b18aba82cfa..8d6775266d66 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -66,10 +66,8 @@ static int mfld_pb_probe(struct platform_device *pdev)
return -EINVAL;
input = input_allocate_device();
- if (!input) {
- dev_err(&pdev->dev, "Input device allocation error\n");
+ if (!input)
return -ENOMEM;
- }
input->name = pdev->name;
input->phys = "power-button/input0";
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index d654f831410d..60ea476a9130 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -58,12 +58,56 @@
* message handler is called within firmware.
*/
-#define IPC_BASE_ADDR 0xFF11C000 /* IPC1 base register address */
-#define IPC_MAX_ADDR 0x100 /* Maximum IPC regisers */
#define IPC_WWBUF_SIZE 20 /* IPC Write buffer Size */
#define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */
-#define IPC_I2C_BASE 0xFF12B000 /* I2C control register base address */
-#define IPC_I2C_MAX_ADDR 0x10 /* Maximum I2C regisers */
+#define IPC_IOC 0x100 /* IPC command register IOC bit */
+
+enum {
+ SCU_IPC_LINCROFT,
+ SCU_IPC_PENWELL,
+ SCU_IPC_CLOVERVIEW,
+ SCU_IPC_TANGIER,
+};
+
+/* Intel SCU IPC driver data */
+struct intel_scu_ipc_pdata_t {
+ u32 ipc_base;
+ u32 i2c_base;
+ u32 ipc_len;
+ u32 i2c_len;
+ u8 irq_mode;
+};
+
+static struct intel_scu_ipc_pdata_t intel_scu_ipc_pdata[] = {
+ [SCU_IPC_LINCROFT] = {
+ .ipc_base = 0xff11c000,
+ .i2c_base = 0xff12b000,
+ .ipc_len = 0x100,
+ .i2c_len = 0x10,
+ .irq_mode = 0,
+ },
+ [SCU_IPC_PENWELL] = {
+ .ipc_base = 0xff11c000,
+ .i2c_base = 0xff12b000,
+ .ipc_len = 0x100,
+ .i2c_len = 0x10,
+ .irq_mode = 1,
+ },
+ [SCU_IPC_CLOVERVIEW] = {
+ .ipc_base = 0xff11c000,
+ .i2c_base = 0xff12b000,
+ .ipc_len = 0x100,
+ .i2c_len = 0x10,
+ .irq_mode = 1,
+ },
+ [SCU_IPC_TANGIER] = {
+ .ipc_base = 0xff009000,
+ .i2c_base = 0xff00d000,
+ .ipc_len = 0x100,
+ .i2c_len = 0x10,
+ .irq_mode = 0,
+ },
+};
static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id);
static void ipc_remove(struct pci_dev *pdev);
@@ -72,6 +116,8 @@ struct intel_scu_ipc_dev {
struct pci_dev *pdev;
void __iomem *ipc_base;
void __iomem *i2c_base;
+ struct completion cmd_complete;
+ u8 irq_mode;
};
static struct intel_scu_ipc_dev ipcdev; /* Only one for now */
@@ -98,6 +144,10 @@ static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
*/
static inline void ipc_command(u32 cmd) /* Send ipc command */
{
+ if (ipcdev.irq_mode) {
+ reinit_completion(&ipcdev.cmd_complete);
+ writel(cmd | IPC_IOC, ipcdev.ipc_base);
+ }
writel(cmd, ipcdev.ipc_base);
}
@@ -156,6 +206,30 @@ static inline int busy_loop(void) /* Wait till scu status is busy */
return 0;
}
+/* Wait till the IPC IOC interrupt arrives or we time out after 3 seconds (3 * HZ) */
+static inline int ipc_wait_for_interrupt(void)
+{
+ int status;
+
+ if (!wait_for_completion_timeout(&ipcdev.cmd_complete, 3 * HZ)) {
+ struct device *dev = &ipcdev.pdev->dev;
+ dev_err(dev, "IPC timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ status = ipc_read_status();
+
+ if ((status >> 1) & 1)
+ return -EIO;
+
+ return 0;
+}
+
+int intel_scu_ipc_check_status(void)
+{
+ return ipcdev.irq_mode ? ipc_wait_for_interrupt() : busy_loop();
+}
+
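
A sketch of the issue/wait pairing these hunks introduce: re-arm the completion, write the command with the IOC bit set so the SCU interrupts on completion, and sleep with a three-second timeout. IPC_IOC is the constant defined above; the other names are hypothetical:

#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/io.h>

static DECLARE_COMPLETION(cmd_done);

static void issue_cmd(void __iomem *ipc_base, u32 cmd)
{
	reinit_completion(&cmd_done);	/* arm before kicking off */
	writel(cmd | IPC_IOC, ipc_base);
}

static irqreturn_t cmd_isr(int irq, void *dev_id)
{
	complete(&cmd_done);		/* wakes the waiter below */
	return IRQ_HANDLED;
}

static int wait_cmd(void)
{
	if (!wait_for_completion_timeout(&cmd_done, 3 * HZ))
		return -ETIMEDOUT;
	return 0;
}
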
/* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
{
@@ -196,8 +270,8 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
ipc_command(4 << 16 | id << 12 | 0 << 8 | op);
}
- err = busy_loop();
- if (id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
+ err = intel_scu_ipc_check_status();
+ if (!err && id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
/* Workaround: values are read as 0 without memcpy_fromio */
memcpy_fromio(cbuf, ipcdev.ipc_base + 0x90, 16);
for (nc = 0; nc < count; nc++)
@@ -391,7 +465,7 @@ int intel_scu_ipc_simple_command(int cmd, int sub)
return -ENODEV;
}
ipc_command(sub << 12 | cmd);
- err = busy_loop();
+ err = intel_scu_ipc_check_status();
mutex_unlock(&ipclock);
return err;
}
@@ -425,10 +499,12 @@ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
ipc_data_writel(*in++, 4 * i);
ipc_command((inlen << 16) | (sub << 12) | cmd);
- err = busy_loop();
+ err = intel_scu_ipc_check_status();
- for (i = 0; i < outlen; i++)
- *out++ = ipc_data_readl(4 * i);
+ if (!err) {
+ for (i = 0; i < outlen; i++)
+ *out++ = ipc_data_readl(4 * i);
+ }
mutex_unlock(&ipclock);
return err;
@@ -491,6 +567,9 @@ EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl);
*/
static irqreturn_t ioc(int irq, void *dev_id)
{
+ if (ipcdev.irq_mode)
+ complete(&ipcdev.cmd_complete);
+
return IRQ_HANDLED;
}
@@ -504,13 +583,18 @@ static irqreturn_t ioc(int irq, void *dev_id)
*/
static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
- int err;
+ int err, pid;
+ struct intel_scu_ipc_pdata_t *pdata;
resource_size_t pci_resource;
if (ipcdev.pdev) /* We support only one SCU */
return -EBUSY;
+ pid = id->driver_data;
+ pdata = &intel_scu_ipc_pdata[pid];
+
ipcdev.pdev = pci_dev_get(dev);
+ ipcdev.irq_mode = pdata->irq_mode;
err = pci_enable_device(dev);
if (err)
@@ -524,14 +608,16 @@ static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (!pci_resource)
return -ENOMEM;
+ init_completion(&ipcdev.cmd_complete);
+
if (request_irq(dev->irq, ioc, 0, "intel_scu_ipc", &ipcdev))
return -EBUSY;
- ipcdev.ipc_base = ioremap_nocache(IPC_BASE_ADDR, IPC_MAX_ADDR);
+ ipcdev.ipc_base = ioremap_nocache(pdata->ipc_base, pdata->ipc_len);
if (!ipcdev.ipc_base)
return -ENOMEM;
- ipcdev.i2c_base = ioremap_nocache(IPC_I2C_BASE, IPC_I2C_MAX_ADDR);
+ ipcdev.i2c_base = ioremap_nocache(pdata->i2c_base, pdata->i2c_len);
if (!ipcdev.i2c_base) {
iounmap(ipcdev.ipc_base);
return -ENOMEM;
@@ -564,7 +650,10 @@ static void ipc_remove(struct pci_dev *pdev)
}
static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x082a)},
+ {PCI_VDEVICE(INTEL, 0x082a), SCU_IPC_LINCROFT},
+ {PCI_VDEVICE(INTEL, 0x080e), SCU_IPC_PENWELL},
+ {PCI_VDEVICE(INTEL, 0x08ea), SCU_IPC_CLOVERVIEW},
+ {PCI_VDEVICE(INTEL, 0x11a0), SCU_IPC_TANGIER},
{ 0,}
};
MODULE_DEVICE_TABLE(pci, pci_ids);
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 10d12b221601..3008fd20572e 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -490,11 +490,8 @@ static int acpi_pcc_init_input(struct pcc_acpi *pcc)
int error;
input_dev = input_allocate_device();
- if (!input_dev) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
- "Couldn't allocate input device for hotkey"));
+ if (!input_dev)
return -ENOMEM;
- }
input_dev->name = ACPI_PCC_DRIVER_NAME;
input_dev->phys = ACPI_PCC_INPUT_PHYS;
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 47caab0ea7a1..fb233ae7bb0e 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -140,12 +140,12 @@ MODULE_PARM_DESC(kbd_backlight_timeout,
"on the model (default: no change from current value)");
#ifdef CONFIG_PM_SLEEP
-static void sony_nc_kbd_backlight_resume(void);
static void sony_nc_thermal_resume(void);
#endif
static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
unsigned int handle);
-static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd);
+static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd,
+ unsigned int handle);
static int sony_nc_battery_care_setup(struct platform_device *pd,
unsigned int handle);
@@ -304,8 +304,8 @@ static int sony_laptop_input_keycode_map[] = {
KEY_FN_F10, /* 14 SONYPI_EVENT_FNKEY_F10 */
KEY_FN_F11, /* 15 SONYPI_EVENT_FNKEY_F11 */
KEY_FN_F12, /* 16 SONYPI_EVENT_FNKEY_F12 */
- KEY_FN_F1, /* 17 SONYPI_EVENT_FNKEY_1 */
- KEY_FN_F2, /* 18 SONYPI_EVENT_FNKEY_2 */
+ KEY_FN_1, /* 17 SONYPI_EVENT_FNKEY_1 */
+ KEY_FN_2, /* 18 SONYPI_EVENT_FNKEY_2 */
KEY_FN_D, /* 19 SONYPI_EVENT_FNKEY_D */
KEY_FN_E, /* 20 SONYPI_EVENT_FNKEY_E */
KEY_FN_F, /* 21 SONYPI_EVENT_FNKEY_F */
@@ -1444,7 +1444,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
case 0x014b:
case 0x014c:
case 0x0163:
- sony_nc_kbd_backlight_cleanup(pd);
+ sony_nc_kbd_backlight_cleanup(pd, handle);
break;
default:
continue;
@@ -1486,13 +1486,6 @@ static void sony_nc_function_resume(void)
case 0x0135:
sony_nc_rfkill_update();
break;
- case 0x0137:
- case 0x0143:
- case 0x014b:
- case 0x014c:
- case 0x0163:
- sony_nc_kbd_backlight_resume();
- break;
default:
continue;
}
@@ -1822,6 +1815,12 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
int result;
int ret = 0;
+ if (kbdbl_ctl) {
+ pr_warn("handle 0x%.4x: keyboard backlight setup already done for 0x%.4x\n",
+ handle, kbdbl_ctl->handle);
+ return -EBUSY;
+ }
+
/* verify the kbd backlight presence, these handles are not used for
* keyboard backlight only
*/
@@ -1881,9 +1880,10 @@ outkzalloc:
return ret;
}
-static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
+static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd,
+ unsigned int handle)
{
- if (kbdbl_ctl) {
+ if (kbdbl_ctl && handle == kbdbl_ctl->handle) {
device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr);
kfree(kbdbl_ctl);
@@ -1891,25 +1891,6 @@ static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
}
}
-#ifdef CONFIG_PM_SLEEP
-static void sony_nc_kbd_backlight_resume(void)
-{
- int ignore = 0;
-
- if (!kbdbl_ctl)
- return;
-
- if (kbdbl_ctl->mode == 0)
- sony_call_snc_handle(kbdbl_ctl->handle, kbdbl_ctl->base,
- &ignore);
-
- if (kbdbl_ctl->timeout != 0)
- sony_call_snc_handle(kbdbl_ctl->handle,
- (kbdbl_ctl->base + 0x200) |
- (kbdbl_ctl->timeout << 0x10), &ignore);
-}
-#endif
-
struct battery_care_control {
struct device_attribute attrs[2];
unsigned int handle;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 05e046aa5e31..58b0274d24cc 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -6438,7 +6438,12 @@ static struct ibm_struct brightness_driver_data = {
#define TPACPI_ALSA_SHRTNAME "ThinkPad Console Audio Control"
#define TPACPI_ALSA_MIXERNAME TPACPI_ALSA_SHRTNAME
-static int alsa_index = ~((1 << (SNDRV_CARDS - 3)) - 1); /* last three slots */
+#if SNDRV_CARDS <= 32
+#define DEFAULT_ALSA_IDX ~((1 << (SNDRV_CARDS - 3)) - 1)
+#else
+#define DEFAULT_ALSA_IDX ~((1 << (32 - 3)) - 1)
+#endif
+static int alsa_index = DEFAULT_ALSA_IDX; /* last three slots */
static char *alsa_id = "ThinkPadEC";
static bool alsa_enable = SNDRV_DEFAULT_ENABLE1;
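
Worked example of the clamp above: ~((1 << (SNDRV_CARDS - 3)) - 1) is a mask of the last three card slots, but for SNDRV_CARDS > 32 the shift exceeds the width of int and is undefined, hence the cap at 32:

#include <stdio.h>

int main(void)
{
	int cards = 32;	/* SNDRV_CARDS */
	unsigned int idx = ~((1U << (cards - 3)) - 1);

	/* bits 29..31 set: indexes 29, 30, 31 -- the last three slots */
	printf("alsa_index mask = 0x%08x\n", idx);	/* 0xe0000000 */

	/* with cards = 64 the shift count would be 61 -- undefined for a
	 * 32-bit int, which is why DEFAULT_ALSA_IDX clamps at 32 - 3 */
	return 0;
}
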
@@ -9163,7 +9168,6 @@ static int __init thinkpad_acpi_module_init(void)
mutex_init(&tpacpi_inputdev_send_mutex);
tpacpi_inputdev = input_allocate_device();
if (!tpacpi_inputdev) {
- pr_err("unable to allocate input device\n");
thinkpad_acpi_module_exit();
return -ENOMEM;
} else {
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c
index 67897c8740ba..e597de05e6c2 100644
--- a/drivers/platform/x86/topstar-laptop.c
+++ b/drivers/platform/x86/topstar-laptop.c
@@ -97,10 +97,8 @@ static int acpi_topstar_init_hkey(struct topstar_hkey *hkey)
int error;
input = input_allocate_device();
- if (!input) {
- pr_err("Unable to allocate input device\n");
+ if (!input)
return -ENOMEM;
- }
input->name = "Topstar Laptop extra buttons";
input->phys = "topstar/input0";
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 0cfadb65f7c6..7fce391818d3 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -975,10 +975,8 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
u32 hci_result;
dev->hotkey_dev = input_allocate_device();
- if (!dev->hotkey_dev) {
- pr_info("Unable to register input device\n");
+ if (!dev->hotkey_dev)
return -ENOMEM;
- }
dev->hotkey_dev->name = "Toshiba input device";
dev->hotkey_dev->phys = "toshiba_acpi/input0";
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 62e8c221d01e..c2e7b2657aeb 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -672,8 +672,10 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
struct wmi_block *wblock;
wblock = dev_get_drvdata(dev);
- if (!wblock)
- return -ENOMEM;
+ if (!wblock) {
+ strcat(buf, "\n");
+ return strlen(buf);
+ }
wmi_gtoa(wblock->gblock.guid, guid_string);
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index 6936e0acedcd..f748cc8cbb03 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -197,6 +197,11 @@ static int pnp_bus_freeze(struct device *dev)
return __pnp_bus_suspend(dev, PMSG_FREEZE);
}
+static int pnp_bus_poweroff(struct device *dev)
+{
+ return __pnp_bus_suspend(dev, PMSG_HIBERNATE);
+}
+
static int pnp_bus_resume(struct device *dev)
{
struct pnp_dev *pnp_dev = to_pnp_dev(dev);
@@ -234,9 +239,14 @@ static int pnp_bus_resume(struct device *dev)
}
static const struct dev_pm_ops pnp_bus_dev_pm_ops = {
+ /* Suspend callbacks */
.suspend = pnp_bus_suspend,
- .freeze = pnp_bus_freeze,
.resume = pnp_bus_resume,
+ /* Hibernate callbacks */
+ .freeze = pnp_bus_freeze,
+ .thaw = pnp_bus_resume,
+ .poweroff = pnp_bus_poweroff,
+ .restore = pnp_bus_resume,
};
struct bus_type pnp_bus_type = {
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 747826d99059..14655a0f0431 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -89,7 +89,7 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
pnp_dbg(&dev->dev, "set resources\n");
- handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ handle = ACPI_HANDLE(&dev->dev);
if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return -ENODEV;
@@ -122,7 +122,7 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
dev_dbg(&dev->dev, "disable resources\n");
- handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ handle = ACPI_HANDLE(&dev->dev);
if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return 0;
@@ -144,7 +144,7 @@ static bool pnpacpi_can_wakeup(struct pnp_dev *dev)
struct acpi_device *acpi_dev;
acpi_handle handle;
- handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ handle = ACPI_HANDLE(&dev->dev);
if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return false;
@@ -159,7 +159,7 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
acpi_handle handle;
int error = 0;
- handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ handle = ACPI_HANDLE(&dev->dev);
if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return 0;
@@ -194,7 +194,7 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
static int pnpacpi_resume(struct pnp_dev *dev)
{
struct acpi_device *acpi_dev;
- acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ acpi_handle handle = ACPI_HANDLE(&dev->dev);
int error = 0;
if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 2a786c504460..3c6768378a94 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -833,6 +833,11 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
return 0;
}
+static const struct x86_cpu_id energy_unit_quirk_ids[] = {
+ { X86_VENDOR_INTEL, 6, 0x37},/* VLV */
+ {}
+};
+
static int rapl_check_unit(struct rapl_package *rp, int cpu)
{
u64 msr_val;
@@ -853,8 +858,11 @@ static int rapl_check_unit(struct rapl_package *rp, int cpu)
* time unit: 1/time_unit_divisor Seconds
*/
value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
- rp->energy_unit_divisor = 1 << value;
-
+	/* some CPUs have a different way to calculate the energy unit */
+ if (x86_match_cpu(energy_unit_quirk_ids))
+ rp->energy_unit_divisor = 1000000 / (1 << value);
+ else
+ rp->energy_unit_divisor = 1 << value;
value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
rp->power_unit_divisor = 1 << value;
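
Worked example of the two unit computations, for an MSR energy-unit field of 5 (a plausible value, chosen for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int value = 5;	/* ENERGY_UNIT field read from the MSR */

	/* default: counts are 1/2^value J, so divisor 2^5 = 32 */
	printf("default divisor: %u\n", 1U << value);
	/* VLV quirk: divisor 1000000 / 2^value = 31250 */
	printf("quirk divisor:   %u\n", 1000000U / (1U << value));
	return 0;
}
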
@@ -941,6 +949,7 @@ static void package_power_limit_irq_restore(int package_id)
static const struct x86_cpu_id rapl_ids[] = {
{ X86_VENDOR_INTEL, 6, 0x2a},/* SNB */
{ X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */
+ { X86_VENDOR_INTEL, 6, 0x37},/* VLV */
{ X86_VENDOR_INTEL, 6, 0x3a},/* IVB */
{ X86_VENDOR_INTEL, 6, 0x45},/* HSW */
/* TODO: Add more CPU IDs after testing */
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 8d0fe431dbdd..84419af16f77 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -377,9 +377,14 @@ static void create_power_zone_common_attributes(
if (power_zone->ops->get_max_energy_range_uj)
power_zone->zone_dev_attrs[count++] =
&dev_attr_max_energy_range_uj.attr;
- if (power_zone->ops->get_energy_uj)
+ if (power_zone->ops->get_energy_uj) {
+ if (power_zone->ops->reset_energy_uj)
+ dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
+ else
+ dev_attr_energy_uj.attr.mode = S_IRUGO;
power_zone->zone_dev_attrs[count++] =
&dev_attr_energy_uj.attr;
+ }
if (power_zone->ops->get_power_uw)
power_zone->zone_dev_attrs[count++] =
&dev_attr_power_uw.attr;
diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c
index 724706a97dc4..fd3154d86901 100644
--- a/drivers/regulator/arizona-micsupp.c
+++ b/drivers/regulator/arizona-micsupp.c
@@ -174,6 +174,33 @@ static const struct regulator_desc arizona_micsupp = {
.owner = THIS_MODULE,
};
+static const struct regulator_linear_range arizona_micsupp_ext_ranges[] = {
+ REGULATOR_LINEAR_RANGE(900000, 0, 0x14, 25000),
+ REGULATOR_LINEAR_RANGE(1500000, 0x15, 0x27, 100000),
+};
+
+static const struct regulator_desc arizona_micsupp_ext = {
+ .name = "MICVDD",
+ .supply_name = "CPVDD",
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 40,
+ .ops = &arizona_micsupp_ops,
+
+ .vsel_reg = ARIZONA_LDO2_CONTROL_1,
+ .vsel_mask = ARIZONA_LDO2_VSEL_MASK,
+ .enable_reg = ARIZONA_MIC_CHARGE_PUMP_1,
+ .enable_mask = ARIZONA_CPMIC_ENA,
+ .bypass_reg = ARIZONA_MIC_CHARGE_PUMP_1,
+ .bypass_mask = ARIZONA_CPMIC_BYPASS,
+
+ .linear_ranges = arizona_micsupp_ext_ranges,
+ .n_linear_ranges = ARRAY_SIZE(arizona_micsupp_ext_ranges),
+
+ .enable_time = 3000,
+
+ .owner = THIS_MODULE,
+};
+
static const struct regulator_init_data arizona_micsupp_default = {
.constraints = {
.valid_ops_mask = REGULATOR_CHANGE_STATUS |
@@ -186,9 +213,22 @@ static const struct regulator_init_data arizona_micsupp_default = {
.num_consumer_supplies = 1,
};
+static const struct regulator_init_data arizona_micsupp_ext_default = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_BYPASS,
+ .min_uV = 900000,
+ .max_uV = 3300000,
+ },
+
+ .num_consumer_supplies = 1,
+};
+
static int arizona_micsupp_probe(struct platform_device *pdev)
{
struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
+ const struct regulator_desc *desc;
struct regulator_config config = { };
struct arizona_micsupp *micsupp;
int ret;
@@ -207,7 +247,17 @@ static int arizona_micsupp_probe(struct platform_device *pdev)
* default init_data for it. This will be overridden with
* platform data if provided.
*/
- micsupp->init_data = arizona_micsupp_default;
+ switch (arizona->type) {
+ case WM5110:
+ desc = &arizona_micsupp_ext;
+ micsupp->init_data = arizona_micsupp_ext_default;
+ break;
+ default:
+ desc = &arizona_micsupp;
+ micsupp->init_data = arizona_micsupp_default;
+ break;
+ }
+
micsupp->init_data.consumer_supplies = &micsupp->supply;
micsupp->supply.supply = "MICVDD";
micsupp->supply.dev_name = dev_name(arizona->dev);
@@ -226,7 +276,7 @@ static int arizona_micsupp_probe(struct platform_device *pdev)
ARIZONA_CPMIC_BYPASS, 0);
micsupp->regulator = devm_regulator_register(&pdev->dev,
- &arizona_micsupp,
+ desc,
&config);
if (IS_ERR(micsupp->regulator)) {
ret = PTR_ERR(micsupp->regulator);
diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c
index 5917fe3dc983..b9f1d24c6812 100644
--- a/drivers/regulator/as3722-regulator.c
+++ b/drivers/regulator/as3722-regulator.c
@@ -590,8 +590,8 @@ static int as3722_sd016_set_current_limit(struct regulator_dev *rdev,
default:
return -EINVAL;
}
+ ret <<= ffs(mask) - 1;
val = ret & mask;
- val <<= ffs(mask) - 1;
return as3722_update_bits(as3722, reg, mask, val);
}
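
The fix reorders shift and mask: the raw value must be shifted into field position before masking, or the mask zeroes it. Runnable demonstration with a hypothetical two-bit field at bits 6-7 (mask 0xc0, so ffs(mask) - 1 == 6):

#include <stdio.h>

int main(void)
{
	unsigned int mask = 0xc0, ret = 2;

	/* buggy order: mask first, then shift -- (2 & 0xc0) << 6 == 0 */
	printf("mask-then-shift: 0x%02x\n", (ret & mask) << 6);
	/* fixed order: shift first, then mask -- (2 << 6) & 0xc0 == 0x80 */
	printf("shift-then-mask: 0x%02x\n", (ret << 6) & mask);
	return 0;
}
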
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 6382f0af353b..d85f31385b24 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -119,6 +119,11 @@ static const char *rdev_get_name(struct regulator_dev *rdev)
return "";
}
+static bool have_full_constraints(void)
+{
+ return has_full_constraints || of_have_populated_dt();
+}
+
/**
* of_get_regulator - get a regulator device node based on supply name
* @dev: Device pointer for the consumer (of regulator) device
@@ -1340,7 +1345,7 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
* Assume that a regulator is physically present and enabled
* even if it isn't hooked up and just provide a dummy.
*/
- if (has_full_constraints && allow_dummy) {
+ if (have_full_constraints() && allow_dummy) {
pr_warn("%s supply %s not found, using dummy regulator\n",
devname, id);
@@ -2184,6 +2189,9 @@ int regulator_list_voltage(struct regulator *regulator, unsigned selector)
struct regulator_ops *ops = rdev->desc->ops;
int ret;
+ if (rdev->desc->fixed_uV && rdev->desc->n_voltages == 1 && !selector)
+ return rdev->desc->fixed_uV;
+
if (!ops->list_voltage || selector >= rdev->desc->n_voltages)
return -EINVAL;
@@ -3624,7 +3632,7 @@ int regulator_suspend_finish(void)
if (error)
ret = error;
} else {
- if (!has_full_constraints)
+ if (!have_full_constraints())
goto unlock;
if (!ops->disable)
goto unlock;
@@ -3822,7 +3830,7 @@ static int __init regulator_init_complete(void)
if (!enabled)
goto unlock;
- if (has_full_constraints) {
+ if (have_full_constraints()) {
/* We log since this may kill the system if it
* goes wrong. */
rdev_info(rdev, "disabling\n");
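The regulator_list_voltage() change above lets a fixed-voltage regulator (one selector, typically no .list_voltage op) answer selector 0 directly from its descriptor instead of returning -EINVAL. A toy model of that shortcut, with local stand-in types:

    #include <stdio.h>

    /* Minimal model of the list_voltage shortcut; names are local
     * stand-ins for the kernel structures, not the real API. */
    struct desc {
            int fixed_uV;
            unsigned int n_voltages;
            int (*list_voltage)(unsigned int sel);
    };

    static int list_voltage(const struct desc *d, unsigned int sel)
    {
            if (d->fixed_uV && d->n_voltages == 1 && !sel)
                    return d->fixed_uV;
            if (!d->list_voltage || sel >= d->n_voltages)
                    return -22;     /* -EINVAL */
            return d->list_voltage(sel);
    }

    int main(void)
    {
            struct desc fixed = { .fixed_uV = 3300000, .n_voltages = 1 };

            printf("%d\n", list_voltage(&fixed, 0));        /* 3300000 */
            printf("%d\n", list_voltage(&fixed, 1));        /* -22 */
            return 0;
    }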
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 04406a918c04..234960dc9607 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -139,6 +139,7 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
struct property *prop;
const char *regtype;
int proplen, gpio, i;
+ int ret;
config = devm_kzalloc(dev,
sizeof(struct gpio_regulator_config),
@@ -202,7 +203,11 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
}
config->nr_states = i;
- of_property_read_string(np, "regulator-type", &regtype);
+ ret = of_property_read_string(np, "regulator-type", &regtype);
+ if (ret < 0) {
+ dev_err(dev, "Missing 'regulator-type' property\n");
+ return ERR_PTR(-EINVAL);
+ }
if (!strncmp("voltage", regtype, 7))
config->type = REGULATOR_VOLTAGE;
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index ba67b2c4e2e7..8b5e4c712a01 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -38,7 +38,7 @@
#define PFUZE100_DEVICEID 0x0
#define PFUZE100_REVID 0x3
-#define PFUZE100_FABID 0x3
+#define PFUZE100_FABID 0x4
#define PFUZE100_SW1ABVOL 0x20
#define PFUZE100_SW1CVOL 0x2e
@@ -308,9 +308,15 @@ static int pfuze_identify(struct pfuze_chip *pfuze_chip)
if (ret)
return ret;
- if (value & 0x0f) {
- dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value);
- return -ENODEV;
+ switch (value & 0x0f) {
+ /* Freescale misprogrammed 1-3% of parts prior to week 8 of 2013 as ID=8 */
+ case 0x8:
+ dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8");
+ case 0x0:
+ break;
+ default:
+ dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value);
+ return -ENODEV;
}
ret = regmap_read(pfuze_chip->regmap, PFUZE100_REVID, &value);
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 333677d68d0e..9e61922d8230 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -438,7 +438,7 @@ common_reg:
platform_set_drvdata(pdev, s2mps11);
config.dev = &pdev->dev;
- config.regmap = iodev->regmap;
+ config.regmap = iodev->regmap_pmic;
config.driver_data = s2mps11;
for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) {
if (!reg_np) {
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index cbf91e25cf7f..aeb40aad0ae7 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -925,7 +925,7 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
config.dev = s5m8767->dev;
config.init_data = pdata->regulators[i].initdata;
config.driver_data = s5m8767;
- config.regmap = iodev->regmap;
+ config.regmap = iodev->regmap_pmic;
config.of_node = pdata->regulators[i].reg_node;
rdev[i] = devm_regulator_register(&pdev->dev, &regulators[id],
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 15f166a470a7..007730222116 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -626,7 +626,7 @@ comment "Platform RTC drivers"
config RTC_DRV_CMOS
tristate "PC-style 'CMOS'"
- depends on X86 || ALPHA || ARM || M32R || ATARI || PPC || MIPS || SPARC64
+ depends on X86 || ARM || M32R || ATARI || PPC || MIPS || SPARC64
default y if X86
help
Say "yes" here to get direct support for the real time clock
@@ -643,6 +643,14 @@ config RTC_DRV_CMOS
This driver can also be built as a module. If so, the module
will be called rtc-cmos.
+config RTC_DRV_ALPHA
+ bool "Alpha PC-style CMOS"
+ depends on ALPHA
+ default y
+ help
+ Direct support for the real-time clock found on every Alpha
+ system, specifically MC146818 compatibles. If in doubt, say Y.
+
config RTC_DRV_VRTC
tristate "Virtual RTC for Intel MID platforms"
depends on X86_INTEL_MID
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 8b2cd8a5a2ff..3281c90691c3 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -220,6 +220,8 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
at91_alarm_year = tm.tm_year;
+ tm.tm_mon = alrm->time.tm_mon;
+ tm.tm_mday = alrm->time.tm_mday;
tm.tm_hour = alrm->time.tm_hour;
tm.tm_min = alrm->time.tm_min;
tm.tm_sec = alrm->time.tm_sec;
@@ -428,6 +430,14 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
return 0;
}
+static void at91_rtc_shutdown(struct platform_device *pdev)
+{
+ /* Disable all interrupts */
+ at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
+ AT91_RTC_SECEV | AT91_RTC_TIMEV |
+ AT91_RTC_CALEV);
+}
+
#ifdef CONFIG_PM_SLEEP
/* AT91RM9200 RTC Power management control */
@@ -466,6 +476,7 @@ static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume);
static struct platform_driver at91_rtc_driver = {
.remove = __exit_p(at91_rtc_remove),
+ .shutdown = at91_rtc_shutdown,
.driver = {
.name = "at91_rtc",
.owner = THIS_MODULE,
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index b7fd02bc0a14..ae8119dc2846 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -28,10 +28,20 @@
#include <linux/mfd/samsung/irq.h>
#include <linux/mfd/samsung/rtc.h>
+/*
+ * Maximum number of retries for checking changes in the UDR field
+ * of the SEC_RTC_UDR_CON register (to limit a possible endless loop).
+ *
+ * After writing to RTC registers (setting time or alarm), read the UDR
+ * field in the SEC_RTC_UDR_CON register. UDR is auto-cleared once the
+ * data have been transferred.
+ */

+#define UDR_READ_RETRY_CNT 5
+
struct s5m_rtc_info {
struct device *dev;
struct sec_pmic_dev *s5m87xx;
- struct regmap *rtc;
+ struct regmap *regmap;
struct rtc_device *rtc_dev;
int irq;
int device_type;
@@ -84,12 +94,31 @@ static int s5m8767_tm_to_data(struct rtc_time *tm, u8 *data)
}
}
+/*
+ * Read the RTC_UDR_CON register and wait until the UDR field is cleared.
+ * This indicates that the time/alarm update has completed.
+ */
+static inline int s5m8767_wait_for_udr_update(struct s5m_rtc_info *info)
+{
+ int ret, retry = UDR_READ_RETRY_CNT;
+ unsigned int data;
+
+ do {
+ ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &data);
+ } while (--retry && (data & RTC_UDR_MASK) && !ret);
+
+ if (!retry)
+ dev_err(info->dev, "waiting for UDR update, reached max number of retries\n");
+
+ return ret;
+}
+
static inline int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info)
{
int ret;
unsigned int data;
- ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data);
+ ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &data);
if (ret < 0) {
dev_err(info->dev, "failed to read update reg(%d)\n", ret);
return ret;
@@ -98,15 +127,13 @@ static inline int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info)
data |= RTC_TIME_EN_MASK;
data |= RTC_UDR_MASK;
- ret = regmap_write(info->rtc, SEC_RTC_UDR_CON, data);
+ ret = regmap_write(info->regmap, SEC_RTC_UDR_CON, data);
if (ret < 0) {
dev_err(info->dev, "failed to write update reg(%d)\n", ret);
return ret;
}
- do {
- ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data);
- } while ((data & RTC_UDR_MASK) && !ret);
+ ret = s5m8767_wait_for_udr_update(info);
return ret;
}
@@ -116,7 +143,7 @@ static inline int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info)
int ret;
unsigned int data;
- ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data);
+ ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &data);
if (ret < 0) {
dev_err(info->dev, "%s: fail to read update reg(%d)\n",
__func__, ret);
@@ -126,16 +153,14 @@ static inline int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info)
data &= ~RTC_TIME_EN_MASK;
data |= RTC_UDR_MASK;
- ret = regmap_write(info->rtc, SEC_RTC_UDR_CON, data);
+ ret = regmap_write(info->regmap, SEC_RTC_UDR_CON, data);
if (ret < 0) {
dev_err(info->dev, "%s: fail to write update reg(%d)\n",
__func__, ret);
return ret;
}
- do {
- ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data);
- } while ((data & RTC_UDR_MASK) && !ret);
+ ret = s5m8767_wait_for_udr_update(info);
return ret;
}
@@ -178,7 +203,7 @@ static int s5m_rtc_read_time(struct device *dev, struct rtc_time *tm)
u8 data[8];
int ret;
- ret = regmap_bulk_read(info->rtc, SEC_RTC_SEC, data, 8);
+ ret = regmap_bulk_read(info->regmap, SEC_RTC_SEC, data, 8);
if (ret < 0)
return ret;
@@ -226,7 +251,7 @@ static int s5m_rtc_set_time(struct device *dev, struct rtc_time *tm)
1900 + tm->tm_year, 1 + tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec, tm->tm_wday);
- ret = regmap_raw_write(info->rtc, SEC_RTC_SEC, data, 8);
+ ret = regmap_raw_write(info->regmap, SEC_RTC_SEC, data, 8);
if (ret < 0)
return ret;
@@ -242,20 +267,20 @@ static int s5m_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
unsigned int val;
int ret, i;
- ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8);
+ ret = regmap_bulk_read(info->regmap, SEC_ALARM0_SEC, data, 8);
if (ret < 0)
return ret;
switch (info->device_type) {
case S5M8763X:
s5m8763_data_to_tm(data, &alrm->time);
- ret = regmap_read(info->rtc, SEC_ALARM0_CONF, &val);
+ ret = regmap_read(info->regmap, SEC_ALARM0_CONF, &val);
if (ret < 0)
return ret;
alrm->enabled = !!val;
- ret = regmap_read(info->rtc, SEC_RTC_STATUS, &val);
+ ret = regmap_read(info->regmap, SEC_RTC_STATUS, &val);
if (ret < 0)
return ret;
@@ -278,7 +303,7 @@ static int s5m_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
}
alrm->pending = 0;
- ret = regmap_read(info->rtc, SEC_RTC_STATUS, &val);
+ ret = regmap_read(info->regmap, SEC_RTC_STATUS, &val);
if (ret < 0)
return ret;
break;
@@ -301,7 +326,7 @@ static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info)
int ret, i;
struct rtc_time tm;
- ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8);
+ ret = regmap_bulk_read(info->regmap, SEC_ALARM0_SEC, data, 8);
if (ret < 0)
return ret;
@@ -312,14 +337,14 @@ static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info)
switch (info->device_type) {
case S5M8763X:
- ret = regmap_write(info->rtc, SEC_ALARM0_CONF, 0);
+ ret = regmap_write(info->regmap, SEC_ALARM0_CONF, 0);
break;
case S5M8767X:
for (i = 0; i < 7; i++)
data[i] &= ~ALARM_ENABLE_MASK;
- ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8);
+ ret = regmap_raw_write(info->regmap, SEC_ALARM0_SEC, data, 8);
if (ret < 0)
return ret;
@@ -341,7 +366,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info)
u8 alarm0_conf;
struct rtc_time tm;
- ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8);
+ ret = regmap_bulk_read(info->regmap, SEC_ALARM0_SEC, data, 8);
if (ret < 0)
return ret;
@@ -353,7 +378,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info)
switch (info->device_type) {
case S5M8763X:
alarm0_conf = 0x77;
- ret = regmap_write(info->rtc, SEC_ALARM0_CONF, alarm0_conf);
+ ret = regmap_write(info->regmap, SEC_ALARM0_CONF, alarm0_conf);
break;
case S5M8767X:
@@ -368,7 +393,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info)
if (data[RTC_YEAR1] & 0x7f)
data[RTC_YEAR1] |= ALARM_ENABLE_MASK;
- ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8);
+ ret = regmap_raw_write(info->regmap, SEC_ALARM0_SEC, data, 8);
if (ret < 0)
return ret;
ret = s5m8767_rtc_set_alarm_reg(info);
@@ -410,7 +435,7 @@ static int s5m_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
if (ret < 0)
return ret;
- ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8);
+ ret = regmap_raw_write(info->regmap, SEC_ALARM0_SEC, data, 8);
if (ret < 0)
return ret;
@@ -455,7 +480,7 @@ static const struct rtc_class_ops s5m_rtc_ops = {
static void s5m_rtc_enable_wtsr(struct s5m_rtc_info *info, bool enable)
{
int ret;
- ret = regmap_update_bits(info->rtc, SEC_WTSR_SMPL_CNTL,
+ ret = regmap_update_bits(info->regmap, SEC_WTSR_SMPL_CNTL,
WTSR_ENABLE_MASK,
enable ? WTSR_ENABLE_MASK : 0);
if (ret < 0)
@@ -466,7 +491,7 @@ static void s5m_rtc_enable_wtsr(struct s5m_rtc_info *info, bool enable)
static void s5m_rtc_enable_smpl(struct s5m_rtc_info *info, bool enable)
{
int ret;
- ret = regmap_update_bits(info->rtc, SEC_WTSR_SMPL_CNTL,
+ ret = regmap_update_bits(info->regmap, SEC_WTSR_SMPL_CNTL,
SMPL_ENABLE_MASK,
enable ? SMPL_ENABLE_MASK : 0);
if (ret < 0)
@@ -481,7 +506,7 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
int ret;
struct rtc_time tm;
- ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &tp_read);
+ ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &tp_read);
if (ret < 0) {
dev_err(info->dev, "%s: fail to read control reg(%d)\n",
__func__, ret);
@@ -493,7 +518,7 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
data[1] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
info->rtc_24hr_mode = 1;
- ret = regmap_raw_write(info->rtc, SEC_ALARM0_CONF, data, 2);
+ ret = regmap_raw_write(info->regmap, SEC_ALARM0_CONF, data, 2);
if (ret < 0) {
dev_err(info->dev, "%s: fail to write controlm reg(%d)\n",
__func__, ret);
@@ -515,7 +540,7 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
ret = s5m_rtc_set_time(info->dev, &tm);
}
- ret = regmap_update_bits(info->rtc, SEC_RTC_UDR_CON,
+ ret = regmap_update_bits(info->regmap, SEC_RTC_UDR_CON,
RTC_TCON_MASK, tp_read | RTC_TCON_MASK);
if (ret < 0)
dev_err(info->dev, "%s: fail to update TCON reg(%d)\n",
@@ -542,17 +567,19 @@ static int s5m_rtc_probe(struct platform_device *pdev)
info->dev = &pdev->dev;
info->s5m87xx = s5m87xx;
- info->rtc = s5m87xx->rtc;
+ info->regmap = s5m87xx->regmap_rtc;
info->device_type = s5m87xx->device_type;
info->wtsr_smpl = s5m87xx->wtsr_smpl;
switch (pdata->device_type) {
case S5M8763X:
- info->irq = s5m87xx->irq_base + S5M8763_IRQ_ALARM0;
+ info->irq = regmap_irq_get_virq(s5m87xx->irq_data,
+ S5M8763_IRQ_ALARM0);
break;
case S5M8767X:
- info->irq = s5m87xx->irq_base + S5M8767_IRQ_RTCA1;
+ info->irq = regmap_irq_get_virq(s5m87xx->irq_data,
+ S5M8767_IRQ_RTCA1);
break;
default:
@@ -596,7 +623,7 @@ static void s5m_rtc_shutdown(struct platform_device *pdev)
if (info->wtsr_smpl) {
for (i = 0; i < 3; i++) {
s5m_rtc_enable_wtsr(info, false);
- regmap_read(info->rtc, SEC_WTSR_SMPL_CNTL, &val);
+ regmap_read(info->regmap, SEC_WTSR_SMPL_CNTL, &val);
pr_debug("%s: WTSR_SMPL reg(0x%02x)\n", __func__, val);
if (val & WTSR_ENABLE_MASK)
pr_emerg("%s: fail to disable WTSR\n",
@@ -612,6 +639,30 @@ static void s5m_rtc_shutdown(struct platform_device *pdev)
s5m_rtc_enable_smpl(info, false);
}
+static int s5m_rtc_resume(struct device *dev)
+{
+ struct s5m_rtc_info *info = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (device_may_wakeup(dev))
+ ret = disable_irq_wake(info->irq);
+
+ return ret;
+}
+
+static int s5m_rtc_suspend(struct device *dev)
+{
+ struct s5m_rtc_info *info = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (device_may_wakeup(dev))
+ ret = enable_irq_wake(info->irq);
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume);
+
static const struct platform_device_id s5m_rtc_id[] = {
{ "s5m-rtc", 0 },
};
@@ -620,6 +671,7 @@ static struct platform_driver s5m_rtc_driver = {
.driver = {
.name = "s5m-rtc",
.owner = THIS_MODULE,
+ .pm = &s5m_rtc_pm_ops,
},
.probe = s5m_rtc_probe,
.shutdown = s5m_rtc_shutdown,
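The new s5m8767_wait_for_udr_update() above converts an unbounded busy-wait into a bounded poll. A standalone sketch of the same loop shape, with regmap_read() replaced by a fake that clears the bit after a few polls:

    #include <stdio.h>

    #define UDR_RETRY_CNT   5
    #define UDR_MASK        0x1

    /* Stand-in for regmap_read(): returns 0 and fakes UDR clearing after
     * a few polls, purely to exercise the loop shape used above. */
    static int fake_reads_left = 3;
    static int regmap_read_model(unsigned int *data)
    {
            *data = (fake_reads_left-- > 0) ? UDR_MASK : 0;
            return 0;
    }

    /* Same shape as s5m8767_wait_for_udr_update(): poll until the
     * hardware clears UDR, but never more than UDR_RETRY_CNT times. */
    static int wait_for_udr_clear(void)
    {
            int ret, retry = UDR_RETRY_CNT;
            unsigned int data;

            do {
                    ret = regmap_read_model(&data);
            } while (--retry && (data & UDR_MASK) && !ret);

            if (!retry)
                    fprintf(stderr, "UDR still set after %d polls\n",
                            UDR_RETRY_CNT);
            return ret;
    }

    int main(void)
    {
            return wait_for_udr_clear();
    }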
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index cee7e2708a1f..95e45782692f 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -3224,6 +3224,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
fcx_multitrack = private->features.feature[40] & 0x20;
data_size = blk_rq_bytes(req);
+ if (data_size % blksize)
+ return ERR_PTR(-EINVAL);
/* tpm write request add CBC data on each track boundary */
if (rq_data_dir(req) == WRITE)
data_size += (last_trk - first_trk) * 4;
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index f64921756ad6..f224d59c4b6b 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -87,7 +87,6 @@ void dasd_gendisk_free(struct dasd_block *block)
{
if (block->gdp) {
del_gendisk(block->gdp);
- block->gdp->queue = NULL;
block->gdp->private_data = NULL;
put_disk(block->gdp);
block->gdp = NULL;
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index f7aa080e9b28..1465e9563101 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -35,7 +35,6 @@ struct read_info_sccb {
u8 _reserved5[4096 - 112]; /* 112-4095 */
} __packed __aligned(PAGE_SIZE);
-static __initdata struct init_sccb early_event_mask_sccb __aligned(PAGE_SIZE);
static __initdata struct read_info_sccb early_read_info_sccb;
static __initdata char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE);
static unsigned long sclp_hsa_size;
@@ -113,7 +112,7 @@ static void __init sclp_facilities_detect(void)
bool __init sclp_has_linemode(void)
{
- struct init_sccb *sccb = &early_event_mask_sccb;
+ struct init_sccb *sccb = (void *) &sccb_early;
if (sccb->header.response_code != 0x20)
return 0;
@@ -126,7 +125,7 @@ bool __init sclp_has_linemode(void)
bool __init sclp_has_vt220(void)
{
- struct init_sccb *sccb = &early_event_mask_sccb;
+ struct init_sccb *sccb = (void *) &sccb_early;
if (sccb->header.response_code != 0x20)
return 0;
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 9b333fcf1a4c..ce16d1bdb20a 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -739,8 +739,12 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
IUCV_DBF_TEXT(trace, 4, __func__);
- if (conn && conn->netdev)
- privptr = netdev_priv(conn->netdev);
+ if (!conn || !conn->netdev) {
+ IUCV_DBF_TEXT(data, 2,
+ "Send confirmation for unlinked connection\n");
+ return;
+ }
+ privptr = netdev_priv(conn->netdev);
conn->prof.tx_pending--;
if (single_flag) {
if ((skb = skb_dequeue(&conn->commit_queue))) {
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 41ef94320ee8..d45427c553b0 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -738,6 +738,12 @@ struct qeth_rx {
int qdio_err;
};
+struct carrier_info {
+ __u8 card_type;
+ __u16 port_mode;
+ __u32 port_speed;
+};
+
#define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT
struct qeth_card {
@@ -914,6 +920,8 @@ struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
int qeth_mdio_read(struct net_device *, int, int);
int qeth_snmp_command(struct qeth_card *, char __user *);
int qeth_query_oat_command(struct qeth_card *, char __user *);
+int qeth_query_card_info(struct qeth_card *card,
+ struct carrier_info *carrier_info);
int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
void *reply_param);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index eb4e1f809feb..f9a85b47e3c3 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -4602,6 +4602,42 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_query_oat_command);
+int qeth_query_card_info_cb(struct qeth_card *card,
+ struct qeth_reply *reply, unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd;
+ struct qeth_query_card_info *card_info;
+ struct carrier_info *carrier_info;
+
+ QETH_CARD_TEXT(card, 2, "qcrdincb");
+ carrier_info = (struct carrier_info *)reply->param;
+ cmd = (struct qeth_ipa_cmd *)data;
+ card_info = &cmd->data.setadapterparms.data.card_info;
+ if (cmd->data.setadapterparms.hdr.return_code == 0) {
+ carrier_info->card_type = card_info->card_type;
+ carrier_info->port_mode = card_info->port_mode;
+ carrier_info->port_speed = card_info->port_speed;
+ }
+
+ qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
+ return 0;
+}
+
+int qeth_query_card_info(struct qeth_card *card,
+ struct carrier_info *carrier_info)
+{
+ struct qeth_cmd_buffer *iob;
+
+ QETH_CARD_TEXT(card, 2, "qcrdinfo");
+ if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
+ return -EOPNOTSUPP;
+ iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO,
+ sizeof(struct qeth_ipacmd_setadpparms_hdr));
+ return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
+ (void *)carrier_info);
+}
+EXPORT_SYMBOL_GPL(qeth_query_card_info);
+
static inline int qeth_get_qdio_q_format(struct qeth_card *card)
{
switch (card->info.type) {
@@ -5606,11 +5642,65 @@ void qeth_core_get_drvinfo(struct net_device *dev,
}
EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo);
+/* Helper function to fill 'advertising' and 'supported', which are the same. */
+/* Autoneg and full duplex are supported and advertised unconditionally. */
+/* Always advertise and support all speeds up to the specified maximum, */
+/* and only the one specified port type. */
+static void qeth_set_ecmd_adv_sup(struct ethtool_cmd *ecmd,
+ int maxspeed, int porttype)
+{
+ int port_sup, port_adv, spd_sup, spd_adv;
+
+ switch (porttype) {
+ case PORT_TP:
+ port_sup = SUPPORTED_TP;
+ port_adv = ADVERTISED_TP;
+ break;
+ case PORT_FIBRE:
+ port_sup = SUPPORTED_FIBRE;
+ port_adv = ADVERTISED_FIBRE;
+ break;
+ default:
+ port_sup = SUPPORTED_TP;
+ port_adv = ADVERTISED_TP;
+ WARN_ON_ONCE(1);
+ }
+
+ /* "Fallthrough" cases ordered from high to low result in setting */
+ /* flags cumulatively, starting from the specified speed and down to */
+ /* the lowest possible. */
+ spd_sup = 0;
+ spd_adv = 0;
+ switch (maxspeed) {
+ case SPEED_10000:
+ spd_sup |= SUPPORTED_10000baseT_Full;
+ spd_adv |= ADVERTISED_10000baseT_Full;
+ case SPEED_1000:
+ spd_sup |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full;
+ spd_adv |= ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full;
+ case SPEED_100:
+ spd_sup |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
+ spd_adv |= ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
+ case SPEED_10:
+ spd_sup |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full;
+ spd_adv |= ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
+ break;
+ default:
+ spd_sup = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full;
+ spd_adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
+ WARN_ON_ONCE(1);
+ }
+ ecmd->advertising = ADVERTISED_Autoneg | port_adv | spd_adv;
+ ecmd->supported = SUPPORTED_Autoneg | port_sup | spd_sup;
+}
+
int qeth_core_ethtool_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
struct qeth_card *card = netdev->ml_priv;
enum qeth_link_types link_type;
+ struct carrier_info carrier_info;
if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
link_type = QETH_LINK_TYPE_10GBIT_ETH;
@@ -5618,80 +5708,92 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
link_type = card->info.link_type;
ecmd->transceiver = XCVR_INTERNAL;
- ecmd->supported = SUPPORTED_Autoneg;
- ecmd->advertising = ADVERTISED_Autoneg;
ecmd->duplex = DUPLEX_FULL;
ecmd->autoneg = AUTONEG_ENABLE;
switch (link_type) {
case QETH_LINK_TYPE_FAST_ETH:
case QETH_LINK_TYPE_LANE_ETH100:
- ecmd->supported |= SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full |
- ADVERTISED_TP;
+ qeth_set_ecmd_adv_sup(ecmd, SPEED_100, PORT_TP);
ecmd->speed = SPEED_100;
ecmd->port = PORT_TP;
break;
case QETH_LINK_TYPE_GBIT_ETH:
case QETH_LINK_TYPE_LANE_ETH1000:
- ecmd->supported |= SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE;
+ qeth_set_ecmd_adv_sup(ecmd, SPEED_1000, PORT_FIBRE);
ecmd->speed = SPEED_1000;
ecmd->port = PORT_FIBRE;
break;
case QETH_LINK_TYPE_10GBIT_ETH:
- ecmd->supported |= SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_10000baseT_Full |
- SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_10000baseT_Full |
- ADVERTISED_FIBRE;
+ qeth_set_ecmd_adv_sup(ecmd, SPEED_10000, PORT_FIBRE);
ecmd->speed = SPEED_10000;
ecmd->port = PORT_FIBRE;
break;
default:
- ecmd->supported |= SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_TP;
+ qeth_set_ecmd_adv_sup(ecmd, SPEED_10, PORT_TP);
ecmd->speed = SPEED_10;
ecmd->port = PORT_TP;
}
+ /* Check if we can obtain more accurate information. */
+ /* If the QUERY_CARD_INFO command is not supported or fails, */
+ /* just return the heuristic values filled in above. */
+ if (qeth_query_card_info(card, &carrier_info) != 0)
+ return 0;
+
+ netdev_dbg(netdev,
+ "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
+ carrier_info.card_type,
+ carrier_info.port_mode,
+ carrier_info.port_speed);
+
+ /* Update attributes for which we've obtained more authoritative */
+ /* information; leave the rest the way they were filled in above. */
+ switch (carrier_info.card_type) {
+ case CARD_INFO_TYPE_1G_COPPER_A:
+ case CARD_INFO_TYPE_1G_COPPER_B:
+ qeth_set_ecmd_adv_sup(ecmd, SPEED_1000, PORT_TP);
+ ecmd->port = PORT_TP;
+ break;
+ case CARD_INFO_TYPE_1G_FIBRE_A:
+ case CARD_INFO_TYPE_1G_FIBRE_B:
+ qeth_set_ecmd_adv_sup(ecmd, SPEED_1000, PORT_FIBRE);
+ ecmd->port = PORT_FIBRE;
+ break;
+ case CARD_INFO_TYPE_10G_FIBRE_A:
+ case CARD_INFO_TYPE_10G_FIBRE_B:
+ qeth_set_ecmd_adv_sup(ecmd, SPEED_10000, PORT_FIBRE);
+ ecmd->port = PORT_FIBRE;
+ break;
+ }
+
+ switch (carrier_info.port_mode) {
+ case CARD_INFO_PORTM_FULLDUPLEX:
+ ecmd->duplex = DUPLEX_FULL;
+ break;
+ case CARD_INFO_PORTM_HALFDUPLEX:
+ ecmd->duplex = DUPLEX_HALF;
+ break;
+ }
+
+ switch (carrier_info.port_speed) {
+ case CARD_INFO_PORTS_10M:
+ ecmd->speed = SPEED_10;
+ break;
+ case CARD_INFO_PORTS_100M:
+ ecmd->speed = SPEED_100;
+ break;
+ case CARD_INFO_PORTS_1G:
+ ecmd->speed = SPEED_1000;
+ break;
+ case CARD_INFO_PORTS_10G:
+ ecmd->speed = SPEED_10000;
+ break;
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings);
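qeth_set_ecmd_adv_sup() above relies on deliberate switch fallthrough so that every speed at or below the requested maximum is accumulated. A standalone illustration of the idiom, with made-up flag values rather than the real ethtool constants:

    #include <stdio.h>

    /* Model of the cumulative-fallthrough idiom: each case ORs in its
     * own flag and falls through, so flags accumulate from the given
     * maximum speed down to the lowest. */
    enum { F_10000 = 1 << 3, F_1000 = 1 << 2, F_100 = 1 << 1, F_10 = 1 << 0 };

    static unsigned int speeds_up_to(int maxspeed)
    {
            unsigned int flags = 0;

            switch (maxspeed) {
            case 10000:
                    flags |= F_10000;       /* fall through */
            case 1000:
                    flags |= F_1000;        /* fall through */
            case 100:
                    flags |= F_100;         /* fall through */
            case 10:
                    flags |= F_10;
                    break;
            default:
                    flags = F_10;           /* conservative fallback */
            }
            return flags;
    }

    int main(void)
    {
            printf("0x%x\n", speeds_up_to(1000));   /* 0x7: 1000/100/10 */
            return 0;
    }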
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 07085d55f9a1..0a6e695578cd 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -274,7 +274,24 @@ enum qeth_ipa_set_access_mode_rc {
SET_ACCESS_CTRL_RC_REFLREL_FAILED = 0x0024,
SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED = 0x0028,
};
-
+enum qeth_card_info_card_type {
+ CARD_INFO_TYPE_1G_COPPER_A = 0x61,
+ CARD_INFO_TYPE_1G_FIBRE_A = 0x71,
+ CARD_INFO_TYPE_10G_FIBRE_A = 0x91,
+ CARD_INFO_TYPE_1G_COPPER_B = 0xb1,
+ CARD_INFO_TYPE_1G_FIBRE_B = 0xa1,
+ CARD_INFO_TYPE_10G_FIBRE_B = 0xc1,
+};
+enum qeth_card_info_port_mode {
+ CARD_INFO_PORTM_HALFDUPLEX = 0x0002,
+ CARD_INFO_PORTM_FULLDUPLEX = 0x0003,
+};
+enum qeth_card_info_port_speed {
+ CARD_INFO_PORTS_10M = 0x00000005,
+ CARD_INFO_PORTS_100M = 0x00000006,
+ CARD_INFO_PORTS_1G = 0x00000007,
+ CARD_INFO_PORTS_10G = 0x00000008,
+};
/* (SET)DELIP(M) IPA stuff ***************************************************/
struct qeth_ipacmd_setdelip4 {
@@ -404,6 +421,14 @@ struct qeth_qoat_priv {
char *buffer;
};
+struct qeth_query_card_info {
+ __u8 card_type;
+ __u8 reserved1;
+ __u16 port_mode;
+ __u32 port_speed;
+ __u32 reserved2;
+};
+
struct qeth_ipacmd_setadpparms_hdr {
__u32 supp_hw_cmds;
__u32 reserved1;
@@ -424,6 +449,7 @@ struct qeth_ipacmd_setadpparms {
struct qeth_snmp_cmd snmp;
struct qeth_set_access_ctrl set_access_ctrl;
struct qeth_query_oat query_oat;
+ struct qeth_query_card_info card_info;
__u32 mode;
} data;
} __attribute__ ((packed));
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 5e1e12c0cf42..0a7325361d29 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2025,7 +2025,8 @@ static struct scsi_host_template driver_template = {
.cmd_per_lun = TW_MAX_CMDS_PER_LUN,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = twa_host_attrs,
- .emulated = 1
+ .emulated = 1,
+ .no_write_same = 1,
};
/* This function will probe and initialize a card */
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index c845bdbeb6c0..4de346017e9f 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1600,7 +1600,8 @@ static struct scsi_host_template driver_template = {
.cmd_per_lun = TW_MAX_CMDS_PER_LUN,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = twl_host_attrs,
- .emulated = 1
+ .emulated = 1,
+ .no_write_same = 1,
};
/* This function will probe and initialize a card */
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index b9276d10b25c..752624e6bc00 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -2279,7 +2279,8 @@ static struct scsi_host_template driver_template = {
.cmd_per_lun = TW_MAX_CMDS_PER_LUN,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = tw_host_attrs,
- .emulated = 1
+ .emulated = 1,
+ .no_write_same = 1,
};
/* This function will probe and initialize a card */
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index f0d432c139d0..4921ed19a027 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1081,6 +1081,7 @@ static struct scsi_host_template aac_driver_template = {
#endif
.use_clustering = ENABLE_CLUSTERING,
.emulated = 1,
+ .no_write_same = 1,
};
static void __aac_shutdown(struct aac_dev * aac)
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 97fd450aff09..4f6a30b8e5f9 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -137,6 +137,7 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
.cmd_per_lun = ARCMSR_MAX_CMD_PERLUN,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = arcmsr_host_attrs,
+ .no_write_same = 1,
};
static struct pci_device_id arcmsr_device_id_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index 94d5d0102f7d..42bcb970445a 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -296,6 +296,7 @@ wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn,
struct bfa_fcs_lport_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs,
u16 vf_id, wwn_t lpwwn);
+void bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port, char *symname);
void bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port,
struct bfa_lport_info_s *port_info);
void bfa_fcs_lport_get_attr(struct bfa_fcs_lport_s *port,
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 2f61a5af3658..f5e4e61a0fd7 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -1097,6 +1097,17 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
}
+void
+bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port,
+ char *symname)
+{
+ strcpy(port->port_cfg.sym_name.symname, symname);
+
+ if (bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online))
+ bfa_fcs_lport_ns_util_send_rspn_id(
+ BFA_FCS_GET_NS_FROM_PORT(port), NULL);
+}
+
/*
* fcs_lport_api
*/
@@ -5140,9 +5151,6 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
u8 *psymbl = &symbl[0];
int len;
- if (!bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online))
- return;
-
/* Avoid sending RSPN in the following states. */
if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_offline) ||
bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_sending) ||
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index e9a681d31223..40be670a1cbc 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -593,11 +593,8 @@ bfad_im_vport_set_symbolic_name(struct fc_vport *fc_vport)
return;
spin_lock_irqsave(&bfad->bfad_lock, flags);
- if (strlen(sym_name) > 0) {
- strcpy(fcs_vport->lport.port_cfg.sym_name.symname, sym_name);
- bfa_fcs_lport_ns_util_send_rspn_id(
- BFA_FCS_GET_NS_FROM_PORT((&fcs_vport->lport)), NULL);
- }
+ if (strlen(sym_name) > 0)
+ bfa_fcs_lport_set_symname(&fcs_vport->lport, sym_name);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index ee4fa40a50b1..ce5ef0190bad 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -4684,6 +4684,7 @@ static struct scsi_host_template gdth_template = {
.cmd_per_lun = GDTH_MAXC_P_L,
.unchecked_isa_dma = 1,
.use_clustering = ENABLE_CLUSTERING,
+ .no_write_same = 1,
};
#ifdef CONFIG_ISA
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index f334859024c0..f2c5005f312a 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -395,6 +395,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost->use_clustering = sht->use_clustering;
shost->ordered_tag = sht->ordered_tag;
shost->eh_deadline = shost_eh_deadline * HZ;
+ shost->no_write_same = sht->no_write_same;
if (sht->supported_mode == MODE_UNKNOWN)
/* means we didn't set it ... default to INITIATOR */
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 22f6432eb475..20a5e6ecf945 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -561,6 +561,7 @@ static struct scsi_host_template hpsa_driver_template = {
.sdev_attrs = hpsa_sdev_attrs,
.shost_attrs = hpsa_shost_attrs,
.max_sectors = 8192,
+ .no_write_same = 1,
};
@@ -1288,7 +1289,7 @@ static void complete_scsi_command(struct CommandList *cp)
"has check condition: aborted command: "
"ASC: 0x%x, ASCQ: 0x%x\n",
cp, asc, ascq);
- cmd->result = DID_SOFT_ERROR << 16;
+ cmd->result |= DID_SOFT_ERROR << 16;
break;
}
/* Must be some other type of check condition */
@@ -4925,7 +4926,7 @@ reinit_after_soft_reset:
hpsa_hba_inquiry(h);
hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
start_controller_lockup_detector(h);
- return 1;
+ return 0;
clean4:
hpsa_free_sg_chain_blocks(h);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 36ac1c34ce97..573f4128b6b6 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6305,7 +6305,8 @@ static struct scsi_host_template driver_template = {
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = ipr_ioa_attrs,
.sdev_attrs = ipr_dev_attrs,
- .proc_name = IPR_NAME
+ .proc_name = IPR_NAME,
+ .no_write_same = 1,
};
/**
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 8d5ea8a1e5a6..52a216f21ae5 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -374,6 +374,7 @@ static struct scsi_host_template ips_driver_template = {
.sg_tablesize = IPS_MAX_SG,
.cmd_per_lun = 3,
.use_clustering = ENABLE_CLUSTERING,
+ .no_write_same = 1,
};
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 161c98efade9..d2895836f9fa 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -211,7 +211,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
qc->tf.nsect = 0;
}
- ata_tf_to_fis(&qc->tf, 1, 0, (u8*)&task->ata_task.fis);
+ ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis);
task->uldd_task = qc;
if (ata_is_atapi(qc->tf.protocol)) {
memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 90c95a3385d1..816db12ef5d5 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4244,6 +4244,7 @@ static struct scsi_host_template megaraid_template = {
.eh_device_reset_handler = megaraid_reset,
.eh_bus_reset_handler = megaraid_reset,
.eh_host_reset_handler = megaraid_reset,
+ .no_write_same = 1,
};
static int
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index d1a4b82836ea..e2237a97cb9d 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -367,6 +367,7 @@ static struct scsi_host_template megaraid_template_g = {
.eh_host_reset_handler = megaraid_reset_handler,
.change_queue_depth = megaraid_change_queue_depth,
.use_clustering = ENABLE_CLUSTERING,
+ .no_write_same = 1,
.sdev_attrs = megaraid_sdev_attrs,
.shost_attrs = megaraid_shost_attrs,
};
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 0a743a5d1647..c99812bf2a73 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -2148,6 +2148,7 @@ static struct scsi_host_template megasas_template = {
.bios_param = megasas_bios_param,
.use_clustering = ENABLE_CLUSTERING,
.change_queue_depth = megasas_change_queue_depth,
+ .no_write_same = 1,
};
/**
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index f16ece91b94a..0a1296a87d66 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3403,6 +3403,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
unsigned long flags;
u8 deviceType = pPayload->sas_identify.dev_type;
port->port_state = portstate;
+ phy->phy_state = PHY_STATE_LINK_UP_SPC;
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n",
port_id, phy_id));
@@ -3483,6 +3484,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d,"
" phy id = %d\n", port_id, phy_id));
port->port_state = portstate;
+ phy->phy_state = PHY_STATE_LINK_UP_SPC;
port->port_attached = 1;
pm8001_get_lrate_mode(phy, link_rate);
phy->phy_type |= PORT_TYPE_SATA;
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index 6d91e2446542..e4867e690c84 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -131,6 +131,10 @@
#define LINKRATE_30 (0x02 << 8)
#define LINKRATE_60 (0x04 << 8)
+/* for phy state */
+
+#define PHY_STATE_LINK_UP_SPC 0x1
+
/* for new SPC controllers MEMBASE III is shared between BIOS and DATA */
#define GSM_SM_BASE 0x4F0000
struct mpi_msg_hdr{
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 34f5f5ffef05..73a120d81b4d 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -175,20 +175,16 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
static void pm8001_tasklet(unsigned long opaque)
{
struct pm8001_hba_info *pm8001_ha;
- u32 vec;
- pm8001_ha = (struct pm8001_hba_info *)opaque;
+ struct isr_param *irq_vector;
+
+ irq_vector = (struct isr_param *)opaque;
+ pm8001_ha = irq_vector->drv_inst;
if (unlikely(!pm8001_ha))
BUG_ON(1);
- vec = pm8001_ha->int_vector;
- PM8001_CHIP_DISP->isr(pm8001_ha, vec);
+ PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id);
}
#endif
-static struct pm8001_hba_info *outq_to_hba(u8 *outq)
-{
- return container_of((outq - *outq), struct pm8001_hba_info, outq[0]);
-}
-
/**
* pm8001_interrupt_handler_msix - main MSIX interrupt handler.
* It obtains the vector number and calls the equivalent bottom
@@ -198,18 +194,20 @@ static struct pm8001_hba_info *outq_to_hba(u8 *outq)
*/
static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque)
{
- struct pm8001_hba_info *pm8001_ha = outq_to_hba(opaque);
- u8 outq = *(u8 *)opaque;
+ struct isr_param *irq_vector;
+ struct pm8001_hba_info *pm8001_ha;
irqreturn_t ret = IRQ_HANDLED;
+ irq_vector = (struct isr_param *)opaque;
+ pm8001_ha = irq_vector->drv_inst;
+
if (unlikely(!pm8001_ha))
return IRQ_NONE;
if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
return IRQ_NONE;
- pm8001_ha->int_vector = outq;
#ifdef PM8001_USE_TASKLET
- tasklet_schedule(&pm8001_ha->tasklet);
+ tasklet_schedule(&pm8001_ha->tasklet[irq_vector->irq_id]);
#else
- ret = PM8001_CHIP_DISP->isr(pm8001_ha, outq);
+ ret = PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id);
#endif
return ret;
}
@@ -230,9 +228,8 @@ static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
return IRQ_NONE;
- pm8001_ha->int_vector = 0;
#ifdef PM8001_USE_TASKLET
- tasklet_schedule(&pm8001_ha->tasklet);
+ tasklet_schedule(&pm8001_ha->tasklet[0]);
#else
ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0);
#endif
@@ -457,7 +454,7 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
{
struct pm8001_hba_info *pm8001_ha;
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
-
+ int j;
pm8001_ha = sha->lldd_ha;
if (!pm8001_ha)
@@ -480,12 +477,14 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
pm8001_ha->iomb_size = IOMB_SIZE_SPC;
#ifdef PM8001_USE_TASKLET
- /**
- * default tasklet for non msi-x interrupt handler/first msi-x
- * interrupt handler
- **/
- tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
- (unsigned long)pm8001_ha);
+ /* Tasklet for the non-MSI-X interrupt handler; one per vector for MSI-X */
+ if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001))
+ tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet,
+ (unsigned long)&(pm8001_ha->irq_vector[0]));
+ else
+ for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
+ tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet,
+ (unsigned long)&(pm8001_ha->irq_vector[j]));
#endif
pm8001_ioremap(pm8001_ha);
if (!pm8001_alloc(pm8001_ha, ent))
@@ -733,19 +732,20 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
"pci_enable_msix request ret:%d no of intr %d\n",
rc, pm8001_ha->number_of_intr));
- for (i = 0; i < number_of_intr; i++)
- pm8001_ha->outq[i] = i;
for (i = 0; i < number_of_intr; i++) {
snprintf(intr_drvname[i], sizeof(intr_drvname[0]),
DRV_NAME"%d", i);
+ pm8001_ha->irq_vector[i].irq_id = i;
+ pm8001_ha->irq_vector[i].drv_inst = pm8001_ha;
+
if (request_irq(pm8001_ha->msix_entries[i].vector,
pm8001_interrupt_handler_msix, flag,
- intr_drvname[i], &pm8001_ha->outq[i])) {
+ intr_drvname[i], &(pm8001_ha->irq_vector[i]))) {
for (j = 0; j < i; j++)
free_irq(
pm8001_ha->msix_entries[j].vector,
- &pm8001_ha->outq[j]);
+ &(pm8001_ha->irq_vector[i]));
pci_disable_msix(pm8001_ha->pdev);
break;
}
@@ -907,7 +907,7 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
{
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct pm8001_hba_info *pm8001_ha;
- int i;
+ int i, j;
pm8001_ha = sha->lldd_ha;
sas_unregister_ha(sha);
sas_remove_host(pm8001_ha->shost);
@@ -921,13 +921,18 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
synchronize_irq(pm8001_ha->msix_entries[i].vector);
for (i = 0; i < pm8001_ha->number_of_intr; i++)
free_irq(pm8001_ha->msix_entries[i].vector,
- &pm8001_ha->outq[i]);
+ &(pm8001_ha->irq_vector[i]));
pci_disable_msix(pdev);
#else
free_irq(pm8001_ha->irq, sha);
#endif
#ifdef PM8001_USE_TASKLET
- tasklet_kill(&pm8001_ha->tasklet);
+ /* For both non-MSI-X and MSI-X interrupts */
+ if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001))
+ tasklet_kill(&pm8001_ha->tasklet[0]);
+ else
+ for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
+ tasklet_kill(&pm8001_ha->tasklet[j]);
#endif
pm8001_free(pm8001_ha);
kfree(sha->sas_phy);
@@ -948,7 +953,7 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct pm8001_hba_info *pm8001_ha;
- int i;
+ int i, j;
u32 device_state;
pm8001_ha = sha->lldd_ha;
flush_workqueue(pm8001_wq);
@@ -964,13 +969,18 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
synchronize_irq(pm8001_ha->msix_entries[i].vector);
for (i = 0; i < pm8001_ha->number_of_intr; i++)
free_irq(pm8001_ha->msix_entries[i].vector,
- &pm8001_ha->outq[i]);
+ &(pm8001_ha->irq_vector[i]));
pci_disable_msix(pdev);
#else
free_irq(pm8001_ha->irq, sha);
#endif
#ifdef PM8001_USE_TASKLET
- tasklet_kill(&pm8001_ha->tasklet);
+ /* For both non-MSI-X and MSI-X interrupts */
+ if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001))
+ tasklet_kill(&pm8001_ha->tasklet[0]);
+ else
+ for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
+ tasklet_kill(&pm8001_ha->tasklet[j]);
#endif
device_state = pci_choose_state(pdev, state);
pm8001_printk("pdev=0x%p, slot=%s, entering "
@@ -993,7 +1003,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct pm8001_hba_info *pm8001_ha;
int rc;
- u8 i = 0;
+ u8 i = 0, j;
u32 device_state;
pm8001_ha = sha->lldd_ha;
device_state = pdev->current_state;
@@ -1033,10 +1043,14 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
if (rc)
goto err_out_disable;
#ifdef PM8001_USE_TASKLET
- /* default tasklet for non msi-x interrupt handler/first msi-x
- * interrupt handler */
- tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
- (unsigned long)pm8001_ha);
+ /* Tasklet for the non-MSI-X interrupt handler; one per vector for MSI-X */
+ if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001))
+ tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet,
+ (unsigned long)&(pm8001_ha->irq_vector[0]));
+ else
+ for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
+ tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet,
+ (unsigned long)&(pm8001_ha->irq_vector[j]));
#endif
PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
if (pm8001_ha->chip_id != chip_8001) {
@@ -1169,6 +1183,7 @@ module_exit(pm8001_exit);
MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>");
MODULE_AUTHOR("Anand Kumar Santhanam <AnandKumar.Santhanam@pmcs.com>");
MODULE_AUTHOR("Sangeetha Gnanasekaran <Sangeetha.Gnanasekaran@pmcs.com>");
+MODULE_AUTHOR("Nikith Ganigarakoppal <Nikith.Ganigarakoppal@pmcs.com>");
MODULE_DESCRIPTION(
"PMC-Sierra PM8001/8081/8088/8089/8074/8076/8077 "
"SAS/SATA controller driver");
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index f4eb18e51631..f50ac44b950e 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -1098,15 +1098,17 @@ int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
struct pm8001_tmf_task tmf_task;
struct pm8001_device *pm8001_dev = dev->lldd_dev;
struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
+ DECLARE_COMPLETION_ONSTACK(completion_setstate);
if (dev_is_sata(dev)) {
struct sas_phy *phy = sas_get_local_phy(dev);
rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
dev, 1, 0);
rc = sas_phy_reset(phy, 1);
sas_put_local_phy(phy);
+ pm8001_dev->setds_completion = &completion_setstate;
rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
pm8001_dev, 0x01);
- msleep(2000);
+ wait_for_completion(&completion_setstate);
} else {
tmf_task.tmf = TMF_LU_RESET;
rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
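The pm8001_lu_reset() change above replaces a fixed msleep(2000) with a completion that the firmware-event handler signals, so the reset waits exactly as long as needed and no longer. A userspace analogue using a condition variable in place of the kernel completion (compile with -lpthread; all names are stand-ins):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int done;

    static void *firmware_event(void *arg)
    {
            usleep(50 * 1000);      /* the work takes however long it takes */
            pthread_mutex_lock(&lock);
            done = 1;               /* analogous to complete(&completion_setstate) */
            pthread_cond_signal(&cond);
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, firmware_event, NULL);
            pthread_mutex_lock(&lock);      /* wait_for_completion(...) */
            while (!done)
                    pthread_cond_wait(&cond, &lock);
            pthread_mutex_unlock(&lock);
            pthread_join(t, NULL);
            puts("device state set");
            return 0;
    }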
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 6037d477a183..6c5fd5ee22d3 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -466,6 +466,10 @@ struct pm8001_hba_memspace {
u64 membase;
u32 memsize;
};
+struct isr_param {
+ struct pm8001_hba_info *drv_inst;
+ u32 irq_id;
+};
struct pm8001_hba_info {
char name[PM8001_NAME_LENGTH];
struct list_head list;
@@ -519,14 +523,13 @@ struct pm8001_hba_info {
int number_of_intr;/*will be used in remove()*/
#endif
#ifdef PM8001_USE_TASKLET
- struct tasklet_struct tasklet;
+ struct tasklet_struct tasklet[PM8001_MAX_MSIX_VEC];
#endif
u32 logging_level;
u32 fw_status;
u32 smp_exp_mode;
- u32 int_vector;
const struct firmware *fw_image;
- u8 outq[PM8001_MAX_MSIX_VEC];
+ struct isr_param irq_vector[PM8001_MAX_MSIX_VEC];
};
struct pm8001_work {
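The new isr_param above gives each MSI-X vector a self-describing cookie (device pointer plus vector id), replacing the old scheme of handing out a bare u8 from outq[] and recovering the device with container_of() pointer arithmetic. A standalone model of the wiring, with stand-in types:

    #include <stdio.h>

    #define MAX_VEC 4

    struct hba;     /* stands in for pm8001_hba_info */

    /* One cookie per vector: carries both the owning device and the
     * vector id, so a handler needs no pointer arithmetic. */
    struct isr_param {
            struct hba *drv_inst;
            unsigned int irq_id;
    };

    struct hba {
            const char *name;
            struct isr_param irq_vector[MAX_VEC];
    };

    static void handler(void *opaque)
    {
            struct isr_param *p = opaque;

            printf("%s: servicing vector %u\n", p->drv_inst->name, p->irq_id);
    }

    int main(void)
    {
            struct hba hba = { .name = "hba0" };

            for (unsigned int i = 0; i < MAX_VEC; i++) {
                    hba.irq_vector[i].drv_inst = &hba;
                    hba.irq_vector[i].irq_id = i;
            }
            handler(&hba.irq_vector[2]);    /* as an irq/tasklet core would */
            return 0;
    }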
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 8987b1706216..c950dc5c9943 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -2894,6 +2894,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
unsigned long flags;
u8 deviceType = pPayload->sas_identify.dev_type;
port->port_state = portstate;
+ phy->phy_state = PHY_STATE_LINK_UP_SPCV;
PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
"portid:%d; phyid:%d; linkrate:%d; "
"portstate:%x; devicetype:%x\n",
@@ -2978,6 +2979,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
port_id, phy_id, link_rate, portstate));
port->port_state = portstate;
+ phy->phy_state = PHY_STATE_LINK_UP_SPCV;
port->port_attached = 1;
pm8001_get_lrate_mode(phy, link_rate);
phy->phy_type |= PORT_TYPE_SATA;
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index c86816bea424..9970a385795d 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -215,6 +215,8 @@
#define SAS_DOPNRJT_RTRY_TMO 128
#define SAS_COPNRJT_RTRY_TMO 128
+/* for phy state */
+#define PHY_STATE_LINK_UP_SPCV 0x2
/*
Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second.
Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index bd6f743d87a7..be8ce54f99b2 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1404,11 +1404,22 @@ enum {
};
#define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1)
+static struct genl_multicast_group pmcraid_mcgrps[] = {
+ { .name = "events", /* not really used - see ID discussion below */ },
+};
+
static struct genl_family pmcraid_event_family = {
- .id = GENL_ID_GENERATE,
+ /*
+ * Due to prior multicast group abuse (the code having assumed that
+ * the family ID can be used as a multicast group ID) we need to
+ * statically allocate a family (and thus group) ID.
+ */
+ .id = GENL_ID_PMCRAID,
.name = "pmcraid",
.version = 1,
- .maxattr = PMCRAID_AEN_ATTR_MAX
+ .maxattr = PMCRAID_AEN_ATTR_MAX,
+ .mcgrps = pmcraid_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(pmcraid_mcgrps),
};
/**
@@ -1511,9 +1522,8 @@ static int pmcraid_notify_aen(
return result;
}
- result =
- genlmsg_multicast(&pmcraid_event_family, skb, 0,
- pmcraid_event_family.id, GFP_ATOMIC);
+ result = genlmsg_multicast(&pmcraid_event_family, skb,
+ 0, 0, GFP_ATOMIC);
/* If there are no listeners, genlmsg_multicast may return non-zero
* value.
@@ -4315,6 +4325,7 @@ static struct scsi_host_template pmcraid_host_template = {
.this_id = -1,
.sg_tablesize = PMCRAID_MAX_IOADLS,
.max_sectors = PMCRAID_IOA_MAX_SECTORS,
+ .no_write_same = 1,
.cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = pmcraid_host_attrs,
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 596480022b0a..38a1257e76e1 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -471,7 +471,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
schedule_delayed_work(&tgt->sess_del_work, 0);
else
schedule_delayed_work(&tgt->sess_del_work,
- jiffies - sess->expires);
+ sess->expires - jiffies);
}
/* ha->hardware_lock supposed to be held on entry */
@@ -550,13 +550,14 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
struct scsi_qla_host *vha = tgt->vha;
struct qla_hw_data *ha = vha->hw;
struct qla_tgt_sess *sess;
- unsigned long flags;
+ unsigned long flags, elapsed;
spin_lock_irqsave(&ha->hardware_lock, flags);
while (!list_empty(&tgt->del_sess_list)) {
sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
del_list_entry);
- if (time_after_eq(jiffies, sess->expires)) {
+ elapsed = jiffies;
+ if (time_after_eq(elapsed, sess->expires)) {
qlt_undelete_sess(sess);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
@@ -566,7 +567,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
ha->tgt.tgt_ops->put_sess(sess);
} else {
schedule_delayed_work(&tgt->sess_del_work,
- jiffies - sess->expires);
+ sess->expires - elapsed);
break;
}
}
@@ -4290,6 +4291,7 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
if (rc != 0) {
ha->tgt.tgt_ops = NULL;
ha->tgt.target_lport_ptr = NULL;
+ scsi_host_put(host);
}
mutex_unlock(&qla_tgt_mutex);
return rc;
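The qla_target changes above fix inverted jiffies arithmetic: for a future expiry the remaining delay is expires - jiffies, and unsigned subtraction keeps that correct even across a jiffies wrap, whereas jiffies - expires yields an enormous bogus delay. A small demonstration with a stand-in jiffies type:

    #include <stdio.h>

    typedef unsigned long jiffies_t;

    /* Remaining delay until 'expires', assuming it was computed as
     * now + timeout; well-defined across unsigned wraparound. */
    static jiffies_t remaining(jiffies_t now, jiffies_t expires)
    {
            return expires - now;
    }

    int main(void)
    {
            jiffies_t now = (jiffies_t)-5;          /* about to wrap */
            jiffies_t expires = now + 100;          /* wraps past zero */

            printf("delay = %lu ticks\n", remaining(now, expires)); /* 100 */
            printf("wrong order = %lu ticks\n",
                   (unsigned long)(now - expires)); /* huge bogus value */
            return 0;
    }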
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index f85b9e5c1f05..7eb19be35d46 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -330,7 +330,7 @@ static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
- return QLA_TPG_ATTRIB(tpg)->generate_node_acls;
+ return tpg->tpg_attrib.generate_node_acls;
}
static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
@@ -338,7 +338,7 @@ static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
- return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls;
+ return tpg->tpg_attrib.cache_dynamic_acls;
}
static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
@@ -346,7 +346,7 @@ static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
- return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect;
+ return tpg->tpg_attrib.demo_mode_write_protect;
}
static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
@@ -354,7 +354,7 @@ static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
- return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect;
+ return tpg->tpg_attrib.prod_mode_write_protect;
}
static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg)
@@ -362,7 +362,7 @@ static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
- return QLA_TPG_ATTRIB(tpg)->demo_mode_login_only;
+ return tpg->tpg_attrib.demo_mode_login_only;
}
static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
@@ -847,7 +847,7 @@ static ssize_t tcm_qla2xxx_tpg_attrib_show_##name( \
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
struct tcm_qla2xxx_tpg, se_tpg); \
\
- return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name); \
+ return sprintf(page, "%u\n", tpg->tpg_attrib.name); \
} \
\
static ssize_t tcm_qla2xxx_tpg_attrib_store_##name( \
@@ -1027,10 +1027,10 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(
* By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
* NodeACLs
*/
- QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1;
- QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1;
- QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1;
- QLA_TPG_ATTRIB(tpg)->demo_mode_login_only = 1;
+ tpg->tpg_attrib.generate_node_acls = 1;
+ tpg->tpg_attrib.demo_mode_write_protect = 1;
+ tpg->tpg_attrib.cache_dynamic_acls = 1;
+ tpg->tpg_attrib.demo_mode_login_only = 1;
ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
@@ -1830,16 +1830,16 @@ static int tcm_qla2xxx_register_configfs(void)
/*
* Setup default attribute lists for various fabric->tf_cit_tmpl
*/
- TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs =
+ fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs =
tcm_qla2xxx_tpg_attrib_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
/*
* Register the fabric for use within TCM
*/
@@ -1870,15 +1870,15 @@ static int tcm_qla2xxx_register_configfs(void)
/*
* Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
*/
- TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
- TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
- TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+ npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
+ npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;
+ npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
+ npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+ npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+ npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
/*
* Register the npiv_fabric for use within TCM
*/
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 329327528a55..771f7b816443 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -45,8 +45,6 @@ struct tcm_qla2xxx_tpg {
struct se_portal_group se_tpg;
};
-#define QLA_TPG_ATTRIB(tpg) (&(tpg)->tpg_attrib)
-
struct tcm_qla2xxx_fc_loopid {
struct se_node_acl *se_nacl;
};
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e6c4bff04339..69725f7c32c1 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2659,6 +2659,12 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
{
struct scsi_device *sdev = sdkp->device;
+ if (sdev->host->no_write_same) {
+ sdev->no_write_same = 1;
+
+ return;
+ }
+
if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
/* overly large values might cause issues with arcmsr */
int vpd_buf_len = 64;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 1a28f5632797..17d740427240 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1697,6 +1697,7 @@ static struct scsi_host_template scsi_driver = {
.use_clustering = DISABLE_CLUSTERING,
/* Make sure we don't get an sg segment that crosses a page boundary */
.dma_boundary = PAGE_SIZE-1,
+ .no_write_same = 1,
};
enum {
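The sd.c and storvsc hunks above cooperate: sd_read_write_same() now honors a host-level no_write_same quirk, and storvsc sets that quirk in its host template. A minimal standalone sketch of the propagation, with hypothetical example_* names:

    /* hedged sketch, standalone C, all names hypothetical */
    struct example_host { int no_write_same; };
    struct example_dev  { struct example_host *host; int no_write_same; };

    static void example_read_write_same(struct example_dev *sdev)
    {
        if (sdev->host->no_write_same) {
            sdev->no_write_same = 1;  /* inherit the host quirk */
            return;                   /* and skip WRITE SAME probing */
        }
        /* ...otherwise probe the device as before... */
    }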
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 3ed666fe840a..9025edd7dc45 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -377,7 +377,7 @@ out_master_put:
static int bcm2835_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+ struct spi_master *master = platform_get_drvdata(pdev);
struct bcm2835_spi *bs = spi_master_get_devdata(master);
free_irq(bs->irq, master);
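This and the following SPI remove() fixes share one rule: the platform drvdata already holds the master pointer stored at probe time, so wrapping it in spi_master_get() takes an extra reference that nothing ever puts, leaking the master on unbind. A kernel-style sketch of the balanced pattern (example_* names are hypothetical):

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>
    #include <linux/spi/spi.h>

    struct example_priv { int irq; };  /* hypothetical driver state */

    static int example_spi_remove(struct platform_device *pdev)
    {
        /* borrow the probe-time pointer; do NOT spi_master_get() it */
        struct spi_master *master = platform_get_drvdata(pdev);
        struct example_priv *priv = spi_master_get_devdata(master);

        free_irq(priv->irq, master);    /* release driver resources */
        spi_unregister_master(master);  /* drops the registration ref */
        return 0;
    }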
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 80d56b214eb5..469ecd876358 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -435,7 +435,7 @@ out:
static int bcm63xx_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+ struct spi_master *master = platform_get_drvdata(pdev);
struct bcm63xx_spi *bs = spi_master_get_devdata(master);
/* reset spi block */
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index b9f0192758d6..6d207afec8cb 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -150,7 +150,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
&dws->tx_sgl,
1,
DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
+ DMA_PREP_INTERRUPT);
txdesc->callback = dw_spi_dma_done;
txdesc->callback_param = dws;
@@ -173,7 +173,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
&dws->rx_sgl,
1,
DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
+ DMA_PREP_INTERRUPT);
rxdesc->callback = dw_spi_dma_done;
rxdesc->callback_param = dws;
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 9602bbd8d7ea..87676587d783 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -557,7 +557,7 @@ free_master:
static int mpc512x_psc_spi_do_remove(struct device *dev)
{
- struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
+ struct spi_master *master = dev_get_drvdata(dev);
struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
clk_disable_unprepare(mps->clk_mclk);
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 73afb56c08cc..3adebfa22e3d 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -565,7 +565,7 @@ static int mxs_spi_remove(struct platform_device *pdev)
struct mxs_spi *spi;
struct mxs_ssp *ssp;
- master = spi_master_get(platform_get_drvdata(pdev));
+ master = platform_get_drvdata(pdev);
spi = spi_master_get_devdata(master);
ssp = &spi->ssp;
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index cb0e1f1137ad..7765b1999537 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1073,6 +1073,8 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
{ "INT33C0", 0 },
{ "INT33C1", 0 },
+ { "INT3430", 0 },
+ { "INT3431", 0 },
{ "80860F0E", 0 },
{ },
};
@@ -1291,6 +1293,9 @@ static int pxa2xx_spi_resume(struct device *dev)
/* Enable the SSP clock */
clk_prepare_enable(ssp->clk);
+ /* Restore LPSS private register bits */
+ lpss_ssp_setup(drv_data);
+
/* Start the queue running */
status = spi_master_resume(drv_data->master);
if (status != 0) {
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 58449ad4ad0d..9e829cee7357 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -885,14 +885,13 @@ static void rspi_release_dma(struct rspi_data *rspi)
static int rspi_remove(struct platform_device *pdev)
{
- struct rspi_data *rspi = spi_master_get(platform_get_drvdata(pdev));
+ struct rspi_data *rspi = platform_get_drvdata(pdev);
spi_unregister_master(rspi->master);
rspi_release_dma(rspi);
free_irq(platform_get_irq(pdev, 0), rspi);
clk_put(rspi->clk);
iounmap(rspi->addr);
- spi_master_put(rspi->master);
return 0;
}
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 0b71270fbf67..4396bd448540 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -161,7 +161,7 @@ static int ti_qspi_setup(struct spi_device *spi)
qspi->spi_max_frequency, clk_div);
ret = pm_runtime_get_sync(qspi->dev);
- if (ret) {
+ if (ret < 0) {
dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
return ret;
}
@@ -459,11 +459,10 @@ static int ti_qspi_probe(struct platform_device *pdev)
if (!of_property_read_u32(np, "num-cs", &num_cs))
master->num_chipselect = num_cs;
- platform_set_drvdata(pdev, master);
-
qspi = spi_master_get_devdata(master);
qspi->master = master;
qspi->dev = &pdev->dev;
+ platform_set_drvdata(pdev, qspi);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -517,10 +516,26 @@ free_master:
static int ti_qspi_remove(struct platform_device *pdev)
{
- struct ti_qspi *qspi = platform_get_drvdata(pdev);
+ struct spi_master *master;
+ struct ti_qspi *qspi;
+ int ret;
+
+ master = platform_get_drvdata(pdev);
+ qspi = spi_master_get_devdata(master);
+
+ ret = pm_runtime_get_sync(qspi->dev);
+ if (ret < 0) {
+ dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
+ return ret;
+ }
ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, QSPI_INTR_ENABLE_CLEAR_REG);
+ pm_runtime_put(qspi->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ spi_unregister_master(master);
+
return 0;
}
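The ti-qspi hunks settle on the usual pm_runtime contract: pm_runtime_get_sync() may return 1 for an already-active device, so only negative values are errors, and every successful get must be balanced by a put before runtime PM is disabled. A minimal sketch under those assumptions:

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int example_remove(struct platform_device *pdev)
    {
        struct device *dev = &pdev->dev;
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0)        /* 1 means "already active", not an error */
            return ret;

        /* ...hardware registers may be touched safely here... */

        pm_runtime_put(dev);       /* balance the get */
        pm_runtime_disable(dev);
        return 0;
    }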
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c
index 637cce2b8bdd..18c9bb2b5f39 100644
--- a/drivers/spi/spi-txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -425,7 +425,7 @@ exit:
static int txx9spi_remove(struct platform_device *dev)
{
- struct spi_master *master = spi_master_get(platform_get_drvdata(dev));
+ struct spi_master *master = platform_get_drvdata(dev);
struct txx9spi *c = spi_master_get_devdata(master);
destroy_workqueue(c->workqueue);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 8d85ddc46011..349ebba4b199 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -357,6 +357,19 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
+static void spi_dev_set_name(struct spi_device *spi)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
+
+ if (adev) {
+ dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
+ return;
+ }
+
+ dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
+ spi->chip_select);
+}
+
/**
* spi_add_device - Add spi_device allocated with spi_alloc_device
* @spi: spi_device to register
@@ -383,9 +396,7 @@ int spi_add_device(struct spi_device *spi)
}
/* Set the bus ID string */
- dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
- spi->chip_select);
-
+ spi_dev_set_name(spi);
/* We need to make sure there's no other device with this
* chipselect **BEFORE** we call setup(), else we'll trash
@@ -1144,7 +1155,7 @@ static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
return AE_NO_MEMORY;
}
- ACPI_HANDLE_SET(&spi->dev, handle);
+ ACPI_COMPANION_SET(&spi->dev, adev);
spi->irq = -1;
INIT_LIST_HEAD(&resource_list);
@@ -1404,7 +1415,7 @@ int devm_spi_register_master(struct device *dev, struct spi_master *master)
return -ENOMEM;
ret = spi_register_master(master);
- if (ret != 0) {
+ if (!ret) {
*ptr = master;
devres_add(dev, ptr);
} else {
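The inverted check above restores the standard devres idiom: commit the resource with devres_add() only when registration succeeded, and devres_free() the slot otherwise. A sketch with a hypothetical thing API:

    #include <linux/device.h>
    #include <linux/slab.h>

    struct thing;                          /* hypothetical resource */
    extern int register_thing(struct thing *t);
    extern void unregister_thing(struct thing *t);

    static void devm_thing_release(struct device *dev, void *res)
    {
        unregister_thing(*(struct thing **)res);
    }

    static int devm_register_thing(struct device *dev, struct thing *t)
    {
        struct thing **ptr;
        int ret;

        ptr = devres_alloc(devm_thing_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
            return -ENOMEM;

        ret = register_thing(t);
        if (!ret) {              /* success: arm the auto-unwind */
            *ptr = t;
            devres_add(dev, ptr);
        } else {
            devres_free(ptr);    /* failure: nothing to unwind */
        }
        return ret;
    }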
diff --git a/drivers/staging/btmtk_usb/btmtk_usb.c b/drivers/staging/btmtk_usb/btmtk_usb.c
index 7a9bf3b57810..9a5ebd6cc512 100644
--- a/drivers/staging/btmtk_usb/btmtk_usb.c
+++ b/drivers/staging/btmtk_usb/btmtk_usb.c
@@ -1284,9 +1284,8 @@ done:
kfree_skb(skb);
}
-static int btmtk_usb_send_frame(struct sk_buff *skb)
+static int btmtk_usb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_dev *hdev = (struct hci_dev *)skb->dev;
struct btmtk_usb_data *data = hci_get_drvdata(hdev);
struct usb_ctrlrequest *dr;
struct urb *urb;
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 8f02bf66e20b..4964d2a2fc7d 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -446,7 +446,7 @@ int comedi_load_firmware(struct comedi_device *dev,
release_firmware(fw);
}
- return ret;
+ return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(comedi_load_firmware);
diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c
index 432e3f9c3301..c55f234b29e6 100644
--- a/drivers/staging/comedi/drivers/8255_pci.c
+++ b/drivers/staging/comedi/drivers/8255_pci.c
@@ -63,7 +63,8 @@ enum pci_8255_boardid {
BOARD_ADLINK_PCI7296,
BOARD_CB_PCIDIO24,
BOARD_CB_PCIDIO24H,
- BOARD_CB_PCIDIO48H,
+ BOARD_CB_PCIDIO48H_OLD,
+ BOARD_CB_PCIDIO48H_NEW,
BOARD_CB_PCIDIO96H,
BOARD_NI_PCIDIO96,
BOARD_NI_PCIDIO96B,
@@ -106,11 +107,16 @@ static const struct pci_8255_boardinfo pci_8255_boards[] = {
.dio_badr = 2,
.n_8255 = 1,
},
- [BOARD_CB_PCIDIO48H] = {
+ [BOARD_CB_PCIDIO48H_OLD] = {
.name = "cb_pci-dio48h",
.dio_badr = 1,
.n_8255 = 2,
},
+ [BOARD_CB_PCIDIO48H_NEW] = {
+ .name = "cb_pci-dio48h",
+ .dio_badr = 2,
+ .n_8255 = 2,
+ },
[BOARD_CB_PCIDIO96H] = {
.name = "cb_pci-dio96h",
.dio_badr = 2,
@@ -263,7 +269,10 @@ static DEFINE_PCI_DEVICE_TABLE(pci_8255_pci_table) = {
{ PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 },
{ PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 },
{ PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H },
- { PCI_VDEVICE(CB, 0x000b), BOARD_CB_PCIDIO48H },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, 0x0000, 0x0000),
+ .driver_data = BOARD_CB_PCIDIO48H_OLD },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, PCI_VENDOR_ID_CB, 0x000b),
+ .driver_data = BOARD_CB_PCIDIO48H_NEW },
{ PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H },
{ PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 },
{ PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B },
diff --git a/drivers/staging/comedi/drivers/pcl730.c b/drivers/staging/comedi/drivers/pcl730.c
index d041b714db29..2baaf1db6fbf 100644
--- a/drivers/staging/comedi/drivers/pcl730.c
+++ b/drivers/staging/comedi/drivers/pcl730.c
@@ -173,11 +173,11 @@ static int pcl730_do_insn_bits(struct comedi_device *dev,
if (mask) {
if (mask & 0x00ff)
outb(s->state & 0xff, dev->iobase + reg);
- if ((mask & 0xff00) & (s->n_chan > 8))
+ if ((mask & 0xff00) && (s->n_chan > 8))
outb((s->state >> 8) & 0xff, dev->iobase + reg + 1);
- if ((mask & 0xff0000) & (s->n_chan > 16))
+ if ((mask & 0xff0000) && (s->n_chan > 16))
outb((s->state >> 16) & 0xff, dev->iobase + reg + 2);
- if ((mask & 0xff000000) & (s->n_chan > 24))
+ if ((mask & 0xff000000) && (s->n_chan > 24))
outb((s->state >> 24) & 0xff, dev->iobase + reg + 3);
}
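The pcl730 fix swaps bitwise '&' for logical '&&': (mask & 0xff00) always has bit 0 clear, so bitwise-ANDing it with a 0-or-1 comparison result was always zero and the branch never executed. A standalone illustration of the corrected predicate:

    #include <stdbool.h>

    /* hypothetical helper isolating the fixed test */
    static bool should_write_high_byte(unsigned int mask, int n_chan)
    {
        /* '&' would AND the masked bits (bit 0 always clear) with a
         * 0/1 truth value, yielding 0 unconditionally; '&&' asks the
         * intended question: */
        return (mask & 0xff00) && (n_chan > 8);
    }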
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index 6815cfe2664e..b486099b543d 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -494,7 +494,7 @@ static void s626_send_dac(struct comedi_device *dev, uint32_t val)
* Private helper function: Write setpoint to an application DAC channel.
*/
static void s626_set_dac(struct comedi_device *dev, uint16_t chan,
- unsigned short dacdata)
+ int16_t dacdata)
{
struct s626_private *devpriv = dev->private;
uint16_t signmask;
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index 933b01a0f03d..0adf3cffddb0 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -465,7 +465,7 @@ static int vmk80xx_do_insn_bits(struct comedi_device *dev,
unsigned char *rx_buf = devpriv->usb_rx_buf;
unsigned char *tx_buf = devpriv->usb_tx_buf;
int reg, cmd;
- int ret;
+ int ret = 0;
if (devpriv->model == VMK8061_MODEL) {
reg = VMK8061_DO_REG;
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
index 68ded17c0f5c..12f333fa59b5 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
@@ -578,7 +578,7 @@ static int request_code_segment(struct ft1000_usb *ft1000dev, u16 **s_file,
u8 **c_file, const u8 *endpoint, bool boot_case)
{
long word_length;
- int status;
+ int status = 0;
/* DEBUG("FT1000:REQUEST_CODE_SEGMENT\n"); */
word_length = get_request_value(ft1000dev);
@@ -1074,4 +1074,3 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
return status;
}
-
diff --git a/drivers/staging/iio/magnetometer/Kconfig b/drivers/staging/iio/magnetometer/Kconfig
index a3ea69e9d800..34634da1f9f7 100644
--- a/drivers/staging/iio/magnetometer/Kconfig
+++ b/drivers/staging/iio/magnetometer/Kconfig
@@ -6,6 +6,8 @@ menu "Magnetometer sensors"
config SENSORS_HMC5843
tristate "Honeywell HMC5843/5883/5883L 3-Axis Magnetometer"
depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say Y here to add support for the Honeywell HMC5843, HMC5883 and
HMC5883L 3-Axis Magnetometer (digital compass).
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index 99421f90d189..0485d7f39867 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -451,7 +451,12 @@ done:
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_SAMP_FREQ), \
.scan_index = idx, \
- .scan_type = IIO_ST('s', 16, 16, IIO_BE), \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
}
static const struct iio_chan_spec hmc5843_channels[] = {
diff --git a/drivers/staging/imx-drm/Makefile b/drivers/staging/imx-drm/Makefile
index 2c3a9e178fb5..8742432d7b01 100644
--- a/drivers/staging/imx-drm/Makefile
+++ b/drivers/staging/imx-drm/Makefile
@@ -8,4 +8,6 @@ obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
obj-$(CONFIG_DRM_IMX_FB_HELPER) += imx-fbdev.o
obj-$(CONFIG_DRM_IMX_IPUV3_CORE) += ipu-v3/
-obj-$(CONFIG_DRM_IMX_IPUV3) += ipuv3-crtc.o ipuv3-plane.o
+
+imx-ipuv3-crtc-objs := ipuv3-crtc.o ipuv3-plane.o
+obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 51aa9772f959..96e4eee344ef 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -72,6 +72,7 @@ int imx_drm_crtc_id(struct imx_drm_crtc *crtc)
{
return crtc->pipe;
}
+EXPORT_SYMBOL_GPL(imx_drm_crtc_id);
static void imx_drm_driver_lastclose(struct drm_device *drm)
{
@@ -87,8 +88,9 @@ static int imx_drm_driver_unload(struct drm_device *drm)
imx_drm_device_put();
- drm_mode_config_cleanup(imxdrm->drm);
+ drm_vblank_cleanup(imxdrm->drm);
drm_kms_helper_poll_fini(imxdrm->drm);
+ drm_mode_config_cleanup(imxdrm->drm);
return 0;
}
@@ -198,8 +200,8 @@ static void imx_drm_driver_preclose(struct drm_device *drm,
if (!file->is_master)
return;
- for (i = 0; i < 4; i++)
- imx_drm_disable_vblank(drm , i);
+ for (i = 0; i < MAX_CRTC; i++)
+ imx_drm_disable_vblank(drm, i);
}
static const struct file_operations imx_drm_driver_fops = {
@@ -375,8 +377,6 @@ static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc)
struct imx_drm_device *imxdrm = __imx_drm_device();
int ret;
- drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc,
- imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
if (ret)
return ret;
@@ -384,6 +384,9 @@ static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc)
drm_crtc_helper_add(imx_drm_crtc->crtc,
imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
+ drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc,
+ imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
+
drm_mode_group_reinit(imxdrm->drm);
return 0;
@@ -427,11 +430,11 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
ret = drm_mode_group_init_legacy_group(imxdrm->drm,
&imxdrm->drm->primary->mode_group);
if (ret)
- goto err_init;
+ goto err_kms;
ret = drm_vblank_init(imxdrm->drm, MAX_CRTC);
if (ret)
- goto err_init;
+ goto err_kms;
/*
* with vblank_disable_allowed = true, vblank interrupt will be disabled
@@ -440,12 +443,19 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
*/
imxdrm->drm->vblank_disable_allowed = true;
- if (!imx_drm_device_get())
+ if (!imx_drm_device_get()) {
ret = -EINVAL;
+ goto err_vblank;
+ }
- ret = 0;
+ mutex_unlock(&imxdrm->mutex);
+ return 0;
-err_init:
+err_vblank:
+ drm_vblank_cleanup(drm);
+err_kms:
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
mutex_unlock(&imxdrm->mutex);
return ret;
@@ -491,6 +501,15 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
mutex_lock(&imxdrm->mutex);
+ /*
+ * The vblank arrays are dimensioned by MAX_CRTC - we can't
+ * pass IDs greater than this to those functions.
+ */
+ if (imxdrm->pipes >= MAX_CRTC) {
+ ret = -EINVAL;
+ goto err_busy;
+ }
+
if (imxdrm->drm->open_count) {
ret = -EBUSY;
goto err_busy;
@@ -527,6 +546,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
return 0;
err_register:
+ list_del(&imx_drm_crtc->list);
kfree(imx_drm_crtc);
err_alloc:
err_busy:
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c
index 680f4c8fa081..2c44fef8d58b 100644
--- a/drivers/staging/imx-drm/imx-tve.c
+++ b/drivers/staging/imx-drm/imx-tve.c
@@ -114,7 +114,6 @@ struct imx_tve {
struct drm_encoder encoder;
struct imx_drm_encoder *imx_drm_encoder;
struct device *dev;
- spinlock_t enable_lock; /* serializes tve_enable/disable */
spinlock_t lock; /* register lock */
bool enabled;
int mode;
@@ -146,10 +145,8 @@ __releases(&tve->lock)
static void tve_enable(struct imx_tve *tve)
{
- unsigned long flags;
int ret;
- spin_lock_irqsave(&tve->enable_lock, flags);
if (!tve->enabled) {
tve->enabled = true;
clk_prepare_enable(tve->clk);
@@ -169,23 +166,18 @@ static void tve_enable(struct imx_tve *tve)
TVE_CD_SM_IEN |
TVE_CD_LM_IEN |
TVE_CD_MON_END_IEN);
-
- spin_unlock_irqrestore(&tve->enable_lock, flags);
}
static void tve_disable(struct imx_tve *tve)
{
- unsigned long flags;
int ret;
- spin_lock_irqsave(&tve->enable_lock, flags);
if (tve->enabled) {
tve->enabled = false;
ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
TVE_IPU_CLK_EN | TVE_EN, 0);
clk_disable_unprepare(tve->clk);
}
- spin_unlock_irqrestore(&tve->enable_lock, flags);
}
static int tve_setup_tvout(struct imx_tve *tve)
@@ -601,7 +593,6 @@ static int imx_tve_probe(struct platform_device *pdev)
tve->dev = &pdev->dev;
spin_lock_init(&tve->lock);
- spin_lock_init(&tve->enable_lock);
ddc_node = of_parse_phandle(np, "ddc", 0);
if (ddc_node) {
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
index 7a22ce619ed2..97ca6924dbb3 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
@@ -996,35 +996,35 @@ static const struct ipu_platform_reg client_reg[] = {
},
};
+static DEFINE_MUTEX(ipu_client_id_mutex);
static int ipu_client_id;
-static int ipu_add_subdevice_pdata(struct device *dev,
- const struct ipu_platform_reg *reg)
-{
- struct platform_device *pdev;
-
- pdev = platform_device_register_data(dev, reg->name, ipu_client_id++,
- &reg->pdata, sizeof(struct ipu_platform_reg));
-
- return PTR_ERR_OR_ZERO(pdev);
-}
-
static int ipu_add_client_devices(struct ipu_soc *ipu)
{
- int ret;
- int i;
+ struct device *dev = ipu->dev;
+ unsigned i;
+ int id, ret;
+
+ mutex_lock(&ipu_client_id_mutex);
+ id = ipu_client_id;
+ ipu_client_id += ARRAY_SIZE(client_reg);
+ mutex_unlock(&ipu_client_id_mutex);
for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
const struct ipu_platform_reg *reg = &client_reg[i];
- ret = ipu_add_subdevice_pdata(ipu->dev, reg);
- if (ret)
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_data(dev, reg->name,
+ id++, &reg->pdata, sizeof(reg->pdata));
+
+ if (IS_ERR(pdev))
goto err_register;
}
return 0;
err_register:
- platform_device_unregister_children(to_platform_device(ipu->dev));
+ platform_device_unregister_children(to_platform_device(dev));
return ret;
}
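The reworked ipu_add_client_devices() reserves a contiguous block of client IDs under a mutex and then performs the registrations outside the lock. A minimal sketch of that reservation pattern (example_* names are hypothetical):

    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_id_mutex);
    static int example_next_id;

    /* reserve a contiguous ID block; registrations happen unlocked */
    static int example_reserve_ids(int count)
    {
        int id;

        mutex_lock(&example_id_mutex);
        id = example_next_id;
        example_next_id += count;        /* the whole range is now ours */
        mutex_unlock(&example_id_mutex);

        return id;                       /* caller owns [id, id + count) */
    }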
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
index 5dec771d70ee..4d340f4a2198 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
@@ -409,8 +409,8 @@ int ptlrpc_stop_pinger(void)
struct l_wait_info lwi = { 0 };
int rc = 0;
- if (!thread_is_init(&pinger_thread) &&
- !thread_is_stopped(&pinger_thread))
+ if (thread_is_init(&pinger_thread) ||
+ thread_is_stopped(&pinger_thread))
return -EALREADY;
ptlrpc_pinger_remove_timeouts();
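The flipped condition makes ptlrpc_stop_pinger() return -EALREADY exactly when the thread never started or has already stopped; the old negated form fired while the pinger was still running. A small standalone model of the corrected predicate:

    #include <stdbool.h>

    /* hypothetical predicate mirroring the fixed test */
    static bool stop_is_noop(bool is_init, bool is_stopped)
    {
        /* old: !is_init && !is_stopped -- true while the thread RUNS,
         * so stopping a live pinger wrongly returned -EALREADY */
        return is_init || is_stopped;   /* nothing to stop */
    }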
diff --git a/drivers/staging/media/go7007/go7007-usb.c b/drivers/staging/media/go7007/go7007-usb.c
index 58684da45e6c..b658c2316df3 100644
--- a/drivers/staging/media/go7007/go7007-usb.c
+++ b/drivers/staging/media/go7007/go7007-usb.c
@@ -15,6 +15,8 @@
* Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -661,7 +663,7 @@ static int go7007_usb_interface_reset(struct go7007 *go)
if (usb->board->flags & GO7007_USB_EZUSB) {
/* Reset buffer in EZ-USB */
- dev_dbg(go->dev, "resetting EZ-USB buffers\n");
+ pr_debug("resetting EZ-USB buffers\n");
if (go7007_usb_vendor_request(go, 0x10, 0, 0, NULL, 0, 0) < 0 ||
go7007_usb_vendor_request(go, 0x10, 0, 0, NULL, 0, 0) < 0)
return -1;
@@ -689,7 +691,7 @@ static int go7007_usb_ezusb_write_interrupt(struct go7007 *go,
u16 status_reg = 0;
int timeout = 500;
- dev_dbg(go->dev, "WriteInterrupt: %04x %04x\n", addr, data);
+ pr_debug("WriteInterrupt: %04x %04x\n", addr, data);
for (i = 0; i < 100; ++i) {
r = usb_control_msg(usb->usbdev,
@@ -734,7 +736,7 @@ static int go7007_usb_onboard_write_interrupt(struct go7007 *go,
int r;
int timeout = 500;
- dev_dbg(go->dev, "WriteInterrupt: %04x %04x\n", addr, data);
+ pr_debug("WriteInterrupt: %04x %04x\n", addr, data);
go->usb_buf[0] = data & 0xff;
go->usb_buf[1] = data >> 8;
@@ -771,7 +773,7 @@ static void go7007_usb_readinterrupt_complete(struct urb *urb)
go->interrupt_available = 1;
go->interrupt_data = __le16_to_cpu(regs[0]);
go->interrupt_value = __le16_to_cpu(regs[1]);
- dev_dbg(go->dev, "ReadInterrupt: %04x %04x\n",
+ pr_debug("ReadInterrupt: %04x %04x\n",
go->interrupt_value, go->interrupt_data);
}
@@ -891,7 +893,7 @@ static int go7007_usb_send_firmware(struct go7007 *go, u8 *data, int len)
int transferred, pipe;
int timeout = 500;
- dev_dbg(go->dev, "DownloadBuffer sending %d bytes\n", len);
+ pr_debug("DownloadBuffer sending %d bytes\n", len);
if (usb->board->flags & GO7007_USB_EZUSB)
pipe = usb_sndbulkpipe(usb->usbdev, 2);
@@ -977,7 +979,7 @@ static int go7007_usb_i2c_master_xfer(struct i2c_adapter *adapter,
!(msgs[i].flags & I2C_M_RD) &&
(msgs[i + 1].flags & I2C_M_RD)) {
#ifdef GO7007_I2C_DEBUG
- dev_dbg(go->dev, "i2c write/read %d/%d bytes on %02x\n",
+ pr_debug("i2c write/read %d/%d bytes on %02x\n",
msgs[i].len, msgs[i + 1].len, msgs[i].addr);
#endif
buf[0] = 0x01;
@@ -988,7 +990,7 @@ static int go7007_usb_i2c_master_xfer(struct i2c_adapter *adapter,
buf[buf_len++] = msgs[++i].len;
} else if (msgs[i].flags & I2C_M_RD) {
#ifdef GO7007_I2C_DEBUG
- dev_dbg(go->dev, "i2c read %d bytes on %02x\n",
+ pr_debug("i2c read %d bytes on %02x\n",
msgs[i].len, msgs[i].addr);
#endif
buf[0] = 0x01;
@@ -998,7 +1000,7 @@ static int go7007_usb_i2c_master_xfer(struct i2c_adapter *adapter,
buf_len = 4;
} else {
#ifdef GO7007_I2C_DEBUG
- dev_dbg(go->dev, "i2c write %d bytes on %02x\n",
+ pr_debug("i2c write %d bytes on %02x\n",
msgs[i].len, msgs[i].addr);
#endif
buf[0] = 0x00;
@@ -1057,7 +1059,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
char *name;
int video_pipe, i, v_urb_len;
- dev_dbg(go->dev, "probing new GO7007 USB board\n");
+ pr_debug("probing new GO7007 USB board\n");
switch (id->driver_info) {
case GO7007_BOARDID_MATRIX_II:
@@ -1097,13 +1099,13 @@ static int go7007_usb_probe(struct usb_interface *intf,
board = &board_px_tv402u;
break;
case GO7007_BOARDID_LIFEVIEW_LR192:
- dev_err(go->dev, "The Lifeview TV Walker Ultra is not supported. Sorry!\n");
+ dev_err(&intf->dev, "The Lifeview TV Walker Ultra is not supported. Sorry!\n");
return -ENODEV;
name = "Lifeview TV Walker Ultra";
board = &board_lifeview_lr192;
break;
case GO7007_BOARDID_SENSORAY_2250:
- dev_info(go->dev, "Sensoray 2250 found\n");
+ dev_info(&intf->dev, "Sensoray 2250 found\n");
name = "Sensoray 2250/2251";
board = &board_sensoray_2250;
break;
@@ -1112,7 +1114,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
board = &board_ads_usbav_709;
break;
default:
- dev_err(go->dev, "unknown board ID %d!\n",
+ dev_err(&intf->dev, "unknown board ID %d!\n",
(unsigned int)id->driver_info);
return -ENODEV;
}
@@ -1247,7 +1249,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
sizeof(go->name));
break;
default:
- dev_dbg(go->dev, "unable to detect tuner type!\n");
+ pr_debug("unable to detect tuner type!\n");
break;
}
/* Configure tuner mode selection inputs connected
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 3066ee2e753b..49ea76b3435d 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -681,7 +681,8 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
dev_err(nvec->dev,
"RX buffer overflow on %p: "
"Trying to write byte %u of %u\n",
- nvec->rx, nvec->rx->pos, NVEC_MSG_SIZE);
+ nvec->rx, nvec->rx ? nvec->rx->pos : 0,
+ NVEC_MSG_SIZE);
break;
default:
nvec->state = 0;
diff --git a/drivers/staging/ozwpan/ozcdev.c b/drivers/staging/ozwpan/ozcdev.c
index 6ce0af9977d8..5de5981b3bba 100644
--- a/drivers/staging/ozwpan/ozcdev.c
+++ b/drivers/staging/ozwpan/ozcdev.c
@@ -448,7 +448,7 @@ int oz_cdev_start(struct oz_pd *pd, int resume)
}
spin_lock(&g_cdev.lock);
if ((g_cdev.active_pd == NULL) &&
- (memcmp(pd->mac_addr, g_cdev.active_addr, ETH_ALEN) == 0)) {
+ ether_addr_equal(pd->mac_addr, g_cdev.active_addr)) {
oz_pd_get(pd);
g_cdev.active_pd = pd;
oz_dbg(ON, "Active PD arrived\n");
diff --git a/drivers/staging/ozwpan/ozproto.c b/drivers/staging/ozwpan/ozproto.c
index 88714ec85705..19a2521ee179 100644
--- a/drivers/staging/ozwpan/ozproto.c
+++ b/drivers/staging/ozwpan/ozproto.c
@@ -9,6 +9,7 @@
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/errno.h>
#include <linux/ieee80211.h>
#include "ozdbg.h"
@@ -180,7 +181,7 @@ static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
spin_lock_bh(&g_polling_lock);
list_for_each(e, &g_pd_list) {
pd2 = container_of(e, struct oz_pd, link);
- if (memcmp(pd2->mac_addr, pd_addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(pd2->mac_addr, pd_addr)) {
free_pd = pd;
pd = pd2;
break;
@@ -597,7 +598,7 @@ struct oz_pd *oz_pd_find(const u8 *mac_addr)
spin_lock_bh(&g_polling_lock);
list_for_each(e, &g_pd_list) {
pd = container_of(e, struct oz_pd, link);
- if (memcmp(pd->mac_addr, mac_addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(pd->mac_addr, mac_addr)) {
atomic_inc(&pd->ref_count);
spin_unlock_bh(&g_polling_lock);
return pd;
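ether_addr_equal() is the dedicated helper for 6-byte MAC comparisons; semantically it matches the memcmp() it replaces in these hunks, though the kernel helper can use more efficient loads. A portable equivalent, for reference:

    #include <stdbool.h>
    #include <string.h>

    #define ETH_ALEN 6  /* octets in an Ethernet address */

    static bool mac_equal(const unsigned char a[ETH_ALEN],
                          const unsigned char b[ETH_ALEN])
    {
        return memcmp(a, b, ETH_ALEN) == 0;
    }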
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 2c678f409573..2f548ebada59 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -1115,6 +1115,9 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
return _FAIL;
}
+ /* fix the flush_cam_entry bug in STOP AP mode */
+ psta->state |= WIFI_AP_STATE;
+ rtw_indicate_connect(padapter);
pmlmepriv->cur_network.join_res = true;/* for check if already set beacon */
return ret;
}
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
index 165b918b8171..1b6d581c438b 100644
--- a/drivers/staging/tidspbridge/Kconfig
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -4,7 +4,7 @@
menuconfig TIDSPBRIDGE
tristate "DSP Bridge driver"
- depends on ARCH_OMAP3 && !ARCH_MULTIPLATFORM
+ depends on ARCH_OMAP3 && !ARCH_MULTIPLATFORM && BROKEN
select MAILBOX
select OMAP2PLUS_MBOX
help
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index 1aa4a3fd0f1b..56e355b3e7fa 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -258,7 +258,8 @@ err:
/* This function maps kernel space memory to user space memory. */
static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
{
- u32 status;
+ struct omap_dsp_platform_data *pdata =
+ omap_dspbridge_dev->dev.platform_data;
/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -268,13 +269,9 @@ static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_start, vma->vm_end, vma->vm_page_prot,
vma->vm_flags);
- status = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
- if (status != 0)
- status = -EAGAIN;
-
- return status;
+ return vm_iomap_memory(vma,
+ pdata->phys_mempool_base,
+ pdata->phys_mempool_size);
}
static const struct file_operations bridge_fops = {
diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
index aab0012bba92..ab8b2ba6eedd 100644
--- a/drivers/staging/vt6655/hostap.c
+++ b/drivers/staging/vt6655/hostap.c
@@ -143,7 +143,8 @@ static int hostap_disable_hostapd(PSDevice pDevice, int rtnl_locked)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
pDevice->dev->name, pDevice->apdev->name);
}
- free_netdev(pDevice->apdev);
+ if (pDevice->apdev)
+ free_netdev(pDevice->apdev);
pDevice->apdev = NULL;
pDevice->bEnable8021x = false;
pDevice->bEnableHostWEP = false;
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index 1e8b8412e67e..4aa5ef54b683 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -939,6 +939,7 @@ int BBbVT3184Init(struct vnt_private *pDevice)
u8 * pbyAgc;
u16 wLengthAgc;
u8 abyArray[256];
+ u8 data;
ntStatus = CONTROLnsRequestIn(pDevice,
MESSAGE_TYPE_READ,
@@ -1104,6 +1105,16 @@ else {
ControlvWriteByte(pDevice,MESSAGE_REQUEST_BBREG,0x0D,0x01);
RFbRFTableDownload(pDevice);
+
+ /* Fix for TX USB resets, from the vendor's driver */
+ CONTROLnsRequestIn(pDevice, MESSAGE_TYPE_READ, USB_REG4,
+ MESSAGE_REQUEST_MEM, sizeof(data), &data);
+
+ data |= 0x2;
+
+ CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_WRITE, USB_REG4,
+ MESSAGE_REQUEST_MEM, sizeof(data), &data);
+
return true;//ntStatus;
}
diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
index ae1676d190c5..67ba48b9a8d9 100644
--- a/drivers/staging/vt6656/hostap.c
+++ b/drivers/staging/vt6656/hostap.c
@@ -133,7 +133,8 @@ static int hostap_disable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
pDevice->dev->name, pDevice->apdev->name);
}
- free_netdev(pDevice->apdev);
+ if (pDevice->apdev)
+ free_netdev(pDevice->apdev);
pDevice->apdev = NULL;
pDevice->bEnable8021x = false;
pDevice->bEnableHostWEP = false;
diff --git a/drivers/staging/vt6656/rndis.h b/drivers/staging/vt6656/rndis.h
index 5e073062017a..5cf5e732a36f 100644
--- a/drivers/staging/vt6656/rndis.h
+++ b/drivers/staging/vt6656/rndis.h
@@ -66,6 +66,8 @@
#define VIAUSB20_PACKET_HEADER 0x04
+#define USB_REG4 0x604
+
typedef struct _CMD_MESSAGE
{
u8 byData[256];
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 79ce363b2ea9..3277d9838f4e 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -652,21 +652,30 @@ static ssize_t reset_store(struct device *dev,
return -ENOMEM;
/* Do not reset an active device! */
- if (bdev->bd_holders)
- return -EBUSY;
+ if (bdev->bd_holders) {
+ ret = -EBUSY;
+ goto out;
+ }
ret = kstrtou16(buf, 10, &do_reset);
if (ret)
- return ret;
+ goto out;
- if (!do_reset)
- return -EINVAL;
+ if (!do_reset) {
+ ret = -EINVAL;
+ goto out;
+ }
/* Make sure all pending I/O is finished */
fsync_bdev(bdev);
+ bdput(bdev);
zram_reset_device(zram, true);
return len;
+
+out:
+ bdput(bdev);
+ return ret;
}
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
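reset_store() now funnels every failure through a single out label so the bdev reference taken earlier is dropped exactly once on all paths, success included. A sketch of that single-exit shape, with hypothetical obj_* helpers:

    #include <linux/errno.h>

    struct obj;                      /* hypothetical refcounted object */
    extern void obj_get(struct obj *o);
    extern void obj_put(struct obj *o);
    extern int obj_busy(struct obj *o);
    extern int obj_do_reset(struct obj *o);

    static int guarded_reset(struct obj *o, int do_it)
    {
        int ret;

        obj_get(o);                  /* reference taken up front */

        if (obj_busy(o)) {
            ret = -EBUSY;
            goto out;
        }
        if (!do_it) {
            ret = -EINVAL;
            goto out;
        }

        obj_put(o);                  /* success path drops it too */
        return obj_do_reset(o);
    out:
        obj_put(o);
        return ret;
    }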
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
index 1a67537dbc56..3b950e5a918f 100644
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ b/drivers/staging/zsmalloc/zsmalloc-main.c
@@ -430,7 +430,12 @@ static struct page *get_next_page(struct page *page)
return next;
}
-/* Encode <page, obj_idx> as a single handle value */
+/*
+ * Encode <page, obj_idx> as a single handle value.
+ * On hardware platforms with physical memory starting at 0x0 the pfn
+ * could be 0, so we ensure that the handle is never 0 by adding 1 to
+ * obj_idx before encoding it.
+ */
static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
{
unsigned long handle;
@@ -441,17 +446,21 @@ static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
}
handle = page_to_pfn(page) << OBJ_INDEX_BITS;
- handle |= (obj_idx & OBJ_INDEX_MASK);
+ handle |= ((obj_idx + 1) & OBJ_INDEX_MASK);
return (void *)handle;
}
-/* Decode <page, obj_idx> pair from the given object handle */
+/*
+ * Decode <page, obj_idx> pair from the given object handle. We adjust the
+ * decoded obj_idx back to its original value since it was adjusted in
+ * obj_location_to_handle().
+ */
static void obj_handle_to_location(unsigned long handle, struct page **page,
unsigned long *obj_idx)
{
*page = pfn_to_page(handle >> OBJ_INDEX_BITS);
- *obj_idx = handle & OBJ_INDEX_MASK;
+ *obj_idx = (handle & OBJ_INDEX_MASK) - 1;
}
static unsigned long obj_idx_to_offset(struct page *page,
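A standalone model of the handle scheme above may help: pfn in the high bits, obj_idx + 1 in the low bits, so even pfn 0 with index 0 yields a non-zero handle (OBJ_INDEX_BITS below is illustrative, not the kernel's computed value):

    #include <assert.h>

    #define OBJ_INDEX_BITS 11        /* illustrative width only */
    #define OBJ_INDEX_MASK ((1UL << OBJ_INDEX_BITS) - 1)

    static unsigned long encode(unsigned long pfn, unsigned long obj_idx)
    {
        return (pfn << OBJ_INDEX_BITS) | ((obj_idx + 1) & OBJ_INDEX_MASK);
    }

    static void decode(unsigned long handle, unsigned long *pfn,
                       unsigned long *obj_idx)
    {
        *pfn = handle >> OBJ_INDEX_BITS;
        *obj_idx = (handle & OBJ_INDEX_MASK) - 1;
    }

    int main(void)
    {
        unsigned long pfn, idx;

        decode(encode(0, 0), &pfn, &idx);
        assert(pfn == 0 && idx == 0);    /* round-trips correctly */
        assert(encode(0, 0) != 0);       /* never a 0 handle */
        return 0;
    }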
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 38e44b9abf0f..00867190413c 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -465,6 +465,7 @@ int iscsit_del_np(struct iscsi_np *np)
*/
send_sig(SIGINT, np->np_thread, 1);
kthread_stop(np->np_thread);
+ np->np_thread = NULL;
}
np->np_transport->iscsit_free_np(np);
@@ -805,14 +806,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
int iscsi_task_attr;
int sam_task_attr;
- spin_lock_bh(&conn->sess->session_stats_lock);
- conn->sess->cmd_pdus++;
- if (conn->sess->se_sess->se_node_acl) {
- spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
- conn->sess->se_sess->se_node_acl->num_cmds++;
- spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
- }
- spin_unlock_bh(&conn->sess->session_stats_lock);
+ atomic_long_inc(&conn->sess->cmd_pdus);
hdr = (struct iscsi_scsi_req *) buf;
payload_length = ntoh24(hdr->dlength);
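This hunk and the later ones convert per-session statistics from spinlock-protected integers to atomic_long_t, so hot-path accounting no longer takes session_stats_lock. The resulting shape, sketched with a hypothetical example_session:

    #include <linux/atomic.h>

    struct example_session {             /* hypothetical, mirrors the hunks */
        atomic_long_t cmd_pdus;
        atomic_long_t rx_data_octets;
    };

    static void account_scsi_cmd(struct example_session *sess,
                                 long payload_len)
    {
        /* lockless increments replace lock/bump/unlock sequences */
        atomic_long_inc(&sess->cmd_pdus);
        atomic_long_add(payload_len, &sess->rx_data_octets);
    }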
@@ -830,24 +824,22 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
(hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
/*
- * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
- * that adds support for RESERVE/RELEASE. There is a bug
- * add with this new functionality that sets R/W bits when
- * neither CDB carries any READ or WRITE datapayloads.
+ * From RFC-3720 Section 10.3.1:
+ *
+ * "Either or both of R and W MAY be 1 when either the
+ * Expected Data Transfer Length and/or Bidirectional Read
+ * Expected Data Transfer Length are 0"
+ *
+ * For this case, go ahead and clear the unnecessary bits
+ * to avoid any confusion with ->data_direction.
*/
- if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
- hdr->flags &= ~ISCSI_FLAG_CMD_READ;
- hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
- goto done;
- }
+ hdr->flags &= ~ISCSI_FLAG_CMD_READ;
+ hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
- pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
+ pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
" set when Expected Data Transfer Length is 0 for"
- " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
- return iscsit_add_reject_cmd(cmd,
- ISCSI_REASON_BOOKMARK_INVALID, buf);
+ " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
}
-done:
if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
!(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
@@ -1254,20 +1246,12 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
int rc;
if (!payload_length) {
- pr_err("DataOUT payload is ZERO, protocol error.\n");
- return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
- buf);
+ pr_warn("DataOUT payload is ZERO, ignoring.\n");
+ return 0;
}
/* iSCSI write */
- spin_lock_bh(&conn->sess->session_stats_lock);
- conn->sess->rx_data_octets += payload_length;
- if (conn->sess->se_sess->se_node_acl) {
- spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
- conn->sess->se_sess->se_node_acl->write_bytes += payload_length;
- spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
- }
- spin_unlock_bh(&conn->sess->session_stats_lock);
+ atomic_long_add(payload_length, &conn->sess->rx_data_octets);
if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
pr_err("DataSegmentLength: %u is greater than"
@@ -1486,7 +1470,7 @@ EXPORT_SYMBOL(iscsit_check_dataout_payload);
static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
{
- struct iscsi_cmd *cmd;
+ struct iscsi_cmd *cmd = NULL;
struct iscsi_data *hdr = (struct iscsi_data *)buf;
int rc;
bool data_crc_failed = false;
@@ -1954,6 +1938,13 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
(unsigned char *)hdr);
}
+ if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL) ||
+ (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)) {
+ pr_err("Multi sequence text commands currently not supported\n");
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_CMD_NOT_SUPPORTED,
+ (unsigned char *)hdr);
+ }
+
pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
" ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
hdr->exp_statsn, payload_length);
@@ -2630,14 +2621,7 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
return -1;
}
- spin_lock_bh(&conn->sess->session_stats_lock);
- conn->sess->tx_data_octets += datain.length;
- if (conn->sess->se_sess->se_node_acl) {
- spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
- conn->sess->se_sess->se_node_acl->read_bytes += datain.length;
- spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
- }
- spin_unlock_bh(&conn->sess->session_stats_lock);
+ atomic_long_add(datain.length, &conn->sess->tx_data_octets);
/*
* Special case for successfully execution w/ both DATAIN
* and Sense Data.
@@ -3162,9 +3146,7 @@ void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
if (inc_stat_sn)
cmd->stat_sn = conn->stat_sn++;
- spin_lock_bh(&conn->sess->session_stats_lock);
- conn->sess->rsp_pdus++;
- spin_unlock_bh(&conn->sess->session_stats_lock);
+ atomic_long_inc(&conn->sess->rsp_pdus);
memset(hdr, 0, ISCSI_HDR_LEN);
hdr->opcode = ISCSI_OP_SCSI_CMD_RSP;
@@ -3374,6 +3356,7 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
struct iscsi_tiqn *tiqn;
struct iscsi_tpg_np *tpg_np;
int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
+ int target_name_printed;
unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
@@ -3411,19 +3394,23 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
continue;
}
- len = sprintf(buf, "TargetName=%s", tiqn->tiqn);
- len += 1;
-
- if ((len + payload_len) > buffer_len) {
- end_of_buf = 1;
- goto eob;
- }
- memcpy(payload + payload_len, buf, len);
- payload_len += len;
+ target_name_printed = 0;
spin_lock(&tiqn->tiqn_tpg_lock);
list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
+ /* If demo_mode_discovery=0 and generate_node_acls=0
+ * (demo mode disabled) do not return
+ * TargetName+TargetAddress unless a NodeACL exists.
+ */
+
+ if ((tpg->tpg_attrib.generate_node_acls == 0) &&
+ (tpg->tpg_attrib.demo_mode_discovery == 0) &&
+ (!core_tpg_get_initiator_node_acl(&tpg->tpg_se_tpg,
+ cmd->conn->sess->sess_ops->InitiatorName))) {
+ continue;
+ }
+
spin_lock(&tpg->tpg_state_lock);
if ((tpg->tpg_state == TPG_STATE_FREE) ||
(tpg->tpg_state == TPG_STATE_INACTIVE)) {
@@ -3438,6 +3425,22 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
struct iscsi_np *np = tpg_np->tpg_np;
bool inaddr_any = iscsit_check_inaddr_any(np);
+ if (!target_name_printed) {
+ len = sprintf(buf, "TargetName=%s",
+ tiqn->tiqn);
+ len += 1;
+
+ if ((len + payload_len) > buffer_len) {
+ spin_unlock(&tpg->tpg_np_lock);
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+ end_of_buf = 1;
+ goto eob;
+ }
+ memcpy(payload + payload_len, buf, len);
+ payload_len += len;
+ target_name_printed = 1;
+ }
+
len = sprintf(buf, "TargetAddress="
"%s:%hu,%hu",
(inaddr_any == false) ?
@@ -4092,9 +4095,7 @@ restart:
* hit default in the switch below.
*/
memset(buffer, 0xff, ISCSI_HDR_LEN);
- spin_lock_bh(&conn->sess->session_stats_lock);
- conn->sess->conn_digest_errors++;
- spin_unlock_bh(&conn->sess->session_stats_lock);
+ atomic_long_inc(&conn->sess->conn_digest_errors);
} else {
pr_debug("Got HeaderDigest CRC32C"
" 0x%08x\n", checksum);
@@ -4381,7 +4382,7 @@ int iscsit_close_connection(
int iscsit_close_session(struct iscsi_session *sess)
{
- struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
if (atomic_read(&sess->nconn)) {
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 7505fddca15f..de77d9aa22c6 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -111,7 +111,7 @@ static struct iscsi_chap *chap_server_open(
/*
* Set Identifier.
*/
- chap->id = ISCSI_TPG_C(conn)->tpg_chap_id++;
+ chap->id = conn->tpg->tpg_chap_id++;
*aic_len += sprintf(aic_str + *aic_len, "CHAP_I=%d", chap->id);
*aic_len += 1;
pr_debug("[server] Sending CHAP_I=%d\n", chap->id);
@@ -146,6 +146,7 @@ static int chap_server_compute_md5(
unsigned char client_digest[MD5_SIGNATURE_SIZE];
unsigned char server_digest[MD5_SIGNATURE_SIZE];
unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
+ size_t compare_len;
struct iscsi_chap *chap = conn->auth_protocol;
struct crypto_hash *tfm;
struct hash_desc desc;
@@ -184,7 +185,9 @@ static int chap_server_compute_md5(
goto out;
}
- if (memcmp(chap_n, auth->userid, strlen(auth->userid)) != 0) {
+ /* Include the terminating NULL in the compare */
+ compare_len = strlen(auth->userid) + 1;
+ if (strncmp(chap_n, auth->userid, compare_len) != 0) {
pr_err("CHAP_N values do not match!\n");
goto out;
}
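Including the terminating NUL in the length is what makes the comparison exact: with compare_len = strlen(userid) + 1, a stored "user" no longer matches an incoming "user2", which the old fixed-prefix memcmp() would have accepted. A standalone sketch:

    #include <stdbool.h>
    #include <string.h>

    static bool chap_name_matches(const char *chap_n, const char *userid)
    {
        size_t compare_len = strlen(userid) + 1;   /* include the NUL */

        /* "user2" vs "user": byte 5 compares '2' to '\0', a mismatch */
        return strncmp(chap_n, userid, compare_len) == 0;
    }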
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index fd145259361d..1c0088fe9e99 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -372,7 +372,7 @@ static ssize_t iscsi_nacl_attrib_show_##name( \
struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
se_node_acl); \
\
- return sprintf(page, "%u\n", ISCSI_NODE_ATTRIB(nacl)->name); \
+ return sprintf(page, "%u\n", nacl->node_attrib.name); \
} \
\
static ssize_t iscsi_nacl_attrib_store_##name( \
@@ -474,7 +474,8 @@ static ssize_t __iscsi_##prefix##_store_##name( \
\
if (!capable(CAP_SYS_ADMIN)) \
return -EPERM; \
- \
+ if (count >= sizeof(auth->name)) \
+ return -EINVAL; \
snprintf(auth->name, sizeof(auth->name), "%s", page); \
if (!strncmp("NULL", auth->name, 4)) \
auth->naf_flags &= ~flags; \
@@ -897,7 +898,7 @@ static struct se_node_acl *lio_target_make_nodeacl(
if (!se_nacl_new)
return ERR_PTR(-ENOMEM);
- cmdsn_depth = ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
+ cmdsn_depth = tpg->tpg_attrib.default_cmdsn_depth;
/*
* se_nacl_new may be released by core_tpg_add_initiator_node_acl()
* when converting a NodeACL from demo mode -> explicit
@@ -920,9 +921,9 @@ static struct se_node_acl *lio_target_make_nodeacl(
return ERR_PTR(-ENOMEM);
}
- stats_cg->default_groups[0] = &NODE_STAT_GRPS(acl)->iscsi_sess_stats_group;
+ stats_cg->default_groups[0] = &acl->node_stat_grps.iscsi_sess_stats_group;
stats_cg->default_groups[1] = NULL;
- config_group_init_type_name(&NODE_STAT_GRPS(acl)->iscsi_sess_stats_group,
+ config_group_init_type_name(&acl->node_stat_grps.iscsi_sess_stats_group,
"iscsi_sess_stats", &iscsi_stat_sess_cit);
return se_nacl;
@@ -967,7 +968,7 @@ static ssize_t iscsi_tpg_attrib_show_##name( \
if (iscsit_get_tpg(tpg) < 0) \
return -EINVAL; \
\
- rb = sprintf(page, "%u\n", ISCSI_TPG_ATTRIB(tpg)->name); \
+ rb = sprintf(page, "%u\n", tpg->tpg_attrib.name); \
iscsit_put_tpg(tpg); \
return rb; \
} \
@@ -1041,6 +1042,16 @@ TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
*/
DEF_TPG_ATTRIB(prod_mode_write_protect);
TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_demo_mode_discovery
+ */
+DEF_TPG_ATTRIB(demo_mode_discovery);
+TPG_ATTR(demo_mode_discovery, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_default_erl
+ */
+DEF_TPG_ATTRIB(default_erl);
+TPG_ATTR(default_erl, S_IRUGO | S_IWUSR);
static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
&iscsi_tpg_attrib_authentication.attr,
@@ -1051,6 +1062,8 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
&iscsi_tpg_attrib_cache_dynamic_acls.attr,
&iscsi_tpg_attrib_demo_mode_write_protect.attr,
&iscsi_tpg_attrib_prod_mode_write_protect.attr,
+ &iscsi_tpg_attrib_demo_mode_discovery.attr,
+ &iscsi_tpg_attrib_default_erl.attr,
NULL,
};
@@ -1514,21 +1527,21 @@ static struct se_wwn *lio_target_call_coreaddtiqn(
return ERR_PTR(-ENOMEM);
}
- stats_cg->default_groups[0] = &WWN_STAT_GRPS(tiqn)->iscsi_instance_group;
- stats_cg->default_groups[1] = &WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group;
- stats_cg->default_groups[2] = &WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group;
- stats_cg->default_groups[3] = &WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group;
- stats_cg->default_groups[4] = &WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group;
+ stats_cg->default_groups[0] = &tiqn->tiqn_stat_grps.iscsi_instance_group;
+ stats_cg->default_groups[1] = &tiqn->tiqn_stat_grps.iscsi_sess_err_group;
+ stats_cg->default_groups[2] = &tiqn->tiqn_stat_grps.iscsi_tgt_attr_group;
+ stats_cg->default_groups[3] = &tiqn->tiqn_stat_grps.iscsi_login_stats_group;
+ stats_cg->default_groups[4] = &tiqn->tiqn_stat_grps.iscsi_logout_stats_group;
stats_cg->default_groups[5] = NULL;
- config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_instance_group,
+ config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group,
"iscsi_instance", &iscsi_stat_instance_cit);
- config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group,
+ config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_sess_err_group,
"iscsi_sess_err", &iscsi_stat_sess_err_cit);
- config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group,
+ config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_tgt_attr_group,
"iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit);
- config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group,
+ config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_login_stats_group,
"iscsi_login_stats", &iscsi_stat_login_cit);
- config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group,
+ config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group,
"iscsi_logout_stats", &iscsi_stat_logout_cit);
pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
@@ -1784,6 +1797,11 @@ static int lio_queue_status(struct se_cmd *se_cmd)
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
cmd->i_state = ISTATE_SEND_STATUS;
+
+ if (cmd->se_cmd.scsi_status || cmd->sense_reason) {
+ iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+ return 0;
+ }
cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd);
return 0;
@@ -1815,21 +1833,21 @@ static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg)
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
- return ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
+ return tpg->tpg_attrib.default_cmdsn_depth;
}
static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg)
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
- return ISCSI_TPG_ATTRIB(tpg)->generate_node_acls;
+ return tpg->tpg_attrib.generate_node_acls;
}
static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
- return ISCSI_TPG_ATTRIB(tpg)->cache_dynamic_acls;
+ return tpg->tpg_attrib.cache_dynamic_acls;
}
static int lio_tpg_check_demo_mode_write_protect(
@@ -1837,7 +1855,7 @@ static int lio_tpg_check_demo_mode_write_protect(
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
- return ISCSI_TPG_ATTRIB(tpg)->demo_mode_write_protect;
+ return tpg->tpg_attrib.demo_mode_write_protect;
}
static int lio_tpg_check_prod_mode_write_protect(
@@ -1845,7 +1863,7 @@ static int lio_tpg_check_prod_mode_write_protect(
{
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
- return ISCSI_TPG_ATTRIB(tpg)->prod_mode_write_protect;
+ return tpg->tpg_attrib.prod_mode_write_protect;
}
static void lio_tpg_release_fabric_acl(
@@ -1908,9 +1926,12 @@ static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
{
struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl,
se_node_acl);
+ struct se_portal_group *se_tpg = se_acl->se_tpg;
+ struct iscsi_portal_group *tpg = container_of(se_tpg,
+ struct iscsi_portal_group, tpg_se_tpg);
- ISCSI_NODE_ATTRIB(acl)->nacl = acl;
- iscsit_set_default_node_attribues(acl);
+ acl->node_attrib.nacl = acl;
+ iscsit_set_default_node_attribues(acl, tpg);
}
static int lio_check_stop_free(struct se_cmd *se_cmd)
@@ -1995,17 +2016,17 @@ int iscsi_target_register_configfs(void)
* Setup default attribute lists for various fabric->tf_cit_tmpl
* struct config_item_type's
*/
- TF_CIT_TMPL(fabric)->tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs;
- TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs;
+ fabric->tf_cit_tmpl.tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs;
+ fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs;
ret = target_fabric_configfs_register(fabric);
if (ret < 0) {
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 9a5721b8ff96..48f7b3bf4e8c 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -37,9 +37,6 @@
#define NA_RANDOM_DATAIN_PDU_OFFSETS 0
#define NA_RANDOM_DATAIN_SEQ_OFFSETS 0
#define NA_RANDOM_R2T_OFFSETS 0
-#define NA_DEFAULT_ERL 0
-#define NA_DEFAULT_ERL_MAX 2
-#define NA_DEFAULT_ERL_MIN 0
/* struct iscsi_tpg_attrib sanity values */
#define TA_AUTHENTICATION 1
@@ -58,6 +55,8 @@
#define TA_DEMO_MODE_WRITE_PROTECT 1
/* Disabled by default in production mode w/ explicit ACLs */
#define TA_PROD_MODE_WRITE_PROTECT 0
+#define TA_DEMO_MODE_DISCOVERY 1
+#define TA_DEFAULT_ERL 0
#define TA_CACHE_CORE_NPS 0
@@ -192,6 +191,7 @@ enum recover_cmdsn_ret_table {
CMDSN_NORMAL_OPERATION = 0,
CMDSN_LOWER_THAN_EXP = 1,
CMDSN_HIGHER_THAN_EXP = 2,
+ CMDSN_MAXCMDSN_OVERRUN = 3,
};
/* Used for iscsi_handle_immediate_data() return values */
@@ -650,14 +650,13 @@ struct iscsi_session {
/* Used for session reference counting */
int session_usage_count;
int session_waiting_on_uc;
- u32 cmd_pdus;
- u32 rsp_pdus;
- u64 tx_data_octets;
- u64 rx_data_octets;
- u32 conn_digest_errors;
- u32 conn_timeout_errors;
+ atomic_long_t cmd_pdus;
+ atomic_long_t rsp_pdus;
+ atomic_long_t tx_data_octets;
+ atomic_long_t rx_data_octets;
+ atomic_long_t conn_digest_errors;
+ atomic_long_t conn_timeout_errors;
u64 creation_time;
- spinlock_t session_stats_lock;
/* Number of active connections */
atomic_t nconn;
atomic_t session_continuation;
@@ -755,11 +754,6 @@ struct iscsi_node_acl {
struct se_node_acl se_node_acl;
};
-#define NODE_STAT_GRPS(nacl) (&(nacl)->node_stat_grps)
-
-#define ISCSI_NODE_ATTRIB(t) (&(t)->node_attrib)
-#define ISCSI_NODE_AUTH(t) (&(t)->node_auth)
-
struct iscsi_tpg_attrib {
u32 authentication;
u32 login_timeout;
@@ -769,6 +763,8 @@ struct iscsi_tpg_attrib {
u32 default_cmdsn_depth;
u32 demo_mode_write_protect;
u32 prod_mode_write_protect;
+ u32 demo_mode_discovery;
+ u32 default_erl;
struct iscsi_portal_group *tpg;
};
@@ -835,12 +831,6 @@ struct iscsi_portal_group {
struct list_head tpg_list;
} ____cacheline_aligned;
-#define ISCSI_TPG_C(c) ((struct iscsi_portal_group *)(c)->tpg)
-#define ISCSI_TPG_LUN(c, l) ((iscsi_tpg_list_t *)(c)->tpg->tpg_lun_list_t[l])
-#define ISCSI_TPG_S(s) ((struct iscsi_portal_group *)(s)->tpg)
-#define ISCSI_TPG_ATTRIB(t) (&(t)->tpg_attrib)
-#define SE_TPG(tpg) (&(tpg)->tpg_se_tpg)
-
struct iscsi_wwn_stat_grps {
struct config_group iscsi_stat_group;
struct config_group iscsi_instance_group;
@@ -871,8 +861,6 @@ struct iscsi_tiqn {
struct iscsi_logout_stats logout_stats;
} ____cacheline_aligned;
-#define WWN_STAT_GRPS(tiqn) (&(tiqn)->tiqn_stat_grps)
-
struct iscsit_global {
/* In core shutdown */
u32 in_shutdown;
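
The struct change above converts six lock-protected counters to atomic_long_t and retires session_stats_lock entirely. A minimal userspace sketch of the same pattern, with C11 atomics standing in for the kernel's atomic_long_t (field names trimmed for illustration):

#include <stdatomic.h>
#include <stdio.h>

struct sess_stats {
	/* Hot-path counters: no lock needed for increment or read. */
	atomic_long cmd_pdus;
	atomic_long tx_data_octets;
};

static void account_cmd_pdu(struct sess_stats *s, long octets)
{
	atomic_fetch_add_explicit(&s->cmd_pdus, 1, memory_order_relaxed);
	atomic_fetch_add_explicit(&s->tx_data_octets, octets,
				  memory_order_relaxed);
}

int main(void)
{
	struct sess_stats s = { 0 };

	account_cmd_pdu(&s, 4096);
	printf("cmd_pdus=%ld tx=%ld\n",
	       atomic_load_explicit(&s.cmd_pdus, memory_order_relaxed),
	       atomic_load_explicit(&s.tx_data_octets, memory_order_relaxed));
	return 0;
}
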
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
index 6c7a5104a4cd..7087c736daa5 100644
--- a/drivers/target/iscsi/iscsi_target_device.c
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -58,11 +58,7 @@ void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess
cmd->maxcmdsn_inc = 1;
- if (!mutex_trylock(&sess->cmdsn_mutex)) {
- sess->max_cmd_sn += 1;
- pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
- return;
- }
+ mutex_lock(&sess->cmdsn_mutex);
sess->max_cmd_sn += 1;
pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
mutex_unlock(&sess->cmdsn_mutex);
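
The branch removed above bumped max_cmd_sn without the lock precisely when mutex_trylock() failed, i.e. when someone else held cmdsn_mutex. A pthread stand-in for the corrected shape (only the field names come from the patch):

#include <pthread.h>

struct session {
	pthread_mutex_t cmdsn_mutex;
	unsigned int max_cmd_sn;
};

/* Taking the mutex unconditionally serializes every update against
 * iscsit_sequence_cmd(), which reads max_cmd_sn under the same lock. */
static void increment_maxcmdsn(struct session *sess)
{
	pthread_mutex_lock(&sess->cmdsn_mutex);
	sess->max_cmd_sn += 1;
	pthread_mutex_unlock(&sess->cmdsn_mutex);
}

int main(void)
{
	struct session s = { PTHREAD_MUTEX_INITIALIZER, 0 };

	increment_maxcmdsn(&s);
	return 0;
}
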
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 41052e512d92..0d1e6ee3e992 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -757,7 +757,7 @@ int iscsit_check_post_dataout(
static void iscsit_handle_time2retain_timeout(unsigned long data)
{
struct iscsi_session *sess = (struct iscsi_session *) data;
- struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
spin_lock_bh(&se_tpg->session_lock);
@@ -785,7 +785,7 @@ static void iscsit_handle_time2retain_timeout(unsigned long data)
tiqn->sess_err_stats.last_sess_failure_type =
ISCSI_SESS_ERR_CXN_TIMEOUT;
tiqn->sess_err_stats.cxn_timeout_errors++;
- sess->conn_timeout_errors++;
+ atomic_long_inc(&sess->conn_timeout_errors);
spin_unlock(&tiqn->sess_err_stats.lock);
}
}
@@ -801,9 +801,9 @@ void iscsit_start_time2retain_handler(struct iscsi_session *sess)
* Only start Time2Retain timer when the associated TPG is still in
* an ACTIVE (eg: not disabled or shutdown) state.
*/
- spin_lock(&ISCSI_TPG_S(sess)->tpg_state_lock);
- tpg_active = (ISCSI_TPG_S(sess)->tpg_state == TPG_STATE_ACTIVE);
- spin_unlock(&ISCSI_TPG_S(sess)->tpg_state_lock);
+ spin_lock(&sess->tpg->tpg_state_lock);
+ tpg_active = (sess->tpg->tpg_state == TPG_STATE_ACTIVE);
+ spin_unlock(&sess->tpg->tpg_state_lock);
if (!tpg_active)
return;
@@ -829,7 +829,7 @@ void iscsit_start_time2retain_handler(struct iscsi_session *sess)
*/
int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
{
- struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 1794c753954a..e29279e6b577 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -305,7 +305,6 @@ static int iscsi_login_zero_tsih_s1(
}
sess->creation_time = get_jiffies_64();
- spin_lock_init(&sess->session_stats_lock);
/*
* The FFP CmdSN window values will be allocated from the TPG's
* Initiator Node's ACL once the login has been successfully completed.
@@ -347,15 +346,15 @@ static int iscsi_login_zero_tsih_s2(
* Assign a new TPG Session Handle. Note this is protected with
* struct iscsi_portal_group->np_login_sem from iscsit_access_np().
*/
- sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
+ sess->tsih = ++sess->tpg->ntsih;
if (!sess->tsih)
- sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
+ sess->tsih = ++sess->tpg->ntsih;
/*
* Create the default params from user-defined values.
*/
if (iscsi_copy_param_list(&conn->param_list,
- ISCSI_TPG_C(conn)->param_list, 1) < 0) {
+ conn->tpg->param_list, 1) < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
return -1;
@@ -380,7 +379,7 @@ static int iscsi_login_zero_tsih_s2(
* In our case, we have already located the struct iscsi_tiqn at this point.
*/
memset(buf, 0, 32);
- sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
+ sprintf(buf, "TargetPortalGroupTag=%hu", sess->tpg->tpgt);
if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
@@ -575,7 +574,7 @@ static int iscsi_login_non_zero_tsih_s2(
iscsi_login_set_conn_values(sess, conn, pdu->cid);
if (iscsi_copy_param_list(&conn->param_list,
- ISCSI_TPG_C(conn)->param_list, 0) < 0) {
+ conn->tpg->param_list, 0) < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
return -1;
@@ -593,7 +592,7 @@ static int iscsi_login_non_zero_tsih_s2(
* In our case, we have already located the struct iscsi_tiqn at this point.
*/
memset(buf, 0, 32);
- sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
+ sprintf(buf, "TargetPortalGroupTag=%hu", sess->tpg->tpgt);
if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
@@ -691,7 +690,7 @@ int iscsi_post_login_handler(
int stop_timer = 0;
struct iscsi_session *sess = conn->sess;
struct se_session *se_sess = sess->se_sess;
- struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
struct iscsi_thread_set *ts;
@@ -1154,7 +1153,7 @@ old_sess_out:
spin_lock_bh(&conn->sess->conn_lock);
if (conn->sess->session_state == TARG_SESS_STATE_FAILED) {
struct se_portal_group *se_tpg =
- &ISCSI_TPG_C(conn)->tpg_se_tpg;
+ &conn->tpg->tpg_se_tpg;
atomic_set(&conn->sess->session_continuation, 0);
spin_unlock_bh(&conn->sess->conn_lock);
@@ -1404,11 +1403,6 @@ old_sess_out:
out:
stop = kthread_should_stop();
- if (!stop && signal_pending(current)) {
- spin_lock_bh(&np->np_thread_lock);
- stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN);
- spin_unlock_bh(&np->np_thread_lock);
- }
/* Wait for another socket.. */
if (!stop)
return 1;
@@ -1416,7 +1410,6 @@ exit:
iscsi_stop_login_thread_timer(np);
spin_lock_bh(&np->np_thread_lock);
np->np_thread_state = ISCSI_NP_THREAD_EXIT;
- np->np_thread = NULL;
spin_unlock_bh(&np->np_thread_lock);
return 0;
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index ef6d836a4d09..83c965c65386 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -88,7 +88,7 @@ int extract_param(
if (len < 0)
return -1;
- if (len > max_length) {
+ if (len >= max_length) {
pr_err("Length of input: %d exceeds max_length:"
" %d\n", len, max_length);
return -1;
@@ -140,7 +140,7 @@ static u32 iscsi_handle_authentication(
iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
se_node_acl);
- auth = ISCSI_NODE_AUTH(iscsi_nacl);
+ auth = &iscsi_nacl->node_auth;
}
} else {
/*
@@ -789,7 +789,7 @@ static int iscsi_target_handle_csg_zero(
return -1;
if (!iscsi_check_negotiated_keys(conn->param_list)) {
- if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
+ if (conn->tpg->tpg_attrib.authentication &&
!strncmp(param->value, NONE, 4)) {
pr_err("Initiator sent AuthMethod=None but"
" Target is enforcing iSCSI Authentication,"
@@ -799,7 +799,7 @@ static int iscsi_target_handle_csg_zero(
return -1;
}
- if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
+ if (conn->tpg->tpg_attrib.authentication &&
!login->auth_complete)
return 0;
@@ -862,7 +862,7 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
}
if (!login->auth_complete &&
- ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) {
+ conn->tpg->tpg_attrib.authentication) {
pr_err("Initiator is requesting CSG: 1, has not been"
" successfully authenticated, and the Target is"
" enforcing iSCSI Authentication, login failed.\n");
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
index 93bdc475eb00..16454a922e2b 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.c
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -33,7 +33,8 @@ static inline char *iscsit_na_get_initiatorname(
}
void iscsit_set_default_node_attribues(
- struct iscsi_node_acl *acl)
+ struct iscsi_node_acl *acl,
+ struct iscsi_portal_group *tpg)
{
struct iscsi_node_attrib *a = &acl->node_attrib;
@@ -44,7 +45,7 @@ void iscsit_set_default_node_attribues(
a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS;
a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS;
a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS;
- a->default_erl = NA_DEFAULT_ERL;
+ a->default_erl = tpg->tpg_attrib.default_erl;
}
int iscsit_na_dataout_timeout(
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.h b/drivers/target/iscsi/iscsi_target_nodeattrib.h
index c970b326ef23..0c69a46a62ec 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.h
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.h
@@ -1,7 +1,8 @@
#ifndef ISCSI_TARGET_NODEATTRIB_H
#define ISCSI_TARGET_NODEATTRIB_H
-extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *);
+extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *,
+ struct iscsi_portal_group *);
extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32);
extern int iscsit_na_dataout_timeout_retries(struct iscsi_node_acl *, u32);
extern int iscsit_na_nopin_timeout(struct iscsi_node_acl *, u32);
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index f788e8b5e855..103395510307 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -792,7 +792,8 @@ static ssize_t iscsi_stat_sess_show_attr_cmd_pdus(
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
- ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&sess->cmd_pdus));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
@@ -815,7 +816,8 @@ static ssize_t iscsi_stat_sess_show_attr_rsp_pdus(
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
- ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&sess->rsp_pdus));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
@@ -838,8 +840,8 @@ static ssize_t iscsi_stat_sess_show_attr_txdata_octs(
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
- ret = snprintf(page, PAGE_SIZE, "%llu\n",
- (unsigned long long)sess->tx_data_octets);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&sess->tx_data_octets));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
@@ -862,8 +864,8 @@ static ssize_t iscsi_stat_sess_show_attr_rxdata_octs(
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
- ret = snprintf(page, PAGE_SIZE, "%llu\n",
- (unsigned long long)sess->rx_data_octets);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&sess->rx_data_octets));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
@@ -886,8 +888,8 @@ static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors(
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
- ret = snprintf(page, PAGE_SIZE, "%u\n",
- sess->conn_digest_errors);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&sess->conn_digest_errors));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
@@ -910,8 +912,8 @@ static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors(
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
- ret = snprintf(page, PAGE_SIZE, "%u\n",
- sess->conn_timeout_errors);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&sess->conn_timeout_errors));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 4faeb47fa5e1..39761837608d 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -223,6 +223,8 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS;
a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT;
a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT;
+ a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY;
+ a->default_erl = TA_DEFAULT_ERL;
}
int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
@@ -237,7 +239,7 @@ int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_gro
if (iscsi_create_default_params(&tpg->param_list) < 0)
goto err_out;
- ISCSI_TPG_ATTRIB(tpg)->tpg = tpg;
+ tpg->tpg_attrib.tpg = tpg;
spin_lock(&tpg->tpg_state_lock);
tpg->tpg_state = TPG_STATE_INACTIVE;
@@ -330,7 +332,7 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
return -EINVAL;
}
- if (ISCSI_TPG_ATTRIB(tpg)->authentication) {
+ if (tpg->tpg_attrib.authentication) {
if (!strcmp(param->value, NONE)) {
ret = iscsi_update_param_value(param, CHAP);
if (ret)
@@ -820,3 +822,39 @@ int iscsit_ta_prod_mode_write_protect(
return 0;
}
+
+int iscsit_ta_demo_mode_discovery(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->demo_mode_discovery = flag;
+ pr_debug("iSCSI_TPG[%hu] - Demo Mode Discovery bit:"
+ " %s\n", tpg->tpgt, (a->demo_mode_discovery) ?
+ "ON" : "OFF");
+
+ return 0;
+}
+
+int iscsit_ta_default_erl(
+ struct iscsi_portal_group *tpg,
+ u32 default_erl)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((default_erl != 0) && (default_erl != 1) && (default_erl != 2)) {
+ pr_err("Illegal value for default_erl: %u\n", default_erl);
+ return -EINVAL;
+ }
+
+ a->default_erl = default_erl;
+ pr_debug("iSCSI_TPG[%hu] - DefaultERL: %u\n", tpg->tpgt, a->default_erl);
+
+ return 0;
+}
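
Both new helpers follow the same store shape: validate against a small allowed range, assign, log. The pattern reduced to a hypothetical generic setter (set_bounded_attr does not exist in the patch):

#include <errno.h>
#include <stdio.h>

/* Reject anything outside [0, max], as iscsit_ta_default_erl() does
 * for ERL values 0..2 and iscsit_ta_demo_mode_discovery() for 0..1. */
static int set_bounded_attr(unsigned int *attr, unsigned int val,
			    unsigned int max, const char *name)
{
	if (val > max) {
		fprintf(stderr, "Illegal value for %s: %u\n", name, val);
		return -EINVAL;
	}
	*attr = val;
	return 0;
}

int main(void)
{
	unsigned int default_erl = 0;

	set_bounded_attr(&default_erl, 2, 2, "default_erl");	/* ok */
	set_bounded_attr(&default_erl, 3, 2, "default_erl");	/* -EINVAL */
	return 0;
}
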
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index b77693e2c209..213c0fc7fdc9 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -37,5 +37,7 @@ extern int iscsit_ta_default_cmdsn_depth(struct iscsi_portal_group *, u32);
extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32);
extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32);
extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32);
#endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index b0cac0c342e1..0819e688a398 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -242,9 +242,9 @@ static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cm
*/
if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) {
pr_err("Received CmdSN: 0x%08x is greater than"
- " MaxCmdSN: 0x%08x, protocol error.\n", cmdsn,
+ " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn,
sess->max_cmd_sn);
- ret = CMDSN_ERROR_CANNOT_RECOVER;
+ ret = CMDSN_MAXCMDSN_OVERRUN;
} else if (cmdsn == sess->exp_cmd_sn) {
sess->exp_cmd_sn++;
@@ -303,14 +303,16 @@ int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
ret = CMDSN_HIGHER_THAN_EXP;
break;
case CMDSN_LOWER_THAN_EXP:
+ case CMDSN_MAXCMDSN_OVERRUN:
+ default:
cmd->i_state = ISTATE_REMOVE;
iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
- ret = cmdsn_ret;
- break;
- default:
- reason = ISCSI_REASON_PROTOCOL_ERROR;
- reject = true;
- ret = cmdsn_ret;
+ /*
+ * Existing callers for iscsit_sequence_cmd() will silently
+ * ignore commands with CMDSN_LOWER_THAN_EXP, so force this
+ * return for CMDSN_MAXCMDSN_OVERRUN as well.
+ */
+ ret = CMDSN_LOWER_THAN_EXP;
break;
}
mutex_unlock(&conn->sess->cmdsn_mutex);
@@ -980,7 +982,7 @@ static void iscsit_handle_nopin_response_timeout(unsigned long data)
tiqn->sess_err_stats.last_sess_failure_type =
ISCSI_SESS_ERR_CXN_TIMEOUT;
tiqn->sess_err_stats.cxn_timeout_errors++;
- conn->sess->conn_timeout_errors++;
+ atomic_long_inc(&conn->sess->conn_timeout_errors);
spin_unlock_bh(&tiqn->sess_err_stats.lock);
}
}
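
The overrun check above leans on iscsi_sna_gt(), a serial-number comparison that stays correct when CmdSN wraps past 0xffffffff. A userspace sketch of that comparison, written in the conventional RFC 1982 form (assumed, not copied from the kernel source):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* a is "greater than" b when a leads b by less than half the
 * 32-bit sequence space, even across wraparound. */
static bool sna_gt(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	printf("%d\n", sna_gt(5, 3));            /* 1 */
	printf("%d\n", sna_gt(2, 0xfffffffeu));  /* 1: wrapped, still ahead */
	printf("%d\n", sna_gt(0xfffffffeu, 2));  /* 0 */
	return 0;
}
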
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 0f6d69dabca1..1b41e6776152 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -135,6 +135,21 @@ static int tcm_loop_change_queue_depth(
return sdev->queue_depth;
}
+static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag)
+{
+ if (sdev->tagged_supported) {
+ scsi_set_tag_type(sdev, tag);
+
+ if (tag)
+ scsi_activate_tcq(sdev, sdev->queue_depth);
+ else
+ scsi_deactivate_tcq(sdev, sdev->queue_depth);
+ } else
+ tag = 0;
+
+ return tag;
+}
+
/*
* Locate the SAM Task Attr from struct scsi_cmnd *
*/
@@ -178,7 +193,10 @@ static void tcm_loop_submission_work(struct work_struct *work)
set_host_byte(sc, DID_NO_CONNECT);
goto out_done;
}
-
+ if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
+ set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
+ goto out_done;
+ }
tl_nexus = tl_hba->tl_nexus;
if (!tl_nexus) {
scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
@@ -233,6 +251,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
}
tl_cmd->sc = sc;
+ tl_cmd->sc_cmd_tag = sc->tag;
INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
queue_work(tcm_loop_workqueue, &tl_cmd->work);
return 0;
@@ -242,41 +261,21 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
* Called from SCSI EH process context to issue a LUN_RESET TMR
* to struct scsi_device
*/
-static int tcm_loop_device_reset(struct scsi_cmnd *sc)
+static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
+ struct tcm_loop_nexus *tl_nexus,
+ int lun, int task, enum tcm_tmreq_table tmr)
{
struct se_cmd *se_cmd = NULL;
- struct se_portal_group *se_tpg;
struct se_session *se_sess;
+ struct se_portal_group *se_tpg;
struct tcm_loop_cmd *tl_cmd = NULL;
- struct tcm_loop_hba *tl_hba;
- struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_tmr *tl_tmr = NULL;
- struct tcm_loop_tpg *tl_tpg;
- int ret = FAILED, rc;
- /*
- * Locate the tcm_loop_hba_t pointer
- */
- tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
- /*
- * Locate the tl_nexus and se_sess pointers
- */
- tl_nexus = tl_hba->tl_nexus;
- if (!tl_nexus) {
- pr_err("Unable to perform device reset without"
- " active I_T Nexus\n");
- return FAILED;
- }
- se_sess = tl_nexus->se_sess;
- /*
- * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
- */
- tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
- se_tpg = &tl_tpg->tl_se_tpg;
+ int ret = TMR_FUNCTION_FAILED, rc;
tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
if (!tl_cmd) {
pr_err("Unable to allocate memory for tl_cmd\n");
- return FAILED;
+ return ret;
}
tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
@@ -287,6 +286,8 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
init_waitqueue_head(&tl_tmr->tl_tmr_wait);
se_cmd = &tl_cmd->tl_se_cmd;
+ se_tpg = &tl_tpg->tl_se_tpg;
+ se_sess = tl_nexus->se_sess;
/*
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
@@ -294,17 +295,23 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
DMA_NONE, MSG_SIMPLE_TAG,
&tl_cmd->tl_sense_buf[0]);
- rc = core_tmr_alloc_req(se_cmd, tl_tmr, TMR_LUN_RESET, GFP_KERNEL);
+ rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
if (rc < 0)
goto release;
+
+ if (tmr == TMR_ABORT_TASK)
+ se_cmd->se_tmr_req->ref_task_tag = task;
+
/*
- * Locate the underlying TCM struct se_lun from sc->device->lun
+ * Locate the underlying TCM struct se_lun
*/
- if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0)
+ if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
+ ret = TMR_LUN_DOES_NOT_EXIST;
goto release;
+ }
/*
- * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp()
- * to wake us up.
+ * Queue the TMR to TCM Core and sleep waiting for
+ * tcm_loop_queue_tm_rsp() to wake us up.
*/
transport_generic_handle_tmr(se_cmd);
wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
@@ -312,8 +319,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
* The TMR LUN_RESET has completed, check the response status and
* then release allocations.
*/
- ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
- SUCCESS : FAILED;
+ ret = se_cmd->se_tmr_req->response;
release:
if (se_cmd)
transport_generic_free_cmd(se_cmd, 1);
@@ -323,6 +329,94 @@ release:
return ret;
}
+static int tcm_loop_abort_task(struct scsi_cmnd *sc)
+{
+ struct tcm_loop_hba *tl_hba;
+ struct tcm_loop_nexus *tl_nexus;
+ struct tcm_loop_tpg *tl_tpg;
+ int ret = FAILED;
+
+ /*
+ * Locate the tcm_loop_hba pointer
+ */
+ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+ /*
+ * Locate the tl_nexus and se_sess pointers
+ */
+ tl_nexus = tl_hba->tl_nexus;
+ if (!tl_nexus) {
+ pr_err("Unable to perform device reset without"
+ " active I_T Nexus\n");
+ return FAILED;
+ }
+
+ /*
+ * Locate the tl_tpg pointer from TargetID in sc->device->id
+ */
+ tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+ ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
+ sc->tag, TMR_ABORT_TASK);
+ return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
+}
+
+/*
+ * Called from SCSI EH process context to issue a LUN_RESET TMR
+ * to struct scsi_device
+ */
+static int tcm_loop_device_reset(struct scsi_cmnd *sc)
+{
+ struct tcm_loop_hba *tl_hba;
+ struct tcm_loop_nexus *tl_nexus;
+ struct tcm_loop_tpg *tl_tpg;
+ int ret = FAILED;
+
+ /*
+ * Locate the tcm_loop_hba pointer
+ */
+ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+ /*
+ * Locate the tl_nexus and se_sess pointers
+ */
+ tl_nexus = tl_hba->tl_nexus;
+ if (!tl_nexus) {
+ pr_err("Unable to perform device reset without"
+ " active I_T Nexus\n");
+ return FAILED;
+ }
+ /*
+ * Locate the tl_tpg pointer from TargetID in sc->device->id
+ */
+ tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+ ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
+ 0, TMR_LUN_RESET);
+ return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
+}
+
+static int tcm_loop_target_reset(struct scsi_cmnd *sc)
+{
+ struct tcm_loop_hba *tl_hba;
+ struct tcm_loop_tpg *tl_tpg;
+
+ /*
+ * Locate the tcm_loop_hba pointer
+ */
+ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+ if (!tl_hba) {
+ pr_err("Unable to perform device reset without"
+ " active I_T Nexus\n");
+ return FAILED;
+ }
+ /*
+ * Locate the tl_tpg pointer from TargetID in sc->device->id
+ */
+ tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+ if (tl_tpg) {
+ tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
+ return SUCCESS;
+ }
+ return FAILED;
+}
+
static int tcm_loop_slave_alloc(struct scsi_device *sd)
{
set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
@@ -331,6 +425,15 @@ static int tcm_loop_slave_alloc(struct scsi_device *sd)
static int tcm_loop_slave_configure(struct scsi_device *sd)
{
+ if (sd->tagged_supported) {
+ scsi_activate_tcq(sd, sd->queue_depth);
+ scsi_adjust_queue_depth(sd, MSG_SIMPLE_TAG,
+ sd->host->cmd_per_lun);
+ } else {
+ scsi_adjust_queue_depth(sd, 0,
+ sd->host->cmd_per_lun);
+ }
+
return 0;
}
@@ -340,7 +443,10 @@ static struct scsi_host_template tcm_loop_driver_template = {
.name = "TCM_Loopback",
.queuecommand = tcm_loop_queuecommand,
.change_queue_depth = tcm_loop_change_queue_depth,
+ .change_queue_type = tcm_loop_change_queue_type,
+ .eh_abort_handler = tcm_loop_abort_task,
.eh_device_reset_handler = tcm_loop_device_reset,
+ .eh_target_reset_handler = tcm_loop_target_reset,
.can_queue = 1024,
.this_id = -1,
.sg_tablesize = 256,
@@ -699,7 +805,10 @@ static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd)
{
- return 1;
+ struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
+ struct tcm_loop_cmd, tl_se_cmd);
+
+ return tl_cmd->sc_cmd_tag;
}
static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
@@ -932,7 +1041,10 @@ static int tcm_loop_drop_nexus(
struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_hba *tl_hba = tpg->tl_hba;
- tl_nexus = tpg->tl_hba->tl_nexus;
+ if (!tl_hba)
+ return -ENODEV;
+
+ tl_nexus = tl_hba->tl_nexus;
if (!tl_nexus)
return -ENODEV;
@@ -1061,8 +1173,56 @@ check_newline:
TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR);
+static ssize_t tcm_loop_tpg_show_transport_status(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+ struct tcm_loop_tpg, tl_se_tpg);
+ const char *status = NULL;
+ ssize_t ret = -EINVAL;
+
+ switch (tl_tpg->tl_transport_status) {
+ case TCM_TRANSPORT_ONLINE:
+ status = "online";
+ break;
+ case TCM_TRANSPORT_OFFLINE:
+ status = "offline";
+ break;
+ default:
+ break;
+ }
+
+ if (status)
+ ret = snprintf(page, PAGE_SIZE, "%s\n", status);
+
+ return ret;
+}
+
+static ssize_t tcm_loop_tpg_store_transport_status(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+ struct tcm_loop_tpg, tl_se_tpg);
+
+ if (!strncmp(page, "online", 6)) {
+ tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
+ return count;
+ }
+ if (!strncmp(page, "offline", 7)) {
+ tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
+ return count;
+ }
+ return -EINVAL;
+}
+
+TF_TPG_BASE_ATTR(tcm_loop, transport_status, S_IRUGO | S_IWUSR);
+
static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
&tcm_loop_tpg_nexus.attr,
+ &tcm_loop_tpg_transport_status.attr,
NULL,
};
@@ -1334,11 +1494,11 @@ static int tcm_loop_register_configfs(void)
/*
* Setup default attribute lists for various fabric->tf_cit_tmpl
*/
- TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
/*
* Once fabric->tf_ops has been setup, now register the fabric for
* use within TCM
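
The refactor folds the SCSI EH entry points into one tcm_loop_issue_tmr() helper, so abort and reset differ only in the TMR opcode and task tag. A skeletal model of that factoring (types and return handling simplified; not the kernel API):

enum tmr_op  { TMR_ABORT_TASK, TMR_LUN_RESET };
enum tmr_rsp { TMR_FUNCTION_COMPLETE, TMR_FUNCTION_FAILED };

/* Stand-in for tcm_loop_issue_tmr(): allocate, queue, wait, report. */
static enum tmr_rsp issue_tmr(int lun, int task_tag, enum tmr_op op)
{
	(void)lun; (void)task_tag; (void)op;
	return TMR_FUNCTION_COMPLETE;	/* placeholder result */
}

/* Each SCSI EH hook becomes a thin wrapper mapping the TMR response
 * onto the midlayer's SUCCESS/FAILED convention. */
static int eh_abort_task(int lun, int tag)
{
	return issue_tmr(lun, tag, TMR_ABORT_TASK) == TMR_FUNCTION_COMPLETE;
}

static int eh_device_reset(int lun)
{
	return issue_tmr(lun, 0, TMR_LUN_RESET) == TMR_FUNCTION_COMPLETE;
}

int main(void)
{
	return (eh_abort_task(0, 42) && eh_device_reset(0)) ? 0 : 1;
}
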
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index dd7a84ee78e1..54c59d0b6608 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -10,6 +10,8 @@
struct tcm_loop_cmd {
/* State of Linux/SCSI CDB+Data descriptor */
u32 sc_cmd_state;
+ /* Tagged command queueing */
+ u32 sc_cmd_tag;
/* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */
struct scsi_cmnd *sc;
/* The TCM I/O descriptor that is accessed via container_of() */
@@ -40,8 +42,12 @@ struct tcm_loop_nacl {
struct se_node_acl se_node_acl;
};
+#define TCM_TRANSPORT_ONLINE 0
+#define TCM_TRANSPORT_OFFLINE 1
+
struct tcm_loop_tpg {
unsigned short tl_tpgt;
+ unsigned short tl_transport_status;
atomic_t tl_tpg_port_count;
struct se_portal_group tl_se_tpg;
struct tcm_loop_hba *tl_hba;
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index e51b09a04d52..24884cac19ce 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -2556,15 +2556,15 @@ static int sbp_register_configfs(void)
/*
* Setup default attribute lists for various fabric->tf_cit_tmpl
*/
- TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = sbp_wwn_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = sbp_wwn_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
ret = target_fabric_configfs_register(fabric);
if (ret < 0) {
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 47244102281e..fdcee326bfbc 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -44,7 +44,7 @@
static sense_reason_t core_alua_check_transition(int state, int *primary);
static int core_alua_set_tg_pt_secondary_state(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
- struct se_port *port, int explict, int offline);
+ struct se_port *port, int explicit, int offline);
static u16 alua_lu_gps_counter;
static u32 alua_lu_gps_count;
@@ -117,12 +117,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
/*
* Set supported ASYMMETRIC ACCESS State bits
*/
- buf[off] = 0x80; /* T_SUP */
- buf[off] |= 0x40; /* O_SUP */
- buf[off] |= 0x8; /* U_SUP */
- buf[off] |= 0x4; /* S_SUP */
- buf[off] |= 0x2; /* AN_SUP */
- buf[off++] |= 0x1; /* AO_SUP */
+ buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
/*
* TARGET PORT GROUP
*/
@@ -175,7 +170,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
if (ext_hdr != 0) {
buf[4] = 0x10;
/*
- * Set the implict transition time (in seconds) for the application
+ * Set the implicit transition time (in seconds) for the application
* client to use as a base for its transition timeout value.
*
* Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
@@ -188,7 +183,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
if (tg_pt_gp)
- buf[5] = tg_pt_gp->tg_pt_gp_implict_trans_secs;
+ buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
}
}
@@ -199,7 +194,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
}
/*
- * SET_TARGET_PORT_GROUPS for explict ALUA operation.
+ * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
*
* See spc4r17 section 6.35
*/
@@ -232,7 +227,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/*
- * Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed
+ * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
* for the local tg_pt_gp.
*/
l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
@@ -251,9 +246,9 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
}
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
- if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)) {
+ if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
- " while TPGS_EXPLICT_ALUA is disabled\n");
+ " while TPGS_EXPLICIT_ALUA is disabled\n");
rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
@@ -330,7 +325,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
} else {
/*
- * Extact the RELATIVE TARGET PORT IDENTIFIER to identify
+ * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
* the Target Port in question for the incoming
* SET_TARGET_PORT_GROUPS op.
*/
@@ -487,7 +482,7 @@ static inline int core_alua_state_transition(
u8 *alua_ascq)
{
/*
- * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITIO as defined by
+ * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
* spc4r17 section 5.9.2.5
*/
switch (cdb[0]) {
@@ -515,9 +510,9 @@ static inline int core_alua_state_transition(
}
/*
- * return 1: Is used to signal LUN not accecsable, and check condition/not ready
+ * return 1: Is used to signal LUN not accessible, and check condition/not ready
* return 0: Used to signal success
- * reutrn -1: Used to signal failure, and invalid cdb field
+ * return -1: Used to signal failure, and invalid cdb field
*/
sense_reason_t
target_alua_state_check(struct se_cmd *cmd)
@@ -566,12 +561,12 @@ target_alua_state_check(struct se_cmd *cmd)
nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
/*
- * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional
+ * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
* statement so the compiler knows explicitly to check this case first.
* For the Optimized ALUA access state case, we want to process the
* incoming fabric cmd ASAP.
*/
- if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED)
+ if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
return 0;
switch (out_alua_state) {
@@ -620,13 +615,13 @@ out:
}
/*
- * Check implict and explict ALUA state change request.
+ * Check implicit and explicit ALUA state change request.
*/
static sense_reason_t
core_alua_check_transition(int state, int *primary)
{
switch (state) {
- case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
+ case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
case ALUA_ACCESS_STATE_STANDBY:
case ALUA_ACCESS_STATE_UNAVAILABLE:
@@ -654,7 +649,7 @@ core_alua_check_transition(int state, int *primary)
static char *core_alua_dump_state(int state)
{
switch (state) {
- case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
+ case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
return "Active/Optimized";
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
return "Active/NonOptimized";
@@ -676,10 +671,10 @@ char *core_alua_dump_status(int status)
switch (status) {
case ALUA_STATUS_NONE:
return "None";
- case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG:
- return "Altered by Explict STPG";
- case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA:
- return "Altered by Implict ALUA";
+ case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
+ return "Altered by Explicit STPG";
+ case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
+ return "Altered by Implicit ALUA";
default:
return "Unknown";
}
@@ -770,7 +765,7 @@ static int core_alua_do_transition_tg_pt(
struct se_node_acl *nacl,
unsigned char *md_buf,
int new_state,
- int explict)
+ int explicit)
{
struct se_dev_entry *se_deve;
struct se_lun_acl *lacl;
@@ -784,9 +779,9 @@ static int core_alua_do_transition_tg_pt(
old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
ALUA_ACCESS_STATE_TRANSITION);
- tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ?
- ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
- ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
+ tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
+ ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
+ ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
/*
* Check for the optional ALUA primary state transition delay
*/
@@ -802,7 +797,7 @@ static int core_alua_do_transition_tg_pt(
* change, a device server shall establish a unit attention
* condition for the initiator port associated with every I_T
* nexus with the additional sense code set to ASYMMETRIC
- * ACCESS STATE CHAGED.
+ * ACCESS STATE CHANGED.
*
* After an explicit target port asymmetric access state
* change, a device server shall establish a unit attention
@@ -821,12 +816,12 @@ static int core_alua_do_transition_tg_pt(
lacl = se_deve->se_lun_acl;
/*
* se_deve->se_lun_acl pointer may be NULL for an
- * entry created without explict Node+MappedLUN ACLs
+ * entry created without explicit Node+MappedLUN ACLs
*/
if (!lacl)
continue;
- if (explict &&
+ if (explicit &&
(nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
(l_port != NULL) && (l_port == port))
continue;
@@ -866,8 +861,8 @@ static int core_alua_do_transition_tg_pt(
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
- " from primary access state %s to %s\n", (explict) ? "explict" :
- "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+ " from primary access state %s to %s\n", (explicit) ? "explicit" :
+ "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
core_alua_dump_state(new_state));
@@ -880,7 +875,7 @@ int core_alua_do_port_transition(
struct se_port *l_port,
struct se_node_acl *l_nacl,
int new_state,
- int explict)
+ int explicit)
{
struct se_device *dev;
struct se_port *port;
@@ -917,7 +912,7 @@ int core_alua_do_port_transition(
* success.
*/
core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
- md_buf, new_state, explict);
+ md_buf, new_state, explicit);
atomic_dec(&lu_gp->lu_gp_ref_cnt);
smp_mb__after_atomic_dec();
kfree(md_buf);
@@ -946,7 +941,7 @@ int core_alua_do_port_transition(
continue;
/*
* If the target behavior port asymmetric access state
- * is changed for any target port group accessiable via
+ * is changed for any target port group accessible via
* a logical unit within a LU group, the target port
* behavior group asymmetric access states for the same
* target port group accessible via other logical units
@@ -970,7 +965,7 @@ int core_alua_do_port_transition(
* success.
*/
core_alua_do_transition_tg_pt(tg_pt_gp, port,
- nacl, md_buf, new_state, explict);
+ nacl, md_buf, new_state, explicit);
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
@@ -987,7 +982,7 @@ int core_alua_do_port_transition(
pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
" Group IDs: %hu %s transition to primary state: %s\n",
config_item_name(&lu_gp->lu_gp_group.cg_item),
- l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict",
+ l_tg_pt_gp->tg_pt_gp_id, (explicit) ? "explicit" : "implicit",
core_alua_dump_state(new_state));
atomic_dec(&lu_gp->lu_gp_ref_cnt);
@@ -1034,7 +1029,7 @@ static int core_alua_update_tpg_secondary_metadata(
static int core_alua_set_tg_pt_secondary_state(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
struct se_port *port,
- int explict,
+ int explicit,
int offline)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
@@ -1061,13 +1056,13 @@ static int core_alua_set_tg_pt_secondary_state(
atomic_set(&port->sep_tg_pt_secondary_offline, 0);
md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
- port->sep_tg_pt_secondary_stat = (explict) ?
- ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
- ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
+ port->sep_tg_pt_secondary_stat = (explicit) ?
+ ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
+ ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
- " to secondary access state: %s\n", (explict) ? "explict" :
- "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+ " to secondary access state: %s\n", (explicit) ? "explicit" :
+ "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1232,7 +1227,7 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
* struct se_device is released via core_alua_free_lu_gp_mem().
*
* If the passed lu_gp does NOT match the default_lu_gp, assume
- * we want to re-assocate a given lu_gp_mem with default_lu_gp.
+ * we want to re-associate a given lu_gp_mem with default_lu_gp.
*/
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
if (lu_gp != default_lu_gp)
@@ -1354,18 +1349,25 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
tg_pt_gp->tg_pt_gp_dev = dev;
tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
- ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
+ ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
/*
- * Enable both explict and implict ALUA support by default
+ * Enable both explicit and implicit ALUA support by default
*/
tg_pt_gp->tg_pt_gp_alua_access_type =
- TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA;
+ TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
/*
* Set the default Active/NonOptimized Delay in milliseconds
*/
tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
- tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS;
+ tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;
+
+ /*
+ * Enable all supported states
+ */
+ tg_pt_gp->tg_pt_gp_alua_supported_states =
+ ALUA_T_SUP | ALUA_O_SUP |
+ ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;
if (def_group) {
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
@@ -1465,7 +1467,7 @@ void core_alua_free_tg_pt_gp(
* been called from target_core_alua_drop_tg_pt_gp().
*
* Here we remove *tg_pt_gp from the global list so that
- * no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS
+ * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
* can be made while we are releasing struct t10_alua_tg_pt_gp.
*/
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
@@ -1501,7 +1503,7 @@ void core_alua_free_tg_pt_gp(
* core_alua_free_tg_pt_gp_mem().
*
* If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
- * assume we want to re-assocate a given tg_pt_gp_mem with
+ * assume we want to re-associate a given tg_pt_gp_mem with
* default_tg_pt_gp.
*/
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1740,13 +1742,13 @@ ssize_t core_alua_show_access_type(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
- if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) &&
- (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA))
- return sprintf(page, "Implict and Explict\n");
- else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)
- return sprintf(page, "Implict\n");
- else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)
- return sprintf(page, "Explict\n");
+ if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
+ (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
+ return sprintf(page, "Implicit and Explicit\n");
+ else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
+ return sprintf(page, "Implicit\n");
+ else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
+ return sprintf(page, "Explicit\n");
else
return sprintf(page, "None\n");
}
@@ -1771,11 +1773,11 @@ ssize_t core_alua_store_access_type(
}
if (tmp == 3)
tg_pt_gp->tg_pt_gp_alua_access_type =
- TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;
+ TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
else if (tmp == 2)
- tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA;
+ tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
else if (tmp == 1)
- tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA;
+ tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
else
tg_pt_gp->tg_pt_gp_alua_access_type = 0;
@@ -1844,14 +1846,14 @@ ssize_t core_alua_store_trans_delay_msecs(
return count;
}
-ssize_t core_alua_show_implict_trans_secs(
+ssize_t core_alua_show_implicit_trans_secs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
- return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implict_trans_secs);
+ return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
}
-ssize_t core_alua_store_implict_trans_secs(
+ssize_t core_alua_store_implicit_trans_secs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
@@ -1861,16 +1863,16 @@ ssize_t core_alua_store_implict_trans_secs(
ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
- pr_err("Unable to extract implict_trans_secs\n");
+ pr_err("Unable to extract implicit_trans_secs\n");
return ret;
}
- if (tmp > ALUA_MAX_IMPLICT_TRANS_SECS) {
- pr_err("Passed implict_trans_secs: %lu, exceeds"
- " ALUA_MAX_IMPLICT_TRANS_SECS: %d\n", tmp,
- ALUA_MAX_IMPLICT_TRANS_SECS);
+ if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
+ pr_err("Passed implicit_trans_secs: %lu, exceeds"
+ " ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
+ ALUA_MAX_IMPLICIT_TRANS_SECS);
return -EINVAL;
}
- tg_pt_gp->tg_pt_gp_implict_trans_secs = (int)tmp;
+ tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;
return count;
}
@@ -1970,8 +1972,8 @@ ssize_t core_alua_store_secondary_status(
return ret;
}
if ((tmp != ALUA_STATUS_NONE) &&
- (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
- (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
+ (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
+ (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
pr_err("Illegal value for alua_tg_pt_status: %lu\n",
tmp);
return -EINVAL;
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index e539c3e7f4ad..88e2e835f14a 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -7,15 +7,15 @@
* from spc4r17 section 6.4.2 Table 135
*/
#define TPGS_NO_ALUA 0x00
-#define TPGS_IMPLICT_ALUA 0x10
-#define TPGS_EXPLICT_ALUA 0x20
+#define TPGS_IMPLICIT_ALUA 0x10
+#define TPGS_EXPLICIT_ALUA 0x20
/*
* ASYMMETRIC ACCESS STATE field
*
* from spc4r17 section 6.27 Table 245
*/
-#define ALUA_ACCESS_STATE_ACTIVE_OPTMIZED 0x0
+#define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED 0x0
#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1
#define ALUA_ACCESS_STATE_STANDBY 0x2
#define ALUA_ACCESS_STATE_UNAVAILABLE 0x3
@@ -23,13 +23,24 @@
#define ALUA_ACCESS_STATE_TRANSITION 0xf
/*
+ * from spc4r36j section 6.37 Table 306
+ */
+#define ALUA_T_SUP 0x80
+#define ALUA_O_SUP 0x40
+#define ALUA_LBD_SUP 0x10
+#define ALUA_U_SUP 0x08
+#define ALUA_S_SUP 0x04
+#define ALUA_AN_SUP 0x02
+#define ALUA_AO_SUP 0x01
+
+/*
* REPORT_TARGET_PORT_GROUP STATUS CODE
*
* from spc4r17 section 6.27 Table 246
*/
#define ALUA_STATUS_NONE 0x00
-#define ALUA_STATUS_ALTERED_BY_EXPLICT_STPG 0x01
-#define ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA 0x02
+#define ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG 0x01
+#define ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA 0x02
/*
* From spc4r17, Table D.1: ASC and ASCQ Assignment
@@ -46,17 +57,17 @@
#define ALUA_DEFAULT_NONOP_DELAY_MSECS 100
#define ALUA_MAX_NONOP_DELAY_MSECS 10000 /* 10 seconds */
/*
- * Used for implict and explict ALUA transitional delay, that is disabled
+ * Used for implicit and explicit ALUA transitional delay, that is disabled
* by default, and is intended to be used for debugging client side ALUA code.
*/
#define ALUA_DEFAULT_TRANS_DELAY_MSECS 0
#define ALUA_MAX_TRANS_DELAY_MSECS 30000 /* 30 seconds */
/*
- * Used for the recommended application client implict transition timeout
+ * Used for the recommended application client implicit transition timeout
* in seconds, returned by the REPORT_TARGET_PORT_GROUPS w/ extended header.
*/
-#define ALUA_DEFAULT_IMPLICT_TRANS_SECS 0
-#define ALUA_MAX_IMPLICT_TRANS_SECS 255
+#define ALUA_DEFAULT_IMPLICIT_TRANS_SECS 0
+#define ALUA_MAX_IMPLICIT_TRANS_SECS 255
/*
* Used by core_alua_update_tpg_primary_metadata() and
* core_alua_update_tpg_secondary_metadata()
@@ -113,9 +124,9 @@ extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
char *);
extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
const char *, size_t);
-extern ssize_t core_alua_show_implict_trans_secs(struct t10_alua_tg_pt_gp *,
+extern ssize_t core_alua_show_implicit_trans_secs(struct t10_alua_tg_pt_gp *,
char *);
-extern ssize_t core_alua_store_implict_trans_secs(struct t10_alua_tg_pt_gp *,
+extern ssize_t core_alua_store_implicit_trans_secs(struct t10_alua_tg_pt_gp *,
const char *, size_t);
extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *,
char *);
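
These bit definitions feed both the REPORT TARGET PORT GROUPS payload and the new per-state configfs attributes. A compilable sketch of composing and editing that supported-states byte, reusing the same bit values:

#include <stdint.h>
#include <stdio.h>

#define ALUA_T_SUP   0x80	/* transitioning */
#define ALUA_O_SUP   0x40	/* offline */
#define ALUA_LBD_SUP 0x10	/* logical block dependent */
#define ALUA_U_SUP   0x08	/* unavailable */
#define ALUA_S_SUP   0x04	/* standby */
#define ALUA_AN_SUP  0x02	/* active/non-optimized */
#define ALUA_AO_SUP  0x01	/* active/optimized */

int main(void)
{
	/* Default mask set in core_alua_allocate_tg_pt_gp(): every
	 * state except LBA-dependent. */
	uint8_t supported = ALUA_T_SUP | ALUA_O_SUP | ALUA_U_SUP |
			    ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;

	/* Writing 0 to the alua_support_offline attribute clears one bit. */
	supported &= (uint8_t)~ALUA_O_SUP;

	printf("supported-states byte: 0x%02x\n", supported);	/* 0x8f */
	return 0;
}
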
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 82e81c542e43..272755d03e5a 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -177,16 +177,16 @@ static struct config_group *target_core_register_fabric(
* struct target_fabric_configfs *tf will contain a usage reference.
*/
pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
- &TF_CIT_TMPL(tf)->tfc_wwn_cit);
+ &tf->tf_cit_tmpl.tfc_wwn_cit);
tf->tf_group.default_groups = tf->tf_default_groups;
tf->tf_group.default_groups[0] = &tf->tf_disc_group;
tf->tf_group.default_groups[1] = NULL;
config_group_init_type_name(&tf->tf_group, name,
- &TF_CIT_TMPL(tf)->tfc_wwn_cit);
+ &tf->tf_cit_tmpl.tfc_wwn_cit);
config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
- &TF_CIT_TMPL(tf)->tfc_discovery_cit);
+ &tf->tf_cit_tmpl.tfc_discovery_cit);
pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
" %s\n", tf->tf_group.cg_item.ci_name);
@@ -2036,7 +2036,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
int new_state, ret;
if (!tg_pt_gp->tg_pt_gp_valid_id) {
- pr_err("Unable to do implict ALUA on non valid"
+ pr_err("Unable to do implicit ALUA on non valid"
" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
return -EINVAL;
}
@@ -2049,9 +2049,9 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
}
new_state = (int)tmp;
- if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) {
- pr_err("Unable to process implict configfs ALUA"
- " transition while TPGS_IMPLICT_ALUA is disabled\n");
+ if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) {
+ pr_err("Unable to process implicit configfs ALUA"
+ " transition while TPGS_IMPLICIT_ALUA is disabled\n");
return -EINVAL;
}
@@ -2097,8 +2097,8 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
new_status = (int)tmp;
if ((new_status != ALUA_STATUS_NONE) &&
- (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
- (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
+ (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
+ (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
pr_err("Illegal ALUA access status: 0x%02x\n",
new_status);
return -EINVAL;
@@ -2131,6 +2131,90 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type(
SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR);
/*
+ * alua_supported_states
+ */
+
+#define SE_DEV_ALUA_SUPPORT_STATE_SHOW(_name, _var, _bit) \
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_support_##_name( \
+ struct t10_alua_tg_pt_gp *t, char *p) \
+{ \
+ return sprintf(p, "%d\n", !!(t->_var & _bit)); \
+}
+
+#define SE_DEV_ALUA_SUPPORT_STATE_STORE(_name, _var, _bit) \
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_##_name(\
+ struct t10_alua_tg_pt_gp *t, const char *p, size_t c) \
+{ \
+ unsigned long tmp; \
+ int ret; \
+ \
+ if (!t->tg_pt_gp_valid_id) { \
+ pr_err("Unable to do set ##_name ALUA state on non" \
+ " valid tg_pt_gp ID: %hu\n", \
+ t->tg_pt_gp_valid_id); \
+ return -EINVAL; \
+ } \
+ \
+ ret = kstrtoul(p, 0, &tmp); \
+ if (ret < 0) { \
+ pr_err("Invalid value '%s', must be '0' or '1'\n", p); \
+ return -EINVAL; \
+ } \
+ if (tmp > 1) { \
+ pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
+ return -EINVAL; \
+ } \
+ if (tmp) \
+ t->_var |= _bit; \
+ else \
+ t->_var &= ~_bit; \
+ \
+ return c; \
+}
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(transitioning,
+ tg_pt_gp_alua_supported_states, ALUA_T_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(transitioning,
+ tg_pt_gp_alua_supported_states, ALUA_T_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_transitioning, S_IRUGO | S_IWUSR);
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(offline,
+ tg_pt_gp_alua_supported_states, ALUA_O_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(offline,
+ tg_pt_gp_alua_supported_states, ALUA_O_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_offline, S_IRUGO | S_IWUSR);
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(lba_dependent,
+ tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(lba_dependent,
+ tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO | S_IWUSR);
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(unavailable,
+ tg_pt_gp_alua_supported_states, ALUA_U_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(unavailable,
+ tg_pt_gp_alua_supported_states, ALUA_U_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_unavailable, S_IRUGO | S_IWUSR);
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(standby,
+ tg_pt_gp_alua_supported_states, ALUA_S_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(standby,
+ tg_pt_gp_alua_supported_states, ALUA_S_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_standby, S_IRUGO | S_IWUSR);
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_optimized,
+ tg_pt_gp_alua_supported_states, ALUA_AO_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(active_optimized,
+ tg_pt_gp_alua_supported_states, ALUA_AO_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_optimized, S_IRUGO | S_IWUSR);
+
+SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_nonoptimized,
+ tg_pt_gp_alua_supported_states, ALUA_AN_SUP);
+SE_DEV_ALUA_SUPPORT_STATE_STORE(active_nonoptimized,
+ tg_pt_gp_alua_supported_states, ALUA_AN_SUP);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_nonoptimized, S_IRUGO | S_IWUSR);
+
+/*
* alua_write_metadata
*/
static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata(
@@ -2210,24 +2294,24 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(
SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);
/*
- * implict_trans_secs
+ * implicit_trans_secs
*/
-static ssize_t target_core_alua_tg_pt_gp_show_attr_implict_trans_secs(
+static ssize_t target_core_alua_tg_pt_gp_show_attr_implicit_trans_secs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
- return core_alua_show_implict_trans_secs(tg_pt_gp, page);
+ return core_alua_show_implicit_trans_secs(tg_pt_gp, page);
}
-static ssize_t target_core_alua_tg_pt_gp_store_attr_implict_trans_secs(
+static ssize_t target_core_alua_tg_pt_gp_store_attr_implicit_trans_secs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
{
- return core_alua_store_implict_trans_secs(tg_pt_gp, page, count);
+ return core_alua_store_implicit_trans_secs(tg_pt_gp, page, count);
}
-SE_DEV_ALUA_TG_PT_ATTR(implict_trans_secs, S_IRUGO | S_IWUSR);
+SE_DEV_ALUA_TG_PT_ATTR(implicit_trans_secs, S_IRUGO | S_IWUSR);
/*
* preferred
@@ -2350,10 +2434,17 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
&target_core_alua_tg_pt_gp_alua_access_state.attr,
&target_core_alua_tg_pt_gp_alua_access_status.attr,
&target_core_alua_tg_pt_gp_alua_access_type.attr,
+ &target_core_alua_tg_pt_gp_alua_support_transitioning.attr,
+ &target_core_alua_tg_pt_gp_alua_support_offline.attr,
+ &target_core_alua_tg_pt_gp_alua_support_lba_dependent.attr,
+ &target_core_alua_tg_pt_gp_alua_support_unavailable.attr,
+ &target_core_alua_tg_pt_gp_alua_support_standby.attr,
+ &target_core_alua_tg_pt_gp_alua_support_active_nonoptimized.attr,
+ &target_core_alua_tg_pt_gp_alua_support_active_optimized.attr,
&target_core_alua_tg_pt_gp_alua_write_metadata.attr,
&target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,
&target_core_alua_tg_pt_gp_trans_delay_msecs.attr,
- &target_core_alua_tg_pt_gp_implict_trans_secs.attr,
+ &target_core_alua_tg_pt_gp_implicit_trans_secs.attr,
&target_core_alua_tg_pt_gp_preferred.attr,
&target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,
&target_core_alua_tg_pt_gp_members.attr,
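
The SE_DEV_ALUA_SUPPORT_STATE_SHOW/STORE pair stamps out one near-identical handler per state bit. A userspace reduction of that macro pattern, with the store written in the non-inverted form (enable sets the bit):

#include <stdio.h>

struct group { unsigned int states; };

#define DEFINE_BIT_ACCESSORS(name, bit)				\
static int show_##name(const struct group *g)			\
{								\
	return !!(g->states & (bit));				\
}								\
static void store_##name(struct group *g, int enable)		\
{								\
	if (enable)						\
		g->states |= (bit);				\
	else							\
		g->states &= ~(bit);				\
}

DEFINE_BIT_ACCESSORS(standby, 0x04)
DEFINE_BIT_ACCESSORS(offline, 0x40)

int main(void)
{
	struct group g = { .states = 0 };

	store_standby(&g, 1);
	store_offline(&g, 1);
	store_offline(&g, 0);
	printf("standby=%d offline=%d\n", show_standby(&g), show_offline(&g));
	return 0;
}
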
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index d90dbb0f1a69..d06de84b069b 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -92,6 +92,9 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+
+ percpu_ref_get(&se_lun->lun_ref);
+ se_cmd->lun_ref_active = true;
}
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -119,24 +122,20 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->orig_fe_lun = 0;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+
+ percpu_ref_get(&se_lun->lun_ref);
+ se_cmd->lun_ref_active = true;
}
/* Directly associate cmd with se_dev */
se_cmd->se_dev = se_lun->lun_se_dev;
- /* TODO: get rid of this and use atomics for stats */
dev = se_lun->lun_se_dev;
- spin_lock_irqsave(&dev->stats_lock, flags);
- dev->num_cmds++;
+ atomic_long_inc(&dev->num_cmds);
if (se_cmd->data_direction == DMA_TO_DEVICE)
- dev->write_bytes += se_cmd->data_length;
+ atomic_long_add(se_cmd->data_length, &dev->write_bytes);
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
- dev->read_bytes += se_cmd->data_length;
- spin_unlock_irqrestore(&dev->stats_lock, flags);
-
- spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
- list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
- spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
+ atomic_long_add(se_cmd->data_length, &dev->read_bytes);
return 0;
}
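
A minimal userspace sketch of the lock-free counter pattern this hunk adopts, with C11 atomics standing in for the kernel's atomic_long_t; all names below are illustrative rather than kernel API:

#include <stdatomic.h>
#include <stdio.h>

struct dev_stats {
	atomic_long num_cmds;
	atomic_long read_bytes;
	atomic_long write_bytes;
};

static void account_io(struct dev_stats *s, int is_write, long len)
{
	/* Each update is a single atomic RMW, so no spinlock and no
	 * irqsave/irqrestore pair shows up on the I/O path. */
	atomic_fetch_add(&s->num_cmds, 1);
	if (is_write)
		atomic_fetch_add(&s->write_bytes, len);
	else
		atomic_fetch_add(&s->read_bytes, len);
}

int main(void)
{
	struct dev_stats s = {0};

	account_io(&s, 1, 4096);
	account_io(&s, 0, 512);
	printf("cmds=%ld wr=%ld rd=%ld\n", atomic_load(&s.num_cmds),
	       atomic_load(&s.write_bytes), atomic_load(&s.read_bytes));
	return 0;
}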
@@ -314,14 +313,14 @@ int core_enable_device_list_for_node(
deve = nacl->device_list[mapped_lun];
/*
- * Check if the call is handling demo mode -> explict LUN ACL
+ * Check if the call is handling demo mode -> explicit LUN ACL
* transition. This transition must be for the same struct se_lun
 * + mapped_lun that was set up in demo mode.
*/
if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
if (deve->se_lun_acl != NULL) {
pr_err("struct se_dev_entry->se_lun_acl"
- " already set for demo mode -> explict"
+ " already set for demo mode -> explicit"
" LUN ACL transition\n");
spin_unlock_irq(&nacl->device_list_lock);
return -EINVAL;
@@ -329,7 +328,7 @@ int core_enable_device_list_for_node(
if (deve->se_lun != lun) {
pr_err("struct se_dev_entry->se_lun does"
" match passed struct se_lun for demo mode"
- " -> explict LUN ACL transition\n");
+ " -> explicit LUN ACL transition\n");
spin_unlock_irq(&nacl->device_list_lock);
return -EINVAL;
}
@@ -1107,6 +1106,11 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
dev->dev_attrib.block_size = block_size;
pr_debug("dev[%p]: SE Device block_size changed to %u\n",
dev, block_size);
+
+ if (dev->dev_attrib.max_bytes_per_io)
+ dev->dev_attrib.hw_max_sectors =
+ dev->dev_attrib.max_bytes_per_io / block_size;
+
return 0;
}
@@ -1407,6 +1411,7 @@ static void scsi_dump_inquiry(struct se_device *dev)
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
struct se_device *dev;
+ struct se_lun *xcopy_lun;
dev = hba->transport->alloc_device(hba, name);
if (!dev)
@@ -1423,7 +1428,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
INIT_LIST_HEAD(&dev->state_list);
INIT_LIST_HEAD(&dev->qf_cmd_list);
INIT_LIST_HEAD(&dev->g_dev_node);
- spin_lock_init(&dev->stats_lock);
spin_lock_init(&dev->execute_task_lock);
spin_lock_init(&dev->delayed_cmd_lock);
spin_lock_init(&dev->dev_reservation_lock);
@@ -1469,6 +1473,14 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
+ xcopy_lun = &dev->xcopy_lun;
+ xcopy_lun->lun_se_dev = dev;
+ init_completion(&xcopy_lun->lun_shutdown_comp);
+ INIT_LIST_HEAD(&xcopy_lun->lun_acl_list);
+ spin_lock_init(&xcopy_lun->lun_acl_lock);
+ spin_lock_init(&xcopy_lun->lun_sep_lock);
+ init_completion(&xcopy_lun->lun_ref_comp);
+
return dev;
}
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 3503996d7d10..dae2ad6a669e 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -385,9 +385,9 @@ static struct config_group *target_fabric_make_mappedlun(
}
config_group_init_type_name(&lacl->se_lun_group, name,
- &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_mappedlun_cit);
config_group_init_type_name(&lacl->ml_stat_grps.stat_group,
- "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_stat_cit);
+ "statistics", &tf->tf_cit_tmpl.tfc_tpg_mappedlun_stat_cit);
lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group;
lacl_cg->default_groups[1] = NULL;
@@ -504,16 +504,16 @@ static struct config_group *target_fabric_make_nodeacl(
nacl_cg->default_groups[4] = NULL;
config_group_init_type_name(&se_nacl->acl_group, name,
- &TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_nacl_base_cit);
config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib",
- &TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit);
config_group_init_type_name(&se_nacl->acl_auth_group, "auth",
- &TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_nacl_auth_cit);
config_group_init_type_name(&se_nacl->acl_param_group, "param",
- &TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_nacl_param_cit);
config_group_init_type_name(&se_nacl->acl_fabric_stat_group,
"fabric_statistics",
- &TF_CIT_TMPL(tf)->tfc_tpg_nacl_stat_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_nacl_stat_cit);
return &se_nacl->acl_group;
}
@@ -595,7 +595,7 @@ static struct config_group *target_fabric_make_np(
se_tpg_np->tpg_np_parent = se_tpg;
config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
- &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_np_base_cit);
return &se_tpg_np->tpg_np_group;
}
@@ -899,9 +899,9 @@ static struct config_group *target_fabric_make_lun(
}
config_group_init_type_name(&lun->lun_group, name,
- &TF_CIT_TMPL(tf)->tfc_tpg_port_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_port_cit);
config_group_init_type_name(&lun->port_stat_grps.stat_group,
- "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_port_stat_cit);
+ "statistics", &tf->tf_cit_tmpl.tfc_tpg_port_stat_cit);
lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group;
lun_cg->default_groups[1] = NULL;
@@ -1056,19 +1056,19 @@ static struct config_group *target_fabric_make_tpg(
se_tpg->tpg_group.default_groups[6] = NULL;
config_group_init_type_name(&se_tpg->tpg_group, name,
- &TF_CIT_TMPL(tf)->tfc_tpg_base_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_base_cit);
config_group_init_type_name(&se_tpg->tpg_lun_group, "lun",
- &TF_CIT_TMPL(tf)->tfc_tpg_lun_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_lun_cit);
config_group_init_type_name(&se_tpg->tpg_np_group, "np",
- &TF_CIT_TMPL(tf)->tfc_tpg_np_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_np_cit);
config_group_init_type_name(&se_tpg->tpg_acl_group, "acls",
- &TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_nacl_cit);
config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
- &TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_attrib_cit);
config_group_init_type_name(&se_tpg->tpg_auth_group, "auth",
- &TF_CIT_TMPL(tf)->tfc_tpg_auth_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_auth_cit);
config_group_init_type_name(&se_tpg->tpg_param_group, "param",
- &TF_CIT_TMPL(tf)->tfc_tpg_param_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_param_cit);
return &se_tpg->tpg_group;
}
@@ -1155,9 +1155,9 @@ static struct config_group *target_fabric_make_wwn(
wwn->wwn_group.default_groups[1] = NULL;
config_group_init_type_name(&wwn->wwn_group, name,
- &TF_CIT_TMPL(tf)->tfc_tpg_cit);
+ &tf->tf_cit_tmpl.tfc_tpg_cit);
config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics",
- &TF_CIT_TMPL(tf)->tfc_wwn_fabric_stats_cit);
+ &tf->tf_cit_tmpl.tfc_wwn_fabric_stats_cit);
return &wwn->wwn_group;
}
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index b662f89dedac..78241a53b555 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -66,9 +66,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
TARGET_CORE_MOD_VERSION);
- pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
- " MaxSectors: %u\n",
- hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
+ pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
+ hba->hba_id, fd_host->fd_host_id);
return 0;
}
@@ -220,7 +219,8 @@ static int fd_configure_device(struct se_device *dev)
}
dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
- dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
+ dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
+ dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
@@ -562,7 +562,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
} else {
ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
/*
- * Perform implict vfs_fsync_range() for fd_do_writev() ops
+ * Perform implicit vfs_fsync_range() for fd_do_writev() ops
* for SCSI WRITEs with Forced Unit Access (FUA) set.
* Allow this to happen independent of WCE=0 setting.
*/
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 37ffc5bd2399..d7772c167685 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -7,7 +7,10 @@
#define FD_DEVICE_QUEUE_DEPTH 32
#define FD_MAX_DEVICE_QUEUE_DEPTH 128
#define FD_BLOCKSIZE 512
-#define FD_MAX_SECTORS 2048
+/*
+ * Limited by the number of iovecs (2048) per vfs_[writev,readv] call
+ */
+#define FD_MAX_BYTES 8388608
#define RRF_EMULATE_CDB 0x01
#define RRF_GOT_LBA 0x02
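
The 8388608-byte cap follows from the comment above: 2048 iovecs per vfs_writev() call at (presumably) one 4096-byte page each. A small sketch of how that cap turns into hw_max_sectors, mirroring the division now done in fd_configure_device() and se_dev_set_block_size():

#include <stdio.h>

#define FD_MAX_BYTES 8388608	/* 2048 iovecs * 4096 bytes */

int main(void)
{
	unsigned int block_sizes[] = { 512, 4096 };

	for (int i = 0; i < 2; i++) {
		unsigned int bs = block_sizes[i];

		/* hw_max_sectors = max_bytes_per_io / block_size */
		printf("block_size=%u -> hw_max_sectors=%u\n",
		       bs, FD_MAX_BYTES / bs);
	}
	return 0;
}

With a 512-byte block size this yields 16384 sectors, well above the old fixed FD_MAX_SECTORS of 2048.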
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index b9a3394fe479..c87959f12760 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -710,6 +710,45 @@ static sector_t iblock_get_blocks(struct se_device *dev)
return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}
+static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bd = ib_dev->ibd_bd;
+ int ret;
+
+ ret = bdev_alignment_offset(bd);
+ if (ret == -1)
+ return 0;
+
+ /* convert offset-bytes to offset-lbas */
+ return ret / bdev_logical_block_size(bd);
+}
+
+static unsigned int iblock_get_lbppbe(struct se_device *dev)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bd = ib_dev->ibd_bd;
+ int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
+
+ return ilog2(logs_per_phys);
+}
+
+static unsigned int iblock_get_io_min(struct se_device *dev)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bd = ib_dev->ibd_bd;
+
+ return bdev_io_min(bd);
+}
+
+static unsigned int iblock_get_io_opt(struct se_device *dev)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bd = ib_dev->ibd_bd;
+
+ return bdev_io_opt(bd);
+}
+
static struct sbc_ops iblock_sbc_ops = {
.execute_rw = iblock_execute_rw,
.execute_sync_cache = iblock_execute_sync_cache,
@@ -749,6 +788,10 @@ static struct se_subsystem_api iblock_template = {
.show_configfs_dev_params = iblock_show_configfs_dev_params,
.get_device_type = sbc_get_device_type,
.get_blocks = iblock_get_blocks,
+ .get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
+ .get_lbppbe = iblock_get_lbppbe,
+ .get_io_min = iblock_get_io_min,
+ .get_io_opt = iblock_get_io_opt,
.get_write_cache = iblock_get_write_cache,
};
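
A standalone sketch of the geometry math behind the new helpers: lbppbe is log2(physical block size / logical block size), and the alignment offset reported by the block layer is converted from bytes to logical blocks. The sample sizes below are illustrative, not read from a device:

#include <stdio.h>

static unsigned int ilog2_u(unsigned int v)	/* userspace ilog2() stand-in */
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int logical = 512, physical = 4096;	/* a 512e drive */
	int align_bytes = 3584;				/* e.g. partition skew */

	printf("lbppbe=%u\n", ilog2_u(physical / logical));	/* 3 */
	printf("alignment_lbas=%u\n", align_bytes / logical);	/* 7 */
	return 0;
}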
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 579128abe3f5..47b63b094cdc 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -75,8 +75,6 @@ extern struct se_device *g_lun0_dev;
struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
const char *);
-struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
- unsigned char *);
void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
@@ -102,7 +100,7 @@ int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
-int transport_clear_lun_from_sessions(struct se_lun *);
+int transport_clear_lun_ref(struct se_lun *);
void transport_send_task_abort(struct se_cmd *);
sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
void target_qf_do_work(struct work_struct *work);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index d1ae4c5c3ffd..2f5d77932c80 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -474,7 +474,7 @@ static int core_scsi3_pr_seq_non_holder(
* statement.
*/
if (!ret && !other_cdb) {
- pr_debug("Allowing explict CDB: 0x%02x for %s"
+ pr_debug("Allowing explicit CDB: 0x%02x for %s"
" reservation holder\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
@@ -507,7 +507,7 @@ static int core_scsi3_pr_seq_non_holder(
*/
if (!registered_nexus) {
- pr_debug("Allowing implict CDB: 0x%02x"
+ pr_debug("Allowing implicit CDB: 0x%02x"
" for %s reservation on unregistered"
" nexus\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
@@ -522,7 +522,7 @@ static int core_scsi3_pr_seq_non_holder(
* allow commands from registered nexuses.
*/
- pr_debug("Allowing implict CDB: 0x%02x for %s"
+ pr_debug("Allowing implicit CDB: 0x%02x for %s"
" reservation\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
@@ -683,7 +683,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
alua_port_list) {
/*
* This pointer will be NULL for demo mode MappedLUNs
- * that have not been make explict via a ConfigFS
+ * that have not been made explicit via a ConfigFS
* MappedLUN group for the SCSI Initiator Node ACL.
*/
if (!deve_tmp->se_lun_acl)
@@ -1158,7 +1158,7 @@ static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
smp_mb__after_atomic_dec();
}
-static int core_scsi3_check_implict_release(
+static int core_scsi3_check_implicit_release(
struct se_device *dev,
struct t10_pr_registration *pr_reg)
{
@@ -1174,7 +1174,7 @@ static int core_scsi3_check_implict_release(
}
if (pr_res_holder == pr_reg) {
/*
- * Perform an implict RELEASE if the registration that
+ * Perform an implicit RELEASE if the registration that
* is being released is holding the reservation.
*
* From spc4r17, section 5.7.11.1:
@@ -1192,7 +1192,7 @@ static int core_scsi3_check_implict_release(
* For 'All Registrants' reservation types, all existing
* registrations are still processed as reservation holders
* in core_scsi3_pr_seq_non_holder() after the initial
- * reservation holder is implictly released here.
+ * reservation holder is implicitly released here.
*/
} else if (pr_reg->pr_reg_all_tg_pt &&
(!strcmp(pr_res_holder->pr_reg_nacl->initiatorname,
@@ -2125,7 +2125,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
/*
* sa_res_key=0 Unregister Reservation Key for registered I_T Nexus.
*/
- pr_holder = core_scsi3_check_implict_release(
+ pr_holder = core_scsi3_check_implicit_release(
cmd->se_dev, pr_reg);
if (pr_holder < 0) {
ret = TCM_RESERVATION_CONFLICT;
@@ -2402,7 +2402,7 @@ static void __core_scsi3_complete_pro_release(
struct se_device *dev,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
- int explict)
+ int explicit)
{
struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
char i_buf[PR_REG_ISID_ID_LEN];
@@ -2416,7 +2416,7 @@ static void __core_scsi3_complete_pro_release(
pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
" reservation holder TYPE: %s ALL_TG_PT: %d\n",
- tfo->get_fabric_name(), (explict) ? "explict" : "implict",
+ tfo->get_fabric_name(), (explicit) ? "explicit" : "implicit",
core_scsi3_pr_dump_type(pr_reg->pr_res_type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
@@ -2692,7 +2692,7 @@ static void __core_scsi3_complete_pro_preempt(
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
/*
- * Do an implict RELEASE of the existing reservation.
+ * Do an implicit RELEASE of the existing reservation.
*/
if (dev->dev_pr_res_holder)
__core_scsi3_complete_pro_release(dev, nacl,
@@ -2845,7 +2845,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
* 5.7.11.4 Preempting, Table 52 and Figure 7.
*
* For a ZERO SA Reservation key, release
- * all other registrations and do an implict
+ * all other registrations and do an implicit
* release of active persistent reservation.
*
* For a non-ZERO SA Reservation key, only
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 131327ac7f5b..4ffe5f2ec0e9 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -27,7 +27,6 @@
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
-#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index d9b92b2c524d..52ae54e60105 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -105,12 +105,22 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
buf[11] = dev->dev_attrib.block_size & 0xff;
+
+ if (dev->transport->get_lbppbe)
+ buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
+
+ if (dev->transport->get_alignment_offset_lbas) {
+ u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
+ buf[14] = (lalba >> 8) & 0x3f;
+ buf[15] = lalba & 0xff;
+ }
+
/*
* Set Thin Provisioning Enable bit following sbc3r22 in section
* READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
*/
if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
- buf[14] = 0x80;
+ buf[14] |= 0x80;
rbuf = transport_kmap_data_sg(cmd);
if (rbuf) {
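
For reference, a sketch of the READ CAPACITY (16) byte layout the hunk above fills in, per the sbc3 wording it cites: the low nibble of byte 13 carries LBPPBE, bytes 14..15 carry the 14-bit lowest aligned LBA, and the TPE flag is bit 7 of byte 14, which is why it must now be OR'd in rather than assigned:

#include <stdio.h>

int main(void)
{
	unsigned char buf[16] = {0};
	unsigned int lbppbe = 3;	/* 4096B physical / 512B logical */
	unsigned short lalba = 7;	/* lowest aligned logical block */
	int thin_provisioned = 1;

	buf[13] = lbppbe & 0x0f;
	buf[14] = (lalba >> 8) & 0x3f;
	buf[15] = lalba & 0xff;
	if (thin_provisioned)
		buf[14] |= 0x80;	/* OR keeps the lalba bits intact */

	printf("byte13=0x%02x byte14=0x%02x byte15=0x%02x\n",
	       buf[13], buf[14], buf[15]);
	return 0;
}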
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 074539558a54..021c3f4a4f00 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -48,7 +48,7 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
buf[5] = 0x80;
/*
- * Set TPGS field for explict and/or implict ALUA access type
+ * Set TPGS field for explicit and/or implicit ALUA access type
 * and operation.
*
* See spc4r17 section 6.4.2 Table 135
@@ -452,6 +452,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
struct se_device *dev = cmd->se_dev;
u32 max_sectors;
int have_tp = 0;
+ int opt, min;
/*
* Following spc3r22 section 6.5.3 Block Limits VPD page, when
@@ -475,7 +476,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set OPTIMAL TRANSFER LENGTH GRANULARITY
*/
- put_unaligned_be16(1, &buf[6]);
+ if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
+ put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
+ else
+ put_unaligned_be16(1, &buf[6]);
/*
* Set MAXIMUM TRANSFER LENGTH
@@ -487,7 +491,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set OPTIMAL TRANSFER LENGTH
*/
- put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
+ if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
+ put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
+ else
+ put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
/*
* Exit now if we don't support TP.
@@ -1250,7 +1257,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
*size = (cdb[3] << 8) + cdb[4];
/*
- * Do implict HEAD_OF_QUEUE processing for INQUIRY.
+ * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
* See spc4r17 section 5.3
*/
cmd->sam_task_attr = MSG_HEAD_TAG;
@@ -1284,7 +1291,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
cmd->execute_cmd = spc_emulate_report_luns;
*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
/*
- * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS
+ * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
* See spc4r17 section 5.3
*/
cmd->sam_task_attr = MSG_HEAD_TAG;
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 9c642e02cba1..03538994d2f7 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -32,7 +32,6 @@
#include <linux/utsname.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/blkdev.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -214,7 +213,8 @@ static ssize_t target_stat_scsi_tgt_dev_show_attr_resets(
struct se_device *dev =
container_of(sgrps, struct se_device, dev_stat_grps);
- return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
+ return snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&dev->num_resets));
}
DEV_STAT_SCSI_TGT_DEV_ATTR_RO(resets);
@@ -397,8 +397,8 @@ static ssize_t target_stat_scsi_lu_show_attr_num_cmds(
container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuNumCommands */
- return snprintf(page, PAGE_SIZE, "%llu\n",
- (unsigned long long)dev->num_cmds);
+ return snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&dev->num_cmds));
}
DEV_STAT_SCSI_LU_ATTR_RO(num_cmds);
@@ -409,7 +409,8 @@ static ssize_t target_stat_scsi_lu_show_attr_read_mbytes(
container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuReadMegaBytes */
- return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20));
+ return snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&dev->read_bytes) >> 20);
}
DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes);
@@ -420,7 +421,8 @@ static ssize_t target_stat_scsi_lu_show_attr_write_mbytes(
container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuWrittenMegaBytes */
- return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20));
+ return snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&dev->write_bytes) >> 20);
}
DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes);
@@ -431,7 +433,7 @@ static ssize_t target_stat_scsi_lu_show_attr_resets(
container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuInResets */
- return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
+ return snprintf(page, PAGE_SIZE, "%lu\n", atomic_long_read(&dev->num_resets));
}
DEV_STAT_SCSI_LU_ATTR_RO(resets);
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 250009909d49..70c638f730af 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -386,9 +386,7 @@ int core_tmr_lun_reset(
pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
}
- spin_lock_irq(&dev->stats_lock);
- dev->num_resets++;
- spin_unlock_irq(&dev->stats_lock);
+ atomic_long_inc(&dev->num_resets);
pr_debug("LUN_RESET: %s for [%s] Complete\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index b9a6ec0aa5fe..2a573de19a9f 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -116,6 +116,7 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
return acl;
}
+EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
/* core_tpg_add_node_to_devs():
*
@@ -277,7 +278,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
acl->se_tpg = tpg;
acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
- spin_lock_init(&acl->stats_lock);
acl->dynamic_node_acl = 1;
tpg->se_tpg_tfo->set_default_node_attributes(acl);
@@ -405,7 +405,6 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
acl->se_tpg = tpg;
acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
- spin_lock_init(&acl->stats_lock);
tpg->se_tpg_tfo->set_default_node_attributes(acl);
@@ -633,6 +632,13 @@ int core_tpg_set_initiator_node_tag(
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
+static void core_tpg_lun_ref_release(struct percpu_ref *ref)
+{
+ struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
+
+ complete(&lun->lun_ref_comp);
+}
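
A much-simplified userspace analogue of the lun_ref lifecycle introduced here, with one plain atomic counter standing in for the percpu_ref (the real primitive runs per-CPU until percpu_ref_kill() and closes the get-vs-kill race that this sketch ignores):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long lun_ref = 1;		/* the LUN's own initial reference */
static atomic_int lun_dying;

static int lun_ref_get(void)
{
	if (atomic_load(&lun_dying))
		return 0;		/* new I/O refused once killed */
	atomic_fetch_add(&lun_ref, 1);
	return 1;
}

static void lun_ref_put(void)
{
	if (atomic_fetch_sub(&lun_ref, 1) == 1)	/* dropped to zero */
		printf("release: complete(&lun->lun_ref_comp)\n");
}

int main(void)
{
	lun_ref_get();			/* an in-flight command */
	atomic_store(&lun_dying, 1);	/* percpu_ref_kill() analogue */
	lun_ref_put();			/* the command finishes */
	lun_ref_put();			/* initial ref drops, release fires */
	return 0;
}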
+
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
/* Set in core_dev_setup_virtual_lun0() */
@@ -646,10 +652,9 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
atomic_set(&lun->lun_acl_count, 0);
init_completion(&lun->lun_shutdown_comp);
INIT_LIST_HEAD(&lun->lun_acl_list);
- INIT_LIST_HEAD(&lun->lun_cmd_list);
spin_lock_init(&lun->lun_acl_lock);
- spin_lock_init(&lun->lun_cmd_lock);
spin_lock_init(&lun->lun_sep_lock);
+ init_completion(&lun->lun_ref_comp);
ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
if (ret < 0)
@@ -691,10 +696,9 @@ int core_tpg_register(
atomic_set(&lun->lun_acl_count, 0);
init_completion(&lun->lun_shutdown_comp);
INIT_LIST_HEAD(&lun->lun_acl_list);
- INIT_LIST_HEAD(&lun->lun_cmd_list);
spin_lock_init(&lun->lun_acl_lock);
- spin_lock_init(&lun->lun_cmd_lock);
spin_lock_init(&lun->lun_sep_lock);
+ init_completion(&lun->lun_ref_comp);
}
se_tpg->se_tpg_type = se_tpg_type;
@@ -815,10 +819,16 @@ int core_tpg_post_addlun(
{
int ret;
- ret = core_dev_export(lun_ptr, tpg, lun);
+ ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
if (ret < 0)
return ret;
+ ret = core_dev_export(lun_ptr, tpg, lun);
+ if (ret < 0) {
+ percpu_ref_cancel_init(&lun->lun_ref);
+ return ret;
+ }
+
spin_lock(&tpg->tpg_lun_lock);
lun->lun_access = lun_access;
lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
@@ -827,14 +837,6 @@ int core_tpg_post_addlun(
return 0;
}
-static void core_tpg_shutdown_lun(
- struct se_portal_group *tpg,
- struct se_lun *lun)
-{
- core_clear_lun_from_tpg(lun, tpg);
- transport_clear_lun_from_sessions(lun);
-}
-
struct se_lun *core_tpg_pre_dellun(
struct se_portal_group *tpg,
u32 unpacked_lun)
@@ -869,7 +871,8 @@ int core_tpg_post_dellun(
struct se_portal_group *tpg,
struct se_lun *lun)
{
- core_tpg_shutdown_lun(tpg, lun);
+ core_clear_lun_from_tpg(lun, tpg);
+ transport_clear_lun_ref(lun);
core_dev_unexport(lun->lun_se_dev, tpg, lun);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 81e945eefbbd..91953da0f623 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -28,7 +28,6 @@
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
-#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
@@ -473,7 +472,7 @@ void transport_deregister_session(struct se_session *se_sess)
pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
se_tpg->se_tpg_tfo->get_fabric_name());
/*
- * If last kref is dropping now for an explict NodeACL, awake sleeping
+ * If last kref is dropping now for an explicit NodeACL, awake sleeping
* ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
* removal context.
*/
@@ -515,23 +514,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
if (write_pending)
cmd->t_state = TRANSPORT_WRITE_PENDING;
- /*
- * Determine if IOCTL context caller in requesting the stopping of this
- * command for LUN shutdown purposes.
- */
- if (cmd->transport_state & CMD_T_LUN_STOP) {
- pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
- __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
-
- cmd->transport_state &= ~CMD_T_ACTIVE;
- if (remove_from_lists)
- target_remove_from_state_list(cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- complete(&cmd->transport_lun_stop_comp);
- return 1;
- }
-
if (remove_from_lists) {
target_remove_from_state_list(cmd);
@@ -585,15 +567,11 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
struct se_lun *lun = cmd->se_lun;
- unsigned long flags;
- if (!lun)
+ if (!lun || !cmd->lun_ref_active)
return;
- spin_lock_irqsave(&lun->lun_cmd_lock, flags);
- if (!list_empty(&cmd->se_lun_node))
- list_del_init(&cmd->se_lun_node);
- spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
+ percpu_ref_put(&lun->lun_ref);
}
void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
@@ -668,7 +646,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
cmd->transport_state |= CMD_T_FAILED;
/*
- * Check for case where an explict ABORT_TASK has been received
+ * Check for case where an explicit ABORT_TASK has been received
* and transport_wait_for_tasks() will be waiting for completion..
*/
if (cmd->transport_state & CMD_T_ABORTED &&
@@ -1092,13 +1070,10 @@ void transport_init_se_cmd(
int task_attr,
unsigned char *sense_buffer)
{
- INIT_LIST_HEAD(&cmd->se_lun_node);
INIT_LIST_HEAD(&cmd->se_delayed_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
INIT_LIST_HEAD(&cmd->se_cmd_list);
INIT_LIST_HEAD(&cmd->state_list);
- init_completion(&cmd->transport_lun_fe_stop_comp);
- init_completion(&cmd->transport_lun_stop_comp);
init_completion(&cmd->t_transport_stop_comp);
init_completion(&cmd->cmd_wait_comp);
init_completion(&cmd->task_stop_comp);
@@ -1719,29 +1694,14 @@ void target_execute_cmd(struct se_cmd *cmd)
/*
 * If the received CDB has already been aborted, stop processing it here.
*/
- if (transport_check_aborted_status(cmd, 1)) {
- complete(&cmd->transport_lun_stop_comp);
+ if (transport_check_aborted_status(cmd, 1))
return;
- }
/*
- * Determine if IOCTL context caller in requesting the stopping of this
- * command for LUN shutdown purposes.
- */
- spin_lock_irq(&cmd->t_state_lock);
- if (cmd->transport_state & CMD_T_LUN_STOP) {
- pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
- __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
-
- cmd->transport_state &= ~CMD_T_ACTIVE;
- spin_unlock_irq(&cmd->t_state_lock);
- complete(&cmd->transport_lun_stop_comp);
- return;
- }
- /*
* Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions.
*/
+ spin_lock_irq(&cmd->t_state_lock);
if (cmd->transport_state & CMD_T_STOP) {
pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
__func__, __LINE__,
@@ -2404,164 +2364,23 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
-/* transport_lun_wait_for_tasks():
- *
- * Called from ConfigFS context to stop the passed struct se_cmd to allow
- * an struct se_lun to be successfully shutdown.
- */
-static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
-{
- unsigned long flags;
- int ret = 0;
-
- /*
- * If the frontend has already requested this struct se_cmd to
- * be stopped, we can safely ignore this struct se_cmd.
- */
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (cmd->transport_state & CMD_T_STOP) {
- cmd->transport_state &= ~CMD_T_LUN_STOP;
-
- pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
- cmd->se_tfo->get_task_tag(cmd));
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- transport_cmd_check_stop(cmd, false, false);
- return -EPERM;
- }
- cmd->transport_state |= CMD_T_LUN_FE_STOP;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- // XXX: audit task_flags checks.
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if ((cmd->transport_state & CMD_T_BUSY) &&
- (cmd->transport_state & CMD_T_SENT)) {
- if (!target_stop_cmd(cmd, &flags))
- ret++;
- }
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- pr_debug("ConfigFS: cmd: %p stop tasks ret:"
- " %d\n", cmd, ret);
- if (!ret) {
- pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
- cmd->se_tfo->get_task_tag(cmd));
- wait_for_completion(&cmd->transport_lun_stop_comp);
- pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
- cmd->se_tfo->get_task_tag(cmd));
- }
-
- return 0;
-}
-
-static void __transport_clear_lun_from_sessions(struct se_lun *lun)
-{
- struct se_cmd *cmd = NULL;
- unsigned long lun_flags, cmd_flags;
- /*
- * Do exception processing and return CHECK_CONDITION status to the
- * Initiator Port.
- */
- spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
- while (!list_empty(&lun->lun_cmd_list)) {
- cmd = list_first_entry(&lun->lun_cmd_list,
- struct se_cmd, se_lun_node);
- list_del_init(&cmd->se_lun_node);
-
- spin_lock(&cmd->t_state_lock);
- pr_debug("SE_LUN[%d] - Setting cmd->transport"
- "_lun_stop for ITT: 0x%08x\n",
- cmd->se_lun->unpacked_lun,
- cmd->se_tfo->get_task_tag(cmd));
- cmd->transport_state |= CMD_T_LUN_STOP;
- spin_unlock(&cmd->t_state_lock);
-
- spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
-
- if (!cmd->se_lun) {
- pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
- cmd->se_tfo->get_task_tag(cmd),
- cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
- BUG();
- }
- /*
- * If the Storage engine still owns the iscsi_cmd_t, determine
- * and/or stop its context.
- */
- pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
- "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
- cmd->se_tfo->get_task_tag(cmd));
-
- if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
- spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
- continue;
- }
-
- pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
- "_wait_for_tasks(): SUCCESS\n",
- cmd->se_lun->unpacked_lun,
- cmd->se_tfo->get_task_tag(cmd));
-
- spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
- if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
- goto check_cond;
- }
- cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
- target_remove_from_state_list(cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
-
- /*
- * The Storage engine stopped this struct se_cmd before it was
- * send to the fabric frontend for delivery back to the
- * Initiator Node. Return this SCSI CDB back with an
- * CHECK_CONDITION status.
- */
-check_cond:
- transport_send_check_condition_and_sense(cmd,
- TCM_NON_EXISTENT_LUN, 0);
- /*
- * If the fabric frontend is waiting for this iscsi_cmd_t to
- * be released, notify the waiting thread now that LU has
- * finished accessing it.
- */
- spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
- if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
- pr_debug("SE_LUN[%d] - Detected FE stop for"
- " struct se_cmd: %p ITT: 0x%08x\n",
- lun->unpacked_lun,
- cmd, cmd->se_tfo->get_task_tag(cmd));
-
- spin_unlock_irqrestore(&cmd->t_state_lock,
- cmd_flags);
- transport_cmd_check_stop(cmd, false, false);
- complete(&cmd->transport_lun_fe_stop_comp);
- spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
- continue;
- }
- pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
- lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
-
- spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
- spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
- }
- spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
-}
-
-static int transport_clear_lun_thread(void *p)
+static int transport_clear_lun_ref_thread(void *p)
{
struct se_lun *lun = p;
- __transport_clear_lun_from_sessions(lun);
+ percpu_ref_kill(&lun->lun_ref);
+
+ wait_for_completion(&lun->lun_ref_comp);
complete(&lun->lun_shutdown_comp);
return 0;
}
-int transport_clear_lun_from_sessions(struct se_lun *lun)
+int transport_clear_lun_ref(struct se_lun *lun)
{
struct task_struct *kt;
- kt = kthread_run(transport_clear_lun_thread, lun,
+ kt = kthread_run(transport_clear_lun_ref_thread, lun,
"tcm_cl_%u", lun->unpacked_lun);
if (IS_ERR(kt)) {
pr_err("Unable to start clear_lun thread\n");
@@ -2595,43 +2414,6 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return false;
}
- /*
- * If we are already stopped due to an external event (ie: LUN shutdown)
- * sleep until the connection can have the passed struct se_cmd back.
- * The cmd->transport_lun_stopped_sem will be upped by
- * transport_clear_lun_from_sessions() once the ConfigFS context caller
- * has completed its operation on the struct se_cmd.
- */
- if (cmd->transport_state & CMD_T_LUN_STOP) {
- pr_debug("wait_for_tasks: Stopping"
- " wait_for_completion(&cmd->t_tasktransport_lun_fe"
- "_stop_comp); for ITT: 0x%08x\n",
- cmd->se_tfo->get_task_tag(cmd));
- /*
- * There is a special case for WRITES where a FE exception +
- * LUN shutdown means ConfigFS context is still sleeping on
- * transport_lun_stop_comp in transport_lun_wait_for_tasks().
- * We go ahead and up transport_lun_stop_comp just to be sure
- * here.
- */
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- complete(&cmd->transport_lun_stop_comp);
- wait_for_completion(&cmd->transport_lun_fe_stop_comp);
- spin_lock_irqsave(&cmd->t_state_lock, flags);
-
- target_remove_from_state_list(cmd);
- /*
- * At this point, the frontend who was the originator of this
- * struct se_cmd, now owns the structure and can be released through
- * normal means below.
- */
- pr_debug("wait_for_tasks: Stopped"
- " wait_for_completion(&cmd->t_tasktransport_lun_fe_"
- "stop_comp); for ITT: 0x%08x\n",
- cmd->se_tfo->get_task_tag(cmd));
-
- cmd->transport_state &= ~CMD_T_LUN_STOP;
- }
if (!(cmd->transport_state & CMD_T_ACTIVE)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -2910,6 +2692,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
+ cmd->scsi_status = SAM_STAT_TASK_ABORTED;
trace_target_cmd_complete(cmd);
cmd->se_tfo->queue_status(cmd);
@@ -2938,6 +2721,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
if (cmd->se_tfo->write_pending_status(cmd) != 0) {
cmd->transport_state |= CMD_T_ABORTED;
smp_mb__after_atomic_inc();
+ return;
}
}
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
index 0204952fe4d3..be912b36daae 100644
--- a/drivers/target/target_core_ua.h
+++ b/drivers/target/target_core_ua.h
@@ -19,7 +19,7 @@
#define ASCQ_2AH_RESERVATIONS_RELEASED 0x04
#define ASCQ_2AH_REGISTRATIONS_PREEMPTED 0x05
#define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED 0x06
-#define ASCQ_2AH_IMPLICT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
+#define ASCQ_2AH_IMPLICIT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
#define ASCQ_2AH_PRIORITY_CHANGED 0x08
#define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS 0x09
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 474cd44fac14..6b88a9958f61 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -405,9 +405,6 @@ static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
struct xcopy_pt_cmd, se_cmd);
- if (xpt_cmd->remote_port)
- kfree(se_cmd->se_lun);
-
kfree(xpt_cmd);
}
@@ -572,22 +569,10 @@ static int target_xcopy_init_pt_lun(
return 0;
}
- pt_cmd->se_lun = kzalloc(sizeof(struct se_lun), GFP_KERNEL);
- if (!pt_cmd->se_lun) {
- pr_err("Unable to allocate pt_cmd->se_lun\n");
- return -ENOMEM;
- }
- init_completion(&pt_cmd->se_lun->lun_shutdown_comp);
- INIT_LIST_HEAD(&pt_cmd->se_lun->lun_cmd_list);
- INIT_LIST_HEAD(&pt_cmd->se_lun->lun_acl_list);
- spin_lock_init(&pt_cmd->se_lun->lun_acl_lock);
- spin_lock_init(&pt_cmd->se_lun->lun_cmd_lock);
- spin_lock_init(&pt_cmd->se_lun->lun_sep_lock);
-
+ pt_cmd->se_lun = &se_dev->xcopy_lun;
pt_cmd->se_dev = se_dev;
pr_debug("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev);
- pt_cmd->se_lun->lun_se_dev = se_dev;
pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
pr_debug("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n",
@@ -658,8 +643,6 @@ static int target_xcopy_setup_pt_cmd(
return 0;
out:
- if (remote_port == true)
- kfree(cmd->se_lun);
return ret;
}
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index 0dd54a44abcf..752863acecb8 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -22,6 +22,7 @@
#define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */
#define FT_TPG_NAMELEN 32 /* max length of TPG name */
#define FT_LUN_NAMELEN 32 /* max length of LUN name */
+#define TCM_FC_DEFAULT_TAGS 512 /* tags used for per-session preallocation */
struct ft_transport_id {
__u8 format;
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 0e5a1caed176..479ec5621a4e 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -28,6 +28,7 @@
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
+#include <linux/percpu_ida.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -89,16 +90,18 @@ static void ft_free_cmd(struct ft_cmd *cmd)
{
struct fc_frame *fp;
struct fc_lport *lport;
+ struct se_session *se_sess;
if (!cmd)
return;
+ se_sess = cmd->sess->se_sess;
fp = cmd->req_frame;
lport = fr_dev(fp);
if (fr_seq(fp))
lport->tt.seq_release(fr_seq(fp));
fc_frame_free(fp);
+ percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
ft_sess_put(cmd->sess); /* undo get from lookup at recv */
- kfree(cmd);
}
void ft_release_cmd(struct se_cmd *se_cmd)
@@ -432,14 +435,21 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
{
struct ft_cmd *cmd;
struct fc_lport *lport = sess->tport->lport;
+ struct se_session *se_sess = sess->se_sess;
+ int tag;
- cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
- if (!cmd)
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+ if (tag < 0)
goto busy;
+
+ cmd = &((struct ft_cmd *)se_sess->sess_cmd_map)[tag];
+ memset(cmd, 0, sizeof(struct ft_cmd));
+
+ cmd->se_cmd.map_tag = tag;
cmd->sess = sess;
cmd->seq = lport->tt.seq_assign(lport, fp);
if (!cmd->seq) {
- kfree(cmd);
+ percpu_ida_free(&se_sess->sess_tag_pool, tag);
goto busy;
}
cmd->req_frame = fp; /* hold frame during cmd */
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 4e0050840a72..c6932fb53a8d 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -571,16 +571,16 @@ int ft_register_configfs(void)
/*
* Setup default attribute lists for various fabric->tf_cit_tmpl
*/
- TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs =
+ fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs =
ft_nacl_base_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
/*
* register the fabric for use within TCM
*/
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 4859505ae2ed..ae52c08dad09 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -210,7 +210,8 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
if (!sess)
return NULL;
- sess->se_sess = transport_init_session();
+ sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS,
+ sizeof(struct ft_cmd));
if (IS_ERR(sess->se_sess)) {
kfree(sess);
return NULL;
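
A sketch of the tag-pool pattern tcm_fc adopts here: commands live in one array preallocated at session setup and are handed out by tag, replacing per-request kzalloc()/kfree(). A plain LIFO freelist stands in for the kernel's percpu_ida; names are illustrative:

#include <stdio.h>
#include <string.h>

#define NR_TAGS 512	/* TCM_FC_DEFAULT_TAGS */

struct demo_cmd { int tag; char payload[64]; };

static struct demo_cmd cmd_map[NR_TAGS];	/* sess_cmd_map analogue */
static int free_tags[NR_TAGS];
static int free_top;

static void pool_init(void)
{
	for (int i = 0; i < NR_TAGS; i++)
		free_tags[free_top++] = i;
}

static struct demo_cmd *cmd_alloc(void)
{
	struct demo_cmd *cmd;

	if (!free_top)
		return NULL;			/* exhausted: report busy */
	cmd = &cmd_map[free_tags[--free_top]];
	memset(cmd, 0, sizeof(*cmd));
	cmd->tag = (int)(cmd - cmd_map);	/* map_tag analogue */
	return cmd;
}

static void cmd_free(struct demo_cmd *cmd)
{
	free_tags[free_top++] = cmd->tag;
}

int main(void)
{
	struct demo_cmd *c;

	pool_init();
	c = cmd_alloc();
	printf("allocated tag %d\n", c->tag);
	cmd_free(c);
	return 0;
}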
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 2b86f8e0fb58..71630a2af42c 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -1855,6 +1855,9 @@ static struct console sercons = {
*/
static int __init amiserial_console_init(void)
{
+ if (!MACH_IS_AMIGA)
+ return -ENODEV;
+
register_console(&sercons);
return 0;
}
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 7cdd1eb9406c..34aacaaae14a 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -93,6 +93,7 @@ struct n_tty_data {
size_t canon_head;
size_t echo_head;
size_t echo_commit;
+ size_t echo_mark;
DECLARE_BITMAP(char_map, 256);
/* private to n_tty_receive_overrun (single-threaded) */
@@ -336,6 +337,7 @@ static void reset_buffer_flags(struct n_tty_data *ldata)
{
ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
+ ldata->echo_mark = 0;
ldata->line_start = 0;
ldata->erasing = 0;
@@ -768,7 +770,7 @@ static size_t __process_echoes(struct tty_struct *tty)
* data at the tail to prevent a subsequent overrun */
while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
if (echo_buf(ldata, tail) == ECHO_OP_START) {
- if (echo_buf(ldata, tail) == ECHO_OP_ERASE_TAB)
+ if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB)
tail += 3;
else
tail += 2;
@@ -787,6 +789,7 @@ static void commit_echoes(struct tty_struct *tty)
size_t head;
head = ldata->echo_head;
+ ldata->echo_mark = head;
old = ldata->echo_commit - ldata->echo_tail;
/* Process committed echoes if the accumulated # of bytes
@@ -810,10 +813,12 @@ static void process_echoes(struct tty_struct *tty)
struct n_tty_data *ldata = tty->disc_data;
size_t echoed;
- if (!L_ECHO(tty) || ldata->echo_commit == ldata->echo_tail)
+ if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
+ ldata->echo_mark == ldata->echo_tail)
return;
mutex_lock(&ldata->output_lock);
+ ldata->echo_commit = ldata->echo_mark;
echoed = __process_echoes(tty);
mutex_unlock(&ldata->output_lock);
@@ -821,11 +826,13 @@ static void process_echoes(struct tty_struct *tty)
tty->ops->flush_chars(tty);
}
+/* NB: echo_mark and echo_head should be equivalent here */
static void flush_echoes(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
- if (!L_ECHO(tty) || ldata->echo_commit == ldata->echo_head)
+ if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
+ ldata->echo_commit == ldata->echo_head)
return;
mutex_lock(&ldata->output_lock);
@@ -1998,7 +2005,10 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
found = 1;
size = N_TTY_BUF_SIZE - tail;
- n = (found + eol + size) & (N_TTY_BUF_SIZE - 1);
+ n = eol - tail;
+ if (n > 4096)
+ n += 4096;
+ n += found;
c = n;
if (found && read_buf(ldata, eol) == __DISABLED_CHAR) {
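
The new length computation relies on unsigned wraparound: when eol has wrapped past the end of the 4k ring, (eol - tail) underflows, and adding the buffer size once restores the true modular distance. A standalone demonstration under that reading:

#include <stdio.h>
#include <stddef.h>

#define N_TTY_BUF_SIZE 4096

static size_t ring_distance(size_t tail, size_t eol)
{
	size_t n = eol - tail;		/* may underflow if eol wrapped */

	if (n > N_TTY_BUF_SIZE)
		n += N_TTY_BUF_SIZE;	/* second wrap cancels the first */
	return n;
}

int main(void)
{
	printf("%zu\n", ring_distance(10, 30));		/* 20: no wrap */
	printf("%zu\n", ring_distance(4090, 6));	/* 12: wrapped */
	return 0;
}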
@@ -2243,18 +2253,19 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
if (time)
timeout = time;
}
- mutex_unlock(&ldata->atomic_read_lock);
- remove_wait_queue(&tty->read_wait, &wait);
+ n_tty_set_room(tty);
+ up_read(&tty->termios_rwsem);
+ remove_wait_queue(&tty->read_wait, &wait);
if (!waitqueue_active(&tty->read_wait))
ldata->minimum_to_wake = minimum;
+ mutex_unlock(&ldata->atomic_read_lock);
+
__set_current_state(TASK_RUNNING);
if (b - buf)
retval = b - buf;
- n_tty_set_room(tty);
- up_read(&tty->termios_rwsem);
return retval;
}
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 4658e3e0ec42..06525f10e364 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -96,7 +96,8 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value)
if (offset == UART_LCR) {
int tries = 1000;
while (tries--) {
- if (value == p->serial_in(p, UART_LCR))
+ unsigned int lcr = p->serial_in(p, UART_LCR);
+ if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
return;
dw8250_force_idle(p);
writeb(value, p->membase + (UART_LCR << p->regshift));
@@ -132,7 +133,8 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
if (offset == UART_LCR) {
int tries = 1000;
while (tries--) {
- if (value == p->serial_in(p, UART_LCR))
+ unsigned int lcr = p->serial_in(p, UART_LCR);
+ if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
return;
dw8250_force_idle(p);
writel(value, p->membase + (UART_LCR << p->regshift));
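
Both hunks above compare the written LCR value against its readback with the sticky-parity bit masked on both sides, presumably because that bit does not read back reliably on this IP. A tiny illustration of the masked comparison (register values are made up):

#include <stdio.h>

#define UART_LCR_SPAR 0x20	/* stick parity, as in serial_reg.h */

static int lcr_took_effect(unsigned int written, unsigned int readback)
{
	return (written & ~UART_LCR_SPAR) == (readback & ~UART_LCR_SPAR);
}

int main(void)
{
	/* Wrote 8-bit chars with SPAR (0x23); hardware drops SPAR (0x03). */
	printf("%d\n", lcr_took_effect(0x23, 0x03));	/* 1: accepted */
	printf("%d\n", lcr_took_effect(0x23, 0x00));	/* 0: keep retrying */
	return 0;
}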
@@ -455,6 +457,8 @@ MODULE_DEVICE_TABLE(of, dw8250_of_match);
static const struct acpi_device_id dw8250_acpi_match[] = {
{ "INT33C4", 0 },
{ "INT33C5", 0 },
+ { "INT3434", 0 },
+ { "INT3435", 0 },
{ "80860F0A", 0 },
{ },
};
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index f3b306efaa59..23329918f229 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -41,7 +41,7 @@ config SERIAL_8250_DEPRECATED_OPTIONS
accept kernel parameters in both forms like 8250_core.nr_uarts=4 and
	  8250.nr_uarts=4. We have now renamed the module back to 8250, but if
anybody noticed in 3.7 and changed their userspace we still have to
- keep the 8350_core.* options around until they revert the changes
+ keep the 8250_core.* options around until they revert the changes
they already did.
If 8250 is built as a module, this adds 8250_core alias instead.
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 481b781b26e3..e9d420ff3931 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -2052,6 +2052,9 @@ static int __init pmz_console_init(void)
/* Probe ports */
pmz_probe();
+ if (pmz_ports_count == 0)
+ return -ENODEV;
+
/* TODO: Autoprobe console based on OF */
/* pmz_console.index = i; */
register_console(&pmz_console);
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 537750261aaa..7d8103cd3e2e 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1433,7 +1433,7 @@ static void work_fn_rx(struct work_struct *work)
desc = s->desc_rx[new];
if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
- DMA_SUCCESS) {
+ DMA_COMPLETE) {
/* Handle incomplete DMA receive */
struct dma_chan *chan = s->chan_rx;
struct shdma_desc *sh_desc = container_of(desc,
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index e46e9f3f19b9..f619ad5b5eae 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -240,6 +240,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
continue;
}
+#ifdef SUPPORT_SYSRQ
/*
* uart_handle_sysrq_char() doesn't work if
* spinlocked, for some reason
@@ -253,6 +254,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
}
spin_lock(&port->lock);
}
+#endif
port->icount.rx++;
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 3a1a01af9a80..c74a00ad7add 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2086,6 +2086,7 @@ retry_open:
filp->f_op = &tty_fops;
goto retry_open;
}
+ clear_bit(TTY_HUPPED, &tty->flags);
tty_unlock(tty);
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index 22fad8ad5ac2..d8a55e87877f 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -86,11 +86,21 @@ static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem)
return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}
+/*
+ * ldsem_cmpxchg() updates @*old with the last-known sem->count value.
+ * Returns 1 if count was successfully changed; @*old will have @new value.
+ * Returns 0 if count was not changed; @*old will have most recent sem->count
+ */
static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
{
- long tmp = *old;
- *old = atomic_long_cmpxchg(&sem->count, *old, new);
- return *old == tmp;
+ long tmp = atomic_long_cmpxchg(&sem->count, *old, new);
+ if (tmp == *old) {
+ *old = new;
+ return 1;
+ } else {
+ *old = tmp;
+ return 0;
+ }
}
/*
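
C11's compare-exchange has essentially the contract the rewritten helper documents: on failure it writes the freshly observed value into *old so the caller can retry without re-reading. A standalone sketch matching the comment above (the success path additionally sets *old to the new value, as the kernel version does):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long count = 5;

static int cmpxchg_demo(long *old, long new_val)
{
	if (atomic_compare_exchange_strong(&count, old, new_val)) {
		*old = new_val;	/* mirror the helper's success path */
		return 1;
	}
	return 0;	/* C11 already stored the observed count in *old */
}

int main(void)
{
	long old = 4;	/* stale snapshot: first attempt must fail */

	if (!cmpxchg_demo(&old, 6))
		printf("raced, count is %ld\n", old);	/* prints 5 */
	if (cmpxchg_demo(&old, 6))
		printf("retry won, *old=%ld\n", old);	/* prints 6 */
	return 0;
}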
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 67beb8444930..f7beb6eb40c7 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -653,6 +653,8 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
return -EINVAL;
mem = idev->info->mem + mi;
+ if (mem->addr & ~PAGE_MASK)
+ return -ENODEV;
if (vma->vm_end - vma->vm_start > mem->size)
return -EINVAL;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 5d8981c5235e..6e73f8cd60e5 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -642,6 +642,10 @@ static int ci_hdrc_probe(struct platform_device *pdev)
: CI_ROLE_GADGET;
}
+ /* only update vbus status for peripheral */
+ if (ci->role == CI_ROLE_GADGET)
+ ci_handle_vbus_change(ci);
+
ret = ci_role_start(ci, ci->role);
if (ret) {
dev_err(dev, "can't start %s role\n", ci_role(ci)->name);
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 59e6020ea753..526cd77563d8 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -88,7 +88,8 @@ static int host_start(struct ci_hdrc *ci)
return ret;
disable_reg:
- regulator_disable(ci->platdata->reg_vbus);
+ if (ci->platdata->reg_vbus)
+ regulator_disable(ci->platdata->reg_vbus);
put_hcd:
usb_put_hcd(hcd);
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index b34c81969cba..69d20fbb38a2 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1795,9 +1795,6 @@ static int udc_start(struct ci_hdrc *ci)
pm_runtime_no_callbacks(&ci->gadget.dev);
pm_runtime_enable(&ci->gadget.dev);
- /* Update ci->vbus_active */
- ci_handle_vbus_change(ci);
-
return retval;
destroy_eps:
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 3e7560f004f8..e8404319ca68 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1515,6 +1515,8 @@ static int acm_reset_resume(struct usb_interface *intf)
static const struct usb_device_id acm_ids[] = {
/* quirky and broken devices */
+ { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */
+ .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
{ USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 4d387596f3f0..0b23a8639311 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -854,13 +854,11 @@ static int wdm_manage_power(struct usb_interface *intf, int on)
{
/* need autopm_get/put here to ensure the usbcore sees the new value */
int rv = usb_autopm_get_interface(intf);
- if (rv < 0)
- goto err;
intf->needs_remote_wakeup = on;
- usb_autopm_put_interface(intf);
-err:
- return rv;
+ if (!rv)
+ usb_autopm_put_interface(intf);
+ return 0;
}
static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 06cec635e703..bd9dc3504b51 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4832,8 +4832,9 @@ static void hub_events(void)
hub->ports[i - 1]->child;
dev_dbg(hub_dev, "warm reset port %d\n", i);
- if (!udev || !(portstatus &
- USB_PORT_STAT_CONNECTION)) {
+ if (!udev ||
+ !(portstatus & USB_PORT_STAT_CONNECTION) ||
+ udev->state == USB_STATE_NOTATTACHED) {
status = hub_port_reset(hub, i,
NULL, HUB_BH_RESET_TIME,
true);
@@ -5501,6 +5502,6 @@ acpi_handle usb_get_hub_port_acpi_handle(struct usb_device *hdev,
if (!hub)
return NULL;
- return DEVICE_ACPI_HANDLE(&hub->ports[port1 - 1]->dev);
+ return ACPI_HANDLE(&hub->ports[port1 - 1]->dev);
}
#endif
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index 255c14464bf2..4e243c37f17f 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -173,7 +173,7 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle)
}
/* root hub's parent is the usb hcd. */
- parent_handle = DEVICE_ACPI_HANDLE(dev->parent);
+ parent_handle = ACPI_HANDLE(dev->parent);
*handle = acpi_get_child(parent_handle, udev->portnum);
if (!*handle)
return -ENODEV;
@@ -194,7 +194,7 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle)
raw_port_num = usb_hcd_find_raw_port_number(hcd,
port_num);
- *handle = acpi_get_child(DEVICE_ACPI_HANDLE(&udev->dev),
+ *handle = acpi_get_child(ACPI_HANDLE(&udev->dev),
raw_port_num);
if (!*handle)
return -ENODEV;
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 74f9cf02da07..a49217ae3533 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -455,9 +455,6 @@ static int dwc3_probe(struct platform_device *pdev)
if (IS_ERR(regs))
return PTR_ERR(regs);
- usb_phy_set_suspend(dwc->usb2_phy, 0);
- usb_phy_set_suspend(dwc->usb3_phy, 0);
-
spin_lock_init(&dwc->lock);
platform_set_drvdata(pdev, dwc);
@@ -488,6 +485,9 @@ static int dwc3_probe(struct platform_device *pdev)
goto err0;
}
+ usb_phy_set_suspend(dwc->usb2_phy, 0);
+ usb_phy_set_suspend(dwc->usb3_phy, 0);
+
ret = dwc3_event_buffers_setup(dwc);
if (ret) {
dev_err(dwc->dev, "failed to setup event buffers\n");
@@ -569,6 +569,8 @@ err2:
dwc3_event_buffers_cleanup(dwc);
err1:
+ usb_phy_set_suspend(dwc->usb2_phy, 1);
+ usb_phy_set_suspend(dwc->usb3_phy, 1);
dwc3_core_exit(dwc);
err0:
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 95f7649c71a7..21a352079bc2 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -459,6 +459,8 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
dep = dwc3_wIndex_to_dep(dwc, wIndex);
if (!dep)
return -EINVAL;
+ if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
+ break;
ret = __dwc3_gadget_ep_set_halt(dep, set);
if (ret)
return -EINVAL;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 5452c0fce360..02e44fcaf205 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1200,9 +1200,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
else
dep->flags |= DWC3_EP_STALL;
} else {
- if (dep->flags & DWC3_EP_WEDGE)
- return 0;
-
ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
DWC3_DEPCMD_CLEARSTALL, &params);
if (ret)
@@ -1210,7 +1207,7 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
value ? "set" : "clear",
dep->name);
else
- dep->flags &= ~DWC3_EP_STALL;
+ dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
}
return ret;
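
The two dwc3 hunks above relocate the wedge check from __dwc3_gadget_ep_set_halt() into the ep0 ClearFeature handler: a host-issued ClearFeature(ENDPOINT_HALT) now leaves a wedged endpoint stalled, while a clear initiated by the function driver itself succeeds and drops both the STALL and WEDGE flags. A minimal sketch of that contract, all identifiers hypothetical:

/* Sketch only: honouring usb_ep_set_wedge() semantics in a UDC.
 * from_host marks a ClearFeature(ENDPOINT_HALT) arriving on ep0.
 */
static int example_clear_halt(struct usb_ep *ep, bool from_host,
                              bool wedged)
{
        if (from_host && wedged)
                return 0;               /* ack, but keep the stall */
        return usb_ep_clear_halt(ep);   /* internal clear also unwedges */
}
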
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index a91e6422f930..f66d96ad1f51 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -682,6 +682,7 @@ config USB_CONFIGFS_PHONET
config USB_CONFIGFS_MASS_STORAGE
boolean "Mass storage"
depends on USB_CONFIGFS
+ depends on BLOCK
select USB_F_MASS_STORAGE
help
The Mass Storage Gadget acts as a USB Mass Storage disk drive.
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 3e7ae707f691..2018ba1a2172 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -593,6 +593,7 @@ static void reset_config(struct usb_composite_dev *cdev)
bitmap_zero(f->endpoints, 32);
}
cdev->config = NULL;
+ cdev->delayed_status = 0;
}
static int set_config(struct usb_composite_dev *cdev,
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 774e8b89cdb5..241fc873ffa4 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1304,7 +1304,7 @@ static struct ffs_data *ffs_data_new(void)
{
struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
if (unlikely(!ffs))
- return 0;
+ return NULL;
ENTER();
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index a03ba2c83589..b96393908860 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -523,7 +523,7 @@ static int fsg_setup(struct usb_function *f,
*/
DBG(fsg, "bulk reset request\n");
raise_exception(fsg->common, FSG_STATE_RESET);
- return DELAYED_STATUS;
+ return USB_GADGET_DELAYED_STATUS;
case US_BULK_GET_MAX_LUN:
if (ctrl->bRequestType !=
@@ -602,13 +602,14 @@ static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
return true;
}
-static int sleep_thread(struct fsg_common *common)
+static int sleep_thread(struct fsg_common *common, bool can_freeze)
{
int rc = 0;
/* Wait until a signal arrives or we are woken up */
for (;;) {
- try_to_freeze();
+ if (can_freeze)
+ try_to_freeze();
set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current)) {
rc = -EINTR;
@@ -682,7 +683,7 @@ static int do_read(struct fsg_common *common)
/* Wait for the next buffer to become available */
bh = common->next_buffhd_to_fill;
while (bh->state != BUF_STATE_EMPTY) {
- rc = sleep_thread(common);
+ rc = sleep_thread(common, false);
if (rc)
return rc;
}
@@ -937,7 +938,7 @@ static int do_write(struct fsg_common *common)
}
/* Wait for something to happen */
- rc = sleep_thread(common);
+ rc = sleep_thread(common, false);
if (rc)
return rc;
}
@@ -1504,7 +1505,7 @@ static int throw_away_data(struct fsg_common *common)
}
/* Otherwise wait for something to happen */
- rc = sleep_thread(common);
+ rc = sleep_thread(common, true);
if (rc)
return rc;
}
@@ -1625,7 +1626,7 @@ static int send_status(struct fsg_common *common)
/* Wait for the next buffer to become available */
bh = common->next_buffhd_to_fill;
while (bh->state != BUF_STATE_EMPTY) {
- rc = sleep_thread(common);
+ rc = sleep_thread(common, true);
if (rc)
return rc;
}
@@ -1828,7 +1829,7 @@ static int do_scsi_command(struct fsg_common *common)
bh = common->next_buffhd_to_fill;
common->next_buffhd_to_drain = bh;
while (bh->state != BUF_STATE_EMPTY) {
- rc = sleep_thread(common);
+ rc = sleep_thread(common, true);
if (rc)
return rc;
}
@@ -2174,7 +2175,7 @@ static int get_next_command(struct fsg_common *common)
/* Wait for the next buffer to become available */
bh = common->next_buffhd_to_fill;
while (bh->state != BUF_STATE_EMPTY) {
- rc = sleep_thread(common);
+ rc = sleep_thread(common, true);
if (rc)
return rc;
}
@@ -2193,7 +2194,7 @@ static int get_next_command(struct fsg_common *common)
/* Wait for the CBW to arrive */
while (bh->state != BUF_STATE_FULL) {
- rc = sleep_thread(common);
+ rc = sleep_thread(common, true);
if (rc)
return rc;
}
@@ -2379,7 +2380,7 @@ static void handle_exception(struct fsg_common *common)
}
if (num_active == 0)
break;
- if (sleep_thread(common))
+ if (sleep_thread(common, true))
return;
}
@@ -2516,7 +2517,7 @@ static int fsg_main_thread(void *common_)
}
if (!common->running) {
- sleep_thread(common);
+ sleep_thread(common, true);
continue;
}
@@ -3111,7 +3112,7 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
fsg->common->can_stall);
if (ret)
return ret;
- fsg_common_set_inquiry_string(fsg->common, 0, 0);
+ fsg_common_set_inquiry_string(fsg->common, NULL, NULL);
ret = fsg_common_run_thread(fsg->common);
if (ret)
return ret;
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index 0ac6064aa3b8..409a3c45a36a 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -54,6 +54,7 @@
*/
#ifdef CONFIG_ARCH_PXA
#include <mach/pxa25x-udc.h>
+#include <mach/hardware.h>
#endif
#ifdef CONFIG_ARCH_LUBBOCK
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 9875d9c0823f..e20bc109fdd7 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -1180,6 +1180,7 @@ static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg,
}
static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg);
+static void s3c_hsotg_disconnect(struct s3c_hsotg *hsotg);
/**
* s3c_hsotg_process_control - process a control request
@@ -1221,6 +1222,7 @@ static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg,
if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
switch (ctrl->bRequest) {
case USB_REQ_SET_ADDRESS:
+ s3c_hsotg_disconnect(hsotg);
dcfg = readl(hsotg->regs + DCFG);
dcfg &= ~DCFG_DevAddr_MASK;
dcfg |= ctrl->wValue << DCFG_DevAddr_SHIFT;
@@ -1245,7 +1247,9 @@ static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg,
/* as a fallback, try delivering it to the driver to deal with */
if (ret == 0 && hsotg->driver) {
+ spin_unlock(&hsotg->lock);
ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
+ spin_lock(&hsotg->lock);
if (ret < 0)
dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
}
@@ -1308,10 +1312,12 @@ static void s3c_hsotg_complete_setup(struct usb_ep *ep,
return;
}
+ spin_lock(&hsotg->lock);
if (req->actual == 0)
s3c_hsotg_enqueue_setup(hsotg);
else
s3c_hsotg_process_control(hsotg, req->buf);
+ spin_unlock(&hsotg->lock);
}
/**
@@ -2533,7 +2539,6 @@ irq_retry:
writel(GINTSTS_USBSusp, hsotg->regs + GINTSTS);
call_gadget(hsotg, suspend);
- s3c_hsotg_disconnect(hsotg);
}
if (gintsts & GINTSTS_WkUpInt) {
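
The s3c-hsotg changes above drop the controller spinlock around the gadget driver's setup() callback, which may sleep or call back into the UDC and retake the same lock. A condensed sketch of the pattern, assuming a hypothetical struct example_udc with lock, gadget, driver and dev fields:

static void deliver_setup(struct example_udc *udc,
                          struct usb_ctrlrequest *ctrl)
{
        int ret;

        spin_unlock(&udc->lock);        /* callee may re-enter the UDC */
        ret = udc->driver->setup(&udc->gadget, ctrl);
        spin_lock(&udc->lock);
        if (ret < 0)
                dev_dbg(udc->dev, "setup() returned %d\n", ret);
}
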
diff --git a/drivers/usb/gadget/storage_common.h b/drivers/usb/gadget/storage_common.h
index c74c2fdbd56e..70c891469f57 100644
--- a/drivers/usb/gadget/storage_common.h
+++ b/drivers/usb/gadget/storage_common.h
@@ -119,10 +119,6 @@ static inline bool fsg_lun_is_open(struct fsg_lun *curlun)
return curlun->filp != NULL;
}
-/* Big enough to hold our biggest descriptor */
-#define EP0_BUFSIZE 256
-#define DELAYED_STATUS (EP0_BUFSIZE + 999) /* An impossibly large value */
-
/* Default size of buffer length. */
#define FSG_BUFLEN ((u32)16384)
diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c
index eccea1df702d..0f8aad78b54f 100644
--- a/drivers/usb/gadget/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/tcm_usb_gadget.c
@@ -370,7 +370,7 @@ err:
return -ENOMEM;
}
-void bot_cleanup_old_alt(struct f_uas *fu)
+static void bot_cleanup_old_alt(struct f_uas *fu)
{
if (!(fu->flags & USBG_ENABLED))
return;
@@ -1923,15 +1923,15 @@ static int usbg_register_configfs(void)
}
fabric->tf_ops = usbg_ops;
- TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = usbg_wwn_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = usbg_base_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = usbg_wwn_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = usbg_base_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
ret = target_fabric_configfs_register(fabric);
if (ret < 0) {
printk(KERN_ERR "target_fabric_configfs_register() failed"
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 0dd07ae1555d..f49b0b61ecc8 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -91,17 +91,17 @@ static struct usb_zero_options gzero_options = {
* functional coverage for the "USBCV" test harness from USB-IF.
* It's always set if OTG mode is enabled.
*/
-unsigned autoresume = DEFAULT_AUTORESUME;
+static unsigned autoresume = DEFAULT_AUTORESUME;
module_param(autoresume, uint, S_IRUGO);
MODULE_PARM_DESC(autoresume, "zero, or seconds before remote wakeup");
/* Maximum Autoresume time */
-unsigned max_autoresume;
+static unsigned max_autoresume;
module_param(max_autoresume, uint, S_IRUGO);
MODULE_PARM_DESC(max_autoresume, "maximum seconds before remote wakeup");
/* Interval between two remote wakeups */
-unsigned autoresume_interval_ms;
+static unsigned autoresume_interval_ms;
module_param(autoresume_interval_ms, uint, S_IRUGO);
MODULE_PARM_DESC(autoresume_interval_ms,
"milliseconds to increase successive wakeup delays");
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 418444ebb1b8..8c356af79409 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -136,23 +136,27 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
struct ohci_hcd *ohci;
int retval;
struct usb_hcd *hcd = NULL;
-
- if (pdev->num_resources != 2) {
- pr_debug("hcd probe: invalid num_resources");
- return -ENODEV;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_dbg(dev, "hcd probe: missing memory resource\n");
+ return -ENXIO;
}
- if ((pdev->resource[0].flags != IORESOURCE_MEM)
- || (pdev->resource[1].flags != IORESOURCE_IRQ)) {
- pr_debug("hcd probe: invalid resource type\n");
- return -ENODEV;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_dbg(dev, "hcd probe: missing irq resource\n");
+ return irq;
}
hcd = usb_create_hcd(driver, &pdev->dev, "at91");
if (!hcd)
return -ENOMEM;
- hcd->rsrc_start = pdev->resource[0].start;
- hcd->rsrc_len = resource_size(&pdev->resource[0]);
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
pr_debug("request_mem_region failed\n");
@@ -199,7 +203,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
ohci->num_ports = board->ports;
at91_start_hc(pdev);
- retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED);
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval == 0)
return retval;
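
The ohci-at91 conversion above replaces direct pdev->resource[] indexing with the platform helpers, which also makes probe deferral on the IRQ possible. A minimal sketch of that lookup pattern, with example_probe() a hypothetical name:

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        struct resource *res;
        int irq;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENXIO;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;     /* may be -EPROBE_DEFER */

        /* map res->start..res->start + resource_size(res) - 1,
         * then hand irq to usb_add_hcd() or request_irq()
         */
        return 0;
}
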
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index e89ac4d4b87e..9b7435f0dcd6 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -21,6 +21,7 @@
#include <linux/clk.h>
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index b8dffd59eb25..73f5208714a4 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -128,7 +128,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
* any other sleep) on Haswell machines with LPT and LPT-LP
* with the new Intel BIOS
*/
- xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+ /* Limit the quirk to only known vendors, as this triggers
+ * yet another BIOS bug on some other machines
+ * https://bugzilla.kernel.org/show_bug.cgi?id=66171
+ */
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
+ xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
}
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 1e2f3f495843..53c2e296467f 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2973,8 +2973,58 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
}
while (1) {
- if (room_on_ring(xhci, ep_ring, num_trbs))
- break;
+ if (room_on_ring(xhci, ep_ring, num_trbs)) {
+ union xhci_trb *trb = ep_ring->enqueue;
+ unsigned int usable = ep_ring->enq_seg->trbs +
+ TRBS_PER_SEGMENT - 1 - trb;
+ u32 nop_cmd;
+
+ /*
+ * Section 4.11.7.1 TD Fragments states that a link
+ * TRB must only occur at the boundary between
+ * data bursts (e.g. 512 bytes for 480M).
+ * While it is possible to split a large fragment,
+ * we don't know the size yet.
+ * The simplest solution is to fill the TRBs before the
+ * LINK with NOP commands.
+ */
+ if (num_trbs == 1 || num_trbs <= usable || usable == 0)
+ break;
+
+ if (ep_ring->type != TYPE_BULK)
+ /*
+ * While isoc transfers might have a buffer that
+ * crosses a 64k boundary, it is unlikely.
+ * Since we can't add NOPs without generating
+ * gaps in the traffic, we just hope it never
+ * happens at the end of the ring.
+ * This could be fixed by writing a LINK TRB
+ * instead of the first NOP - however the
+ * TRB_TYPE_LINK_LE32() calls would all need
+ * changing to check the ring length.
+ */
+ break;
+
+ if (num_trbs >= TRBS_PER_SEGMENT) {
+ xhci_err(xhci, "Too many fragments %d, max %d\n",
+ num_trbs, TRBS_PER_SEGMENT - 1);
+ return -ENOMEM;
+ }
+
+ nop_cmd = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) |
+ ep_ring->cycle_state);
+ ep_ring->num_trbs_free -= usable;
+ do {
+ trb->generic.field[0] = 0;
+ trb->generic.field[1] = 0;
+ trb->generic.field[2] = 0;
+ trb->generic.field[3] = nop_cmd;
+ trb++;
+ } while (--usable);
+ ep_ring->enqueue = trb;
+ if (room_on_ring(xhci, ep_ring, num_trbs))
+ break;
+ }
if (ep_ring == xhci->cmd_ring) {
xhci_err(xhci, "Do not support expand command ring\n");
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 0a43329569d1..4d4499b80449 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1809,7 +1809,6 @@ static void musb_free(struct musb *musb)
disable_irq_wake(musb->nIrq);
free_irq(musb->nIrq, musb);
}
- cancel_work_sync(&musb->irq_work);
musb_host_free(musb);
}
@@ -1896,6 +1895,9 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
musb_platform_disable(musb);
musb_generic_disable(musb);
+ /* Init IRQ workqueue before request_irq */
+ INIT_WORK(&musb->irq_work, musb_irq_work);
+
/* setup musb parts of the core (especially endpoints) */
status = musb_core_init(plat->config->multipoint
? MUSB_CONTROLLER_MHDRC
@@ -1905,9 +1907,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);
- /* Init IRQ workqueue before request_irq */
- INIT_WORK(&musb->irq_work, musb_irq_work);
-
/* attach to the IRQ */
if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) {
dev_err(dev, "request_irq %d failed!\n", nIrq);
@@ -1981,6 +1980,7 @@ fail4:
musb_host_cleanup(musb);
fail3:
+ cancel_work_sync(&musb->irq_work);
if (musb->dma_controller)
dma_controller_destroy(musb->dma_controller);
fail2_5:
@@ -2043,6 +2043,7 @@ static int musb_remove(struct platform_device *pdev)
if (musb->dma_controller)
dma_controller_destroy(musb->dma_controller);
+ cancel_work_sync(&musb->irq_work);
musb_free(musb);
device_init_wakeup(dev, 0);
return 0;
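
The musb_core reordering above follows a general rule: everything an interrupt handler may touch must be initialised before request_irq(), since on a shared line the handler can fire immediately. A short sketch, assuming <linux/interrupt.h> and <linux/workqueue.h>:

static int example_attach_irq(struct musb *musb, int nIrq,
                              struct device *dev)
{
        /* irq_work must exist before the ISR can schedule it */
        INIT_WORK(&musb->irq_work, musb_irq_work);

        if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb))
                return -ENODEV;
        return 0;
}
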
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index ff9d6de2b746..a12bd30401e0 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -38,6 +38,7 @@ struct cppi41_dma_channel {
u32 prog_len;
u32 transferred;
u32 packet_sz;
+ struct list_head tx_check;
};
#define MUSB_DMA_NUM_CHANNELS 15
@@ -47,6 +48,8 @@ struct cppi41_dma_controller {
struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
struct musb *musb;
+ struct hrtimer early_tx;
+ struct list_head early_tx_list;
u32 rx_mode;
u32 tx_mode;
u32 auto_req;
@@ -96,31 +99,27 @@ static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
cppi41_channel->usb_toggle = toggle;
}
-static void cppi41_dma_callback(void *private_data)
+static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
{
- struct dma_channel *channel = private_data;
- struct cppi41_dma_channel *cppi41_channel = channel->private_data;
- struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
- struct musb *musb = hw_ep->musb;
- unsigned long flags;
- struct dma_tx_state txstate;
- u32 transferred;
+ u8 epnum = hw_ep->epnum;
+ struct musb *musb = hw_ep->musb;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ u16 csr;
- spin_lock_irqsave(&musb->lock, flags);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_TXPKTRDY)
+ return false;
+ return true;
+}
- dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
- &txstate);
- transferred = cppi41_channel->prog_len - txstate.residue;
- cppi41_channel->transferred += transferred;
+static void cppi41_dma_callback(void *private_data);
- dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
- hw_ep->epnum, cppi41_channel->transferred,
- cppi41_channel->total_len);
+static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
+{
+ struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
+ struct musb *musb = hw_ep->musb;
- update_rx_toggle(cppi41_channel);
-
- if (cppi41_channel->transferred == cppi41_channel->total_len ||
- transferred < cppi41_channel->packet_sz) {
+ if (!cppi41_channel->prog_len) {
/* done, complete */
cppi41_channel->channel.actual_len =
@@ -150,13 +149,11 @@ static void cppi41_dma_callback(void *private_data)
remain_bytes,
direction,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (WARN_ON(!dma_desc)) {
- spin_unlock_irqrestore(&musb->lock, flags);
+ if (WARN_ON(!dma_desc))
return;
- }
dma_desc->callback = cppi41_dma_callback;
- dma_desc->callback_param = channel;
+ dma_desc->callback_param = &cppi41_channel->channel;
cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
dma_async_issue_pending(dc);
@@ -166,6 +163,117 @@ static void cppi41_dma_callback(void *private_data)
musb_writew(epio, MUSB_RXCSR, csr);
}
}
+}
+
+static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
+{
+ struct cppi41_dma_controller *controller;
+ struct cppi41_dma_channel *cppi41_channel, *n;
+ struct musb *musb;
+ unsigned long flags;
+ enum hrtimer_restart ret = HRTIMER_NORESTART;
+
+ controller = container_of(timer, struct cppi41_dma_controller,
+ early_tx);
+ musb = controller->musb;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
+ tx_check) {
+ bool empty;
+ struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
+
+ empty = musb_is_tx_fifo_empty(hw_ep);
+ if (empty) {
+ list_del_init(&cppi41_channel->tx_check);
+ cppi41_trans_done(cppi41_channel);
+ }
+ }
+
+ if (!list_empty(&controller->early_tx_list)) {
+ ret = HRTIMER_RESTART;
+ hrtimer_forward_now(&controller->early_tx,
+ ktime_set(0, 150 * NSEC_PER_USEC));
+ }
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return ret;
+}
+
+static void cppi41_dma_callback(void *private_data)
+{
+ struct dma_channel *channel = private_data;
+ struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+ struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
+ struct musb *musb = hw_ep->musb;
+ unsigned long flags;
+ struct dma_tx_state txstate;
+ u32 transferred;
+ bool empty;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
+ &txstate);
+ transferred = cppi41_channel->prog_len - txstate.residue;
+ cppi41_channel->transferred += transferred;
+
+ dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
+ hw_ep->epnum, cppi41_channel->transferred,
+ cppi41_channel->total_len);
+
+ update_rx_toggle(cppi41_channel);
+
+ if (cppi41_channel->transferred == cppi41_channel->total_len ||
+ transferred < cppi41_channel->packet_sz)
+ cppi41_channel->prog_len = 0;
+
+ empty = musb_is_tx_fifo_empty(hw_ep);
+ if (empty) {
+ cppi41_trans_done(cppi41_channel);
+ } else {
+ struct cppi41_dma_controller *controller;
+ /*
+ * On AM335x it has been observed that the TX interrupt fires
+ * too early, which means the TXFIFO is not yet empty but the DMA
+ * engine says that it is done with the transfer. We don't
+ * receive a FIFO empty interrupt, so the only thing we can do is
+ * to poll for the bit. On HS it usually takes 2us, on FS around
+ * 110us - 150us depending on the transfer size.
+ * We spin on HS (no longer than 25us) and set up a timer on
+ * FS to check for the bit and complete the transfer.
+ */
+ controller = cppi41_channel->controller;
+
+ if (musb->g.speed == USB_SPEED_HIGH) {
+ unsigned wait = 25;
+
+ do {
+ empty = musb_is_tx_fifo_empty(hw_ep);
+ if (empty)
+ break;
+ wait--;
+ if (!wait)
+ break;
+ udelay(1);
+ } while (1);
+
+ empty = musb_is_tx_fifo_empty(hw_ep);
+ if (empty) {
+ cppi41_trans_done(cppi41_channel);
+ goto out;
+ }
+ }
+ list_add_tail(&cppi41_channel->tx_check,
+ &controller->early_tx_list);
+ if (!hrtimer_active(&controller->early_tx)) {
+ hrtimer_start_range_ns(&controller->early_tx,
+ ktime_set(0, 140 * NSEC_PER_USEC),
+ 40 * NSEC_PER_USEC,
+ HRTIMER_MODE_REL);
+ }
+ }
+out:
spin_unlock_irqrestore(&musb->lock, flags);
}
@@ -364,6 +472,8 @@ static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
WARN_ON(1);
return 1;
}
+ if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
+ return 0;
if (cppi41_channel->is_tx)
return 1;
/* AM335x Advisory 1.0.13. No workaround for device RX mode */
@@ -388,6 +498,7 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
return 0;
+ list_del_init(&cppi41_channel->tx_check);
if (is_tx) {
csr = musb_readw(epio, MUSB_TXCSR);
csr &= ~MUSB_TXCSR_DMAENAB;
@@ -495,6 +606,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
cppi41_channel->controller = controller;
cppi41_channel->port_num = port;
cppi41_channel->is_tx = is_tx;
+ INIT_LIST_HEAD(&cppi41_channel->tx_check);
musb_dma = &cppi41_channel->channel;
musb_dma->private_data = cppi41_channel;
@@ -520,6 +632,7 @@ void dma_controller_destroy(struct dma_controller *c)
struct cppi41_dma_controller *controller = container_of(c,
struct cppi41_dma_controller, controller);
+ hrtimer_cancel(&controller->early_tx);
cppi41_dma_controller_stop(controller);
kfree(controller);
}
@@ -539,6 +652,9 @@ struct dma_controller *dma_controller_create(struct musb *musb,
if (!controller)
goto kzalloc_fail;
+ hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ controller->early_tx.function = cppi41_recheck_tx_req;
+ INIT_LIST_HEAD(&controller->early_tx_list);
controller->musb = musb;
controller->controller.channel_alloc = cppi41_dma_channel_allocate;
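
The hrtimer added to the cppi41 controller re-arms itself every ~150us until the early-TX list drains. A condensed sketch of that self-restarting pattern, using the structures from this file and assuming <linux/hrtimer.h>:

static enum hrtimer_restart example_recheck(struct hrtimer *timer)
{
        struct cppi41_dma_controller *ctrl =
                container_of(timer, struct cppi41_dma_controller,
                             early_tx);
        enum hrtimer_restart ret = HRTIMER_NORESTART;
        unsigned long flags;

        spin_lock_irqsave(&ctrl->musb->lock, flags);
        /* ... complete channels whose TX FIFO is now empty ... */
        if (!list_empty(&ctrl->early_tx_list)) {
                hrtimer_forward_now(timer,
                                    ktime_set(0, 150 * NSEC_PER_USEC));
                ret = HRTIMER_RESTART;  /* keep polling */
        }
        spin_unlock_irqrestore(&ctrl->musb->lock, flags);
        return ret;
}
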
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index d2d3a173b315..32fb057c03f5 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1796,7 +1796,11 @@ int musb_gadget_setup(struct musb *musb)
/* this "gadget" abstracts/virtualizes the controller */
musb->g.name = musb_driver_name;
+#if IS_ENABLED(CONFIG_USB_MUSB_DUAL_ROLE)
musb->g.is_otg = 1;
+#elif IS_ENABLED(CONFIG_USB_MUSB_GADGET)
+ musb->g.is_otg = 0;
+#endif
musb_g_init_endpoints(musb);
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 08e2f39027ec..2b41c636a52a 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -19,8 +19,9 @@ config AB8500_USB
in host mode, low speed.
config FSL_USB2_OTG
- bool "Freescale USB OTG Transceiver Driver"
+ tristate "Freescale USB OTG Transceiver Driver"
depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME
+ depends on USB
select USB_OTG
select USB_PHY
help
@@ -29,6 +30,7 @@ config FSL_USB2_OTG
config ISP1301_OMAP
tristate "Philips ISP1301 with OMAP OTG"
depends on I2C && ARCH_OMAP_OTG
+ depends on USB
select USB_PHY
help
If you say yes here you get support for the Philips ISP1301
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
index 6370e50649d7..0e3c60cb669a 100644
--- a/drivers/usb/phy/phy-am335x.c
+++ b/drivers/usb/phy/phy-am335x.c
@@ -52,8 +52,7 @@ static int am335x_phy_probe(struct platform_device *pdev)
return am_phy->id;
}
- ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen,
- USB_PHY_TYPE_USB2, 0, false);
+ ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen, NULL);
if (ret)
return ret;
@@ -66,8 +65,6 @@ static int am335x_phy_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, am_phy);
return 0;
-
- return ret;
}
static int am335x_phy_remove(struct platform_device *pdev)
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index fce3a9e9bb5d..aa6d37b3378a 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -48,8 +48,9 @@ void usb_nop_xceiv_register(void)
if (pd)
return;
pd = platform_device_register_simple("usb_phy_gen_xceiv", -1, NULL, 0);
- if (!pd) {
+ if (IS_ERR(pd)) {
pr_err("Unable to register generic usb transceiver\n");
+ pd = NULL;
return;
}
}
@@ -150,10 +151,40 @@ static int nop_set_host(struct usb_otg *otg, struct usb_bus *host)
}
int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop,
- enum usb_phy_type type, u32 clk_rate, bool needs_vcc)
+ struct usb_phy_gen_xceiv_platform_data *pdata)
{
+ enum usb_phy_type type = USB_PHY_TYPE_USB2;
int err;
+ u32 clk_rate = 0;
+ bool needs_vcc = false;
+
+ nop->reset_active_low = true; /* default behaviour */
+
+ if (dev->of_node) {
+ struct device_node *node = dev->of_node;
+ enum of_gpio_flags flags = 0;
+
+ if (of_property_read_u32(node, "clock-frequency", &clk_rate))
+ clk_rate = 0;
+
+ needs_vcc = of_property_read_bool(node, "vcc-supply");
+ nop->gpio_reset = of_get_named_gpio_flags(node, "reset-gpios",
+ 0, &flags);
+ if (nop->gpio_reset == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ nop->reset_active_low = flags & OF_GPIO_ACTIVE_LOW;
+
+ } else if (pdata) {
+ type = pdata->type;
+ clk_rate = pdata->clk_rate;
+ needs_vcc = pdata->needs_vcc;
+ nop->gpio_reset = pdata->gpio_reset;
+ } else {
+ nop->gpio_reset = -1;
+ }
+
nop->phy.otg = devm_kzalloc(dev, sizeof(*nop->phy.otg),
GFP_KERNEL);
if (!nop->phy.otg)
@@ -218,43 +249,14 @@ EXPORT_SYMBOL_GPL(usb_phy_gen_create_phy);
static int usb_phy_gen_xceiv_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct usb_phy_gen_xceiv_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
struct usb_phy_gen_xceiv *nop;
- enum usb_phy_type type = USB_PHY_TYPE_USB2;
int err;
- u32 clk_rate = 0;
- bool needs_vcc = false;
nop = devm_kzalloc(dev, sizeof(*nop), GFP_KERNEL);
if (!nop)
return -ENOMEM;
- nop->reset_active_low = true; /* default behaviour */
-
- if (dev->of_node) {
- struct device_node *node = dev->of_node;
- enum of_gpio_flags flags;
-
- if (of_property_read_u32(node, "clock-frequency", &clk_rate))
- clk_rate = 0;
-
- needs_vcc = of_property_read_bool(node, "vcc-supply");
- nop->gpio_reset = of_get_named_gpio_flags(node, "reset-gpios",
- 0, &flags);
- if (nop->gpio_reset == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- nop->reset_active_low = flags & OF_GPIO_ACTIVE_LOW;
-
- } else if (pdata) {
- type = pdata->type;
- clk_rate = pdata->clk_rate;
- needs_vcc = pdata->needs_vcc;
- nop->gpio_reset = pdata->gpio_reset;
- }
-
- err = usb_phy_gen_create_phy(dev, nop, type, clk_rate, needs_vcc);
+ err = usb_phy_gen_create_phy(dev, nop, dev_get_platdata(&pdev->dev));
if (err)
return err;
@@ -271,8 +273,6 @@ static int usb_phy_gen_xceiv_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, nop);
return 0;
-
- return err;
}
static int usb_phy_gen_xceiv_remove(struct platform_device *pdev)
diff --git a/drivers/usb/phy/phy-generic.h b/drivers/usb/phy/phy-generic.h
index d2a220d81734..38a81f307b82 100644
--- a/drivers/usb/phy/phy-generic.h
+++ b/drivers/usb/phy/phy-generic.h
@@ -1,6 +1,8 @@
#ifndef _PHY_GENERIC_H_
#define _PHY_GENERIC_H_
+#include <linux/usb/usb_phy_gen_xceiv.h>
+
struct usb_phy_gen_xceiv {
struct usb_phy phy;
struct device *dev;
@@ -14,6 +16,6 @@ int usb_gen_phy_init(struct usb_phy *phy);
void usb_gen_phy_shutdown(struct usb_phy *phy);
int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop,
- enum usb_phy_type type, u32 clk_rate, bool needs_vcc);
+ struct usb_phy_gen_xceiv_platform_data *pdata);
#endif
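
usb_phy_gen_create_phy() now does the DT/platform-data split itself: device-tree properties win, platform data is the fallback, and safe defaults apply otherwise. A trimmed sketch of that parse order, assuming <linux/of.h> and <linux/of_gpio.h>:

static int example_parse(struct device *dev,
                         struct usb_phy_gen_xceiv *nop,
                         struct usb_phy_gen_xceiv_platform_data *pdata)
{
        if (dev->of_node) {
                enum of_gpio_flags flags = 0;

                nop->gpio_reset = of_get_named_gpio_flags(dev->of_node,
                                        "reset-gpios", 0, &flags);
                if (nop->gpio_reset == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                nop->reset_active_low = flags & OF_GPIO_ACTIVE_LOW;
        } else if (pdata) {
                nop->gpio_reset = pdata->gpio_reset;
        } else {
                nop->gpio_reset = -1;   /* no reset line wired up */
        }
        return 0;
}
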
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index fdd33b44dbd3..545844b7e796 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -164,7 +164,7 @@ static int mxs_phy_probe(struct platform_device *pdev)
mxs_phy->clk = clk;
- platform_set_drvdata(pdev, &mxs_phy->phy);
+ platform_set_drvdata(pdev, mxs_phy);
ret = usb_add_phy_dev(&mxs_phy->phy);
if (ret)
diff --git a/drivers/usb/phy/phy-rcar-gen2-usb.c b/drivers/usb/phy/phy-rcar-gen2-usb.c
index a99a6953f11c..db3ab34cddb4 100644
--- a/drivers/usb/phy/phy-rcar-gen2-usb.c
+++ b/drivers/usb/phy/phy-rcar-gen2-usb.c
@@ -107,10 +107,10 @@ static void __rcar_gen2_usb_phy_init(struct rcar_gen2_usb_phy_priv *priv)
clk_prepare_enable(priv->clk);
/* Set USB channels in the USBHS UGCTRL2 register */
- val = ioread32(priv->base);
+ val = ioread32(priv->base + USBHS_UGCTRL2_REG);
val &= ~(USBHS_UGCTRL2_USB0_HS | USBHS_UGCTRL2_USB2_SS);
val |= priv->ugctrl2;
- iowrite32(val, priv->base);
+ iowrite32(val, priv->base + USBHS_UGCTRL2_REG);
}
/* Shutdown USB channels */
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index 82232acf1ab6..bbe4f8e6e8d7 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -876,7 +876,7 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
- if (!tegra_phy->regs) {
+ if (!tegra_phy->pad_regs) {
dev_err(&pdev->dev, "Failed to remap UTMI Pad regs\n");
return -ENOMEM;
}
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index 30e8a61552d4..bad57ce77ba5 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -127,7 +127,8 @@ static inline int twl6030_writeb(struct twl6030_usb *twl, u8 module,
static inline u8 twl6030_readb(struct twl6030_usb *twl, u8 module, u8 address)
{
- u8 data, ret = 0;
+ u8 data;
+ int ret;
ret = twl_i2c_read_u8(module, &data, address);
if (ret >= 0)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 9ced8937a8f3..fb0d537435eb 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -2123,6 +2123,20 @@ static void ftdi_set_termios(struct tty_struct *tty,
termios->c_cflag |= CRTSCTS;
}
+ /*
+ * All FTDI UART chips are limited to CS7/8. We won't pretend to
+ * support CS5/6 and revert the CSIZE setting instead.
+ */
+ if ((C_CSIZE(tty) != CS8) && (C_CSIZE(tty) != CS7)) {
+ dev_warn(ddev, "requested CSIZE setting not supported\n");
+
+ termios->c_cflag &= ~CSIZE;
+ if (old_termios)
+ termios->c_cflag |= old_termios->c_cflag & CSIZE;
+ else
+ termios->c_cflag |= CS8;
+ }
+
cflag = termios->c_cflag;
if (!old_termios)
@@ -2159,19 +2173,16 @@ no_skip:
} else {
urb_value |= FTDI_SIO_SET_DATA_PARITY_NONE;
}
- if (cflag & CSIZE) {
- switch (cflag & CSIZE) {
- case CS7:
- urb_value |= 7;
- dev_dbg(ddev, "Setting CS7\n");
- break;
- case CS8:
- urb_value |= 8;
- dev_dbg(ddev, "Setting CS8\n");
- break;
- default:
- dev_err(ddev, "CSIZE was set but not CS7-CS8\n");
- }
+ switch (cflag & CSIZE) {
+ case CS7:
+ urb_value |= 7;
+ dev_dbg(ddev, "Setting CS7\n");
+ break;
+ default:
+ case CS8:
+ urb_value |= 8;
+ dev_dbg(ddev, "Setting CS8\n");
+ break;
}
/* This is needed by the break command since it uses the same command
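
The ftdi_sio, mos7840, pl2303 and spcp8x5 hunks in this series all converge on the same termios policy: treat CSIZE as a plain switch with CS8 as the default, and, where the hardware is limited, revert unsupported sizes before programming the chip. A minimal sketch of the revert step, names hypothetical:

static void example_clamp_csize(struct tty_struct *tty,
                                struct ktermios *old)
{
        if (C_CSIZE(tty) != CS7 && C_CSIZE(tty) != CS8) {
                tty->termios.c_cflag &= ~CSIZE;
                tty->termios.c_cflag |= old ?
                        (old->c_cflag & CSIZE) : CS8;
        }
}
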
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 2b01ec8651c2..b63ce023f96f 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -173,16 +173,8 @@ retry:
clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
return result;
}
- /*
- * Try sending off another urb, unless called from completion handler
- * (in which case there will be no free urb or no data).
- */
- if (mem_flags != GFP_ATOMIC)
- goto retry;
- clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
-
- return 0;
+ goto retry; /* try sending off another urb */
}
EXPORT_SYMBOL_GPL(usb_serial_generic_write_start);
@@ -208,7 +200,7 @@ int usb_serial_generic_write(struct tty_struct *tty,
return 0;
count = kfifo_in_locked(&port->write_fifo, buf, count, &port->lock);
- result = usb_serial_generic_write_start(port, GFP_KERNEL);
+ result = usb_serial_generic_write_start(port, GFP_ATOMIC);
if (result)
return result;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index e5bdd987b9e8..a69da83604c0 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1813,25 +1813,25 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
iflag = tty->termios.c_iflag;
/* Change the number of bits */
- if (cflag & CSIZE) {
- switch (cflag & CSIZE) {
- case CS5:
- lData = LCR_BITS_5;
- break;
+ switch (cflag & CSIZE) {
+ case CS5:
+ lData = LCR_BITS_5;
+ break;
- case CS6:
- lData = LCR_BITS_6;
- break;
+ case CS6:
+ lData = LCR_BITS_6;
+ break;
- case CS7:
- lData = LCR_BITS_7;
- break;
- default:
- case CS8:
- lData = LCR_BITS_8;
- break;
- }
+ case CS7:
+ lData = LCR_BITS_7;
+ break;
+
+ default:
+ case CS8:
+ lData = LCR_BITS_8;
+ break;
}
+
/* Change the Parity bit */
if (cflag & PARENB) {
if (cflag & PARODD) {
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index c3d94853b4ab..cc7a24154490 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -85,6 +85,7 @@ static void option_instat_callback(struct urb *urb);
#define HUAWEI_PRODUCT_K4505 0x1464
#define HUAWEI_PRODUCT_K3765 0x1465
#define HUAWEI_PRODUCT_K4605 0x14C6
+#define HUAWEI_PRODUCT_E173S6 0x1C07
#define QUANTA_VENDOR_ID 0x0408
#define QUANTA_PRODUCT_Q101 0xEA02
@@ -250,6 +251,7 @@ static void option_instat_callback(struct urb *urb);
#define ZTE_PRODUCT_MF628 0x0015
#define ZTE_PRODUCT_MF626 0x0031
#define ZTE_PRODUCT_MC2718 0xffe8
+#define ZTE_PRODUCT_AC2726 0xfff1
#define BENQ_VENDOR_ID 0x04a5
#define BENQ_PRODUCT_H10 0x4068
@@ -572,6 +574,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S6, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t) &net_intf2_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
@@ -634,6 +638,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x72) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x73) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x74) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x75) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) },
@@ -688,6 +696,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x72) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x73) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x74) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x75) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
@@ -742,6 +754,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x72) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x73) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x74) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x75) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) },
@@ -796,6 +812,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x72) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x73) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x74) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x75) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) },
@@ -850,6 +870,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x72) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x73) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x74) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x75) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) },
@@ -904,6 +928,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x72) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x73) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x74) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x75) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) },
@@ -1426,6 +1454,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
{ USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 1e6de4cd079d..1e3318dfa1cb 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -361,23 +361,21 @@ static void pl2303_set_termios(struct tty_struct *tty,
0, 0, buf, 7, 100);
dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %7ph\n", i, buf);
- if (C_CSIZE(tty)) {
- switch (C_CSIZE(tty)) {
- case CS5:
- buf[6] = 5;
- break;
- case CS6:
- buf[6] = 6;
- break;
- case CS7:
- buf[6] = 7;
- break;
- default:
- case CS8:
- buf[6] = 8;
- }
- dev_dbg(&port->dev, "data bits = %d\n", buf[6]);
+ switch (C_CSIZE(tty)) {
+ case CS5:
+ buf[6] = 5;
+ break;
+ case CS6:
+ buf[6] = 6;
+ break;
+ case CS7:
+ buf[6] = 7;
+ break;
+ default:
+ case CS8:
+ buf[6] = 8;
}
+ dev_dbg(&port->dev, "data bits = %d\n", buf[6]);
/* For reference buf[0]:buf[3] baud rate value */
pl2303_encode_baudrate(tty, port, &buf[0]);
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 4abac28b5992..5b793c352267 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -348,22 +348,20 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
}
/* Set Data Length : 00:5bit, 01:6bit, 10:7bit, 11:8bit */
- if (cflag & CSIZE) {
- switch (cflag & CSIZE) {
- case CS5:
- buf[1] |= SET_UART_FORMAT_SIZE_5;
- break;
- case CS6:
- buf[1] |= SET_UART_FORMAT_SIZE_6;
- break;
- case CS7:
- buf[1] |= SET_UART_FORMAT_SIZE_7;
- break;
- default:
- case CS8:
- buf[1] |= SET_UART_FORMAT_SIZE_8;
- break;
- }
+ switch (cflag & CSIZE) {
+ case CS5:
+ buf[1] |= SET_UART_FORMAT_SIZE_5;
+ break;
+ case CS6:
+ buf[1] |= SET_UART_FORMAT_SIZE_6;
+ break;
+ case CS7:
+ buf[1] |= SET_UART_FORMAT_SIZE_7;
+ break;
+ default:
+ case CS8:
+ buf[1] |= SET_UART_FORMAT_SIZE_8;
+ break;
}
/* Set Stop bit2 : 0:1bit 1:2bit */
diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c
index fca4c752a4ed..eae2c873b39f 100644
--- a/drivers/usb/serial/zte_ev.c
+++ b/drivers/usb/serial/zte_ev.c
@@ -281,8 +281,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x19d2, 0xfffd) },
{ USB_DEVICE(0x19d2, 0xfffc) },
{ USB_DEVICE(0x19d2, 0xfffb) },
- /* AC2726, AC8710_V3 */
- { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfff1, 0xff, 0xff, 0xff) },
+ /* AC8710_V3 */
{ USB_DEVICE(0x19d2, 0xfff6) },
{ USB_DEVICE(0x19d2, 0xfff7) },
{ USB_DEVICE(0x19d2, 0xfff8) },
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index e538b72c4e3a..f14e7929ba22 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -97,18 +97,12 @@ static void wusbhc_devconnect_acked_work(struct work_struct *work);
static void wusb_dev_free(struct wusb_dev *wusb_dev)
{
- if (wusb_dev) {
- kfree(wusb_dev->set_gtk_req);
- usb_free_urb(wusb_dev->set_gtk_urb);
- kfree(wusb_dev);
- }
+ kfree(wusb_dev);
}
static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc)
{
struct wusb_dev *wusb_dev;
- struct urb *urb;
- struct usb_ctrlrequest *req;
wusb_dev = kzalloc(sizeof(*wusb_dev), GFP_KERNEL);
if (wusb_dev == NULL)
@@ -118,22 +112,6 @@ static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc)
INIT_WORK(&wusb_dev->devconnect_acked_work, wusbhc_devconnect_acked_work);
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (urb == NULL)
- goto err;
- wusb_dev->set_gtk_urb = urb;
-
- req = kmalloc(sizeof(*req), GFP_KERNEL);
- if (req == NULL)
- goto err;
- wusb_dev->set_gtk_req = req;
-
- req->bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE;
- req->bRequest = USB_REQ_SET_DESCRIPTOR;
- req->wValue = cpu_to_le16(USB_DT_KEY << 8 | wusbhc->gtk_index);
- req->wIndex = 0;
- req->wLength = cpu_to_le16(wusbhc->gtk.descr.bLength);
-
return wusb_dev;
err:
wusb_dev_free(wusb_dev);
@@ -411,9 +389,6 @@ static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc,
/*
* Refresh the list of keep alives to emit in the MMC
*
- * Some devices don't respond to keep alives unless they've been
- * authenticated, so skip unauthenticated devices.
- *
* We only publish the first four devices that have a coming timeout
* condition. Then when we are done processing those, we go for the
* next ones. We ignore the ones that have timed out already (they'll
@@ -448,7 +423,7 @@ static void __wusbhc_keep_alive(struct wusbhc *wusbhc)
if (wusb_dev == NULL)
continue;
- if (wusb_dev->usb_dev == NULL || !wusb_dev->usb_dev->authenticated)
+ if (wusb_dev->usb_dev == NULL)
continue;
if (time_after(jiffies, wusb_dev->entry_ts + tt)) {
@@ -524,11 +499,19 @@ static struct wusb_dev *wusbhc_find_dev_by_addr(struct wusbhc *wusbhc, u8 addr)
*
* @wusbhc shall be referenced and unlocked
*/
-static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, u8 srcaddr)
{
+ struct wusb_dev *wusb_dev;
+
mutex_lock(&wusbhc->mutex);
- wusb_dev->entry_ts = jiffies;
- __wusbhc_keep_alive(wusbhc);
+ wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr);
+ if (wusb_dev == NULL) {
+ dev_dbg(wusbhc->dev, "ignoring DN_Alive from unconnected device %02x\n",
+ srcaddr);
+ } else {
+ wusb_dev->entry_ts = jiffies;
+ __wusbhc_keep_alive(wusbhc);
+ }
mutex_unlock(&wusbhc->mutex);
}
@@ -582,14 +565,22 @@ static void wusbhc_handle_dn_connect(struct wusbhc *wusbhc,
*
* @wusbhc shall be referenced and unlocked
*/
-static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, u8 srcaddr)
{
struct device *dev = wusbhc->dev;
-
- dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n", wusb_dev->addr);
+ struct wusb_dev *wusb_dev;
mutex_lock(&wusbhc->mutex);
- __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, wusb_dev->port_idx));
+ wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr);
+ if (wusb_dev == NULL) {
+ dev_dbg(dev, "ignoring DN DISCONNECT from unconnected device %02x\n",
+ srcaddr);
+ } else {
+ dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n",
+ wusb_dev->addr);
+ __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc,
+ wusb_dev->port_idx));
+ }
mutex_unlock(&wusbhc->mutex);
}
@@ -611,30 +602,21 @@ void wusbhc_handle_dn(struct wusbhc *wusbhc, u8 srcaddr,
struct wusb_dn_hdr *dn_hdr, size_t size)
{
struct device *dev = wusbhc->dev;
- struct wusb_dev *wusb_dev;
if (size < sizeof(struct wusb_dn_hdr)) {
dev_err(dev, "DN data shorter than DN header (%d < %d)\n",
(int)size, (int)sizeof(struct wusb_dn_hdr));
return;
}
-
- wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr);
- if (wusb_dev == NULL && dn_hdr->bType != WUSB_DN_CONNECT) {
- dev_dbg(dev, "ignoring DN %d from unconnected device %02x\n",
- dn_hdr->bType, srcaddr);
- return;
- }
-
switch (dn_hdr->bType) {
case WUSB_DN_CONNECT:
wusbhc_handle_dn_connect(wusbhc, dn_hdr, size);
break;
case WUSB_DN_ALIVE:
- wusbhc_handle_dn_alive(wusbhc, wusb_dev);
+ wusbhc_handle_dn_alive(wusbhc, srcaddr);
break;
case WUSB_DN_DISCONNECT:
- wusbhc_handle_dn_disconnect(wusbhc, wusb_dev);
+ wusbhc_handle_dn_disconnect(wusbhc, srcaddr);
break;
case WUSB_DN_MASAVAILCHANGED:
case WUSB_DN_RWAKE:
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index dd88441c8f78..4c40d0dbf53d 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -29,19 +29,16 @@
#include <linux/export.h>
#include "wusbhc.h"
-static void wusbhc_set_gtk_callback(struct urb *urb);
-static void wusbhc_gtk_rekey_done_work(struct work_struct *work);
+static void wusbhc_gtk_rekey_work(struct work_struct *work);
int wusbhc_sec_create(struct wusbhc *wusbhc)
{
wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) + sizeof(wusbhc->gtk.data);
wusbhc->gtk.descr.bDescriptorType = USB_DT_KEY;
wusbhc->gtk.descr.bReserved = 0;
+ wusbhc->gtk_index = 0;
- wusbhc->gtk_index = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK,
- WUSB_KEY_INDEX_ORIGINATOR_HOST);
-
- INIT_WORK(&wusbhc->gtk_rekey_done_work, wusbhc_gtk_rekey_done_work);
+ INIT_WORK(&wusbhc->gtk_rekey_work, wusbhc_gtk_rekey_work);
return 0;
}
@@ -113,7 +110,7 @@ int wusbhc_sec_start(struct wusbhc *wusbhc)
wusbhc_generate_gtk(wusbhc);
result = wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid,
- &wusbhc->gtk.descr.bKeyData, key_size);
+ &wusbhc->gtk.descr.bKeyData, key_size);
if (result < 0)
dev_err(wusbhc->dev, "cannot set GTK for the host: %d\n",
result);
@@ -129,7 +126,7 @@ int wusbhc_sec_start(struct wusbhc *wusbhc)
*/
void wusbhc_sec_stop(struct wusbhc *wusbhc)
{
- cancel_work_sync(&wusbhc->gtk_rekey_done_work);
+ cancel_work_sync(&wusbhc->gtk_rekey_work);
}
@@ -185,12 +182,14 @@ static int wusb_dev_set_encryption(struct usb_device *usb_dev, int value)
static int wusb_dev_set_gtk(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
{
struct usb_device *usb_dev = wusb_dev->usb_dev;
+ u8 key_index = wusb_key_index(wusbhc->gtk_index,
+ WUSB_KEY_INDEX_TYPE_GTK, WUSB_KEY_INDEX_ORIGINATOR_HOST);
return usb_control_msg(
usb_dev, usb_sndctrlpipe(usb_dev, 0),
USB_REQ_SET_DESCRIPTOR,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- USB_DT_KEY << 8 | wusbhc->gtk_index, 0,
+ USB_DT_KEY << 8 | key_index, 0,
&wusbhc->gtk.descr, wusbhc->gtk.descr.bLength,
1000);
}
@@ -520,24 +519,55 @@ error_kzalloc:
* Once all connected and authenticated devices have received the new
* GTK, switch the host to using it.
*/
-static void wusbhc_gtk_rekey_done_work(struct work_struct *work)
+static void wusbhc_gtk_rekey_work(struct work_struct *work)
{
- struct wusbhc *wusbhc = container_of(work, struct wusbhc, gtk_rekey_done_work);
+ struct wusbhc *wusbhc = container_of(work,
+ struct wusbhc, gtk_rekey_work);
size_t key_size = sizeof(wusbhc->gtk.data);
+ int port_idx;
+ struct wusb_dev *wusb_dev, *wusb_dev_next;
+ LIST_HEAD(rekey_list);
mutex_lock(&wusbhc->mutex);
+ /* Generate the new key. */
+ wusbhc_generate_gtk(wusbhc);
+ /* Roll the GTK index. */
+ wusbhc->gtk_index = (wusbhc->gtk_index + 1) % (WUSB_KEY_INDEX_MAX + 1);
+ /*
+ * Save all connected devices on a list while holding wusbhc->mutex and
+ * take a reference to each one. Then submit the set key request to
+ * them after releasing the lock in order to avoid a deadlock.
+ */
+ for (port_idx = 0; port_idx < wusbhc->ports_max; port_idx++) {
+ wusb_dev = wusbhc->port[port_idx].wusb_dev;
+ if (!wusb_dev || !wusb_dev->usb_dev
+ || !wusb_dev->usb_dev->authenticated)
+ continue;
- if (--wusbhc->pending_set_gtks == 0)
- wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size);
-
+ wusb_dev_get(wusb_dev);
+ list_add_tail(&wusb_dev->rekey_node, &rekey_list);
+ }
mutex_unlock(&wusbhc->mutex);
-}
-static void wusbhc_set_gtk_callback(struct urb *urb)
-{
- struct wusbhc *wusbhc = urb->context;
+ /* Submit the rekey requests without holding wusbhc->mutex. */
+ list_for_each_entry_safe(wusb_dev, wusb_dev_next, &rekey_list,
+ rekey_node) {
+ list_del_init(&wusb_dev->rekey_node);
+ dev_dbg(&wusb_dev->usb_dev->dev, "%s: rekey device at port %d\n",
+ __func__, wusb_dev->port_idx);
+
+ if (wusb_dev_set_gtk(wusbhc, wusb_dev) < 0) {
+ dev_err(&wusb_dev->usb_dev->dev, "%s: rekey device at port %d failed\n",
+ __func__, wusb_dev->port_idx);
+ }
+ wusb_dev_put(wusb_dev);
+ }
- queue_work(wusbd, &wusbhc->gtk_rekey_done_work);
+ /* Switch the host controller to use the new GTK. */
+ mutex_lock(&wusbhc->mutex);
+ wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid,
+ &wusbhc->gtk.descr.bKeyData, key_size);
+ mutex_unlock(&wusbhc->mutex);
}
/**
@@ -553,26 +583,12 @@ static void wusbhc_set_gtk_callback(struct urb *urb)
*/
void wusbhc_gtk_rekey(struct wusbhc *wusbhc)
{
- static const size_t key_size = sizeof(wusbhc->gtk.data);
- int p;
-
- wusbhc_generate_gtk(wusbhc);
-
- for (p = 0; p < wusbhc->ports_max; p++) {
- struct wusb_dev *wusb_dev;
-
- wusb_dev = wusbhc->port[p].wusb_dev;
- if (!wusb_dev || !wusb_dev->usb_dev || !wusb_dev->usb_dev->authenticated)
- continue;
-
- usb_fill_control_urb(wusb_dev->set_gtk_urb, wusb_dev->usb_dev,
- usb_sndctrlpipe(wusb_dev->usb_dev, 0),
- (void *)wusb_dev->set_gtk_req,
- &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength,
- wusbhc_set_gtk_callback, wusbhc);
- if (usb_submit_urb(wusb_dev->set_gtk_urb, GFP_KERNEL) == 0)
- wusbhc->pending_set_gtks++;
- }
- if (wusbhc->pending_set_gtks == 0)
- wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size);
+ /*
+ * We need to submit a URB to the downstream WUSB devices in order to
+ * change the group key. This can't be done while holding the
+ * wusbhc->mutex since that is also taken in the urb_enqueue routine
+ * and will cause a deadlock. Instead, queue a work item to do
+ * it when the lock is not held.
+ */
+ queue_work(wusbd, &wusbhc->gtk_rekey_work);
}
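
The rekey rework above is an instance of a common deadlock-avoidance pattern: when an operation must submit URBs but the caller can hold a lock that urb_enqueue also takes, defer the whole operation to a work item that starts lock-free. A minimal sketch using the fields from this file:

static void example_rekey_work(struct work_struct *work)
{
        struct wusbhc *wusbhc = container_of(work, struct wusbhc,
                                             gtk_rekey_work);

        /* safe to take wusbhc->mutex and submit transfers here */
}

static void example_trigger_rekey(struct wusbhc *wusbhc)
{
        queue_work(wusbd, &wusbhc->gtk_rekey_work);     /* lock-free */
}
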
diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h
index 711b1952b114..6bd3b819a6b5 100644
--- a/drivers/usb/wusbcore/wusbhc.h
+++ b/drivers/usb/wusbcore/wusbhc.h
@@ -97,6 +97,7 @@ struct wusb_dev {
struct kref refcnt;
struct wusbhc *wusbhc;
struct list_head cack_node; /* Connect-Ack list */
+ struct list_head rekey_node; /* GTK rekey list */
u8 port_idx;
u8 addr;
u8 beacon_type:4;
@@ -107,8 +108,6 @@ struct wusb_dev {
struct usb_wireless_cap_descriptor *wusb_cap_descr;
struct uwb_mas_bm availability;
struct work_struct devconnect_acked_work;
- struct urb *set_gtk_urb;
- struct usb_ctrlrequest *set_gtk_req;
struct usb_device *usb_dev;
};
@@ -296,8 +295,7 @@ struct wusbhc {
} __attribute__((packed)) gtk;
u8 gtk_index;
u32 gtk_tkid;
- struct work_struct gtk_rekey_done_work;
- int pending_set_gtks;
+ struct work_struct gtk_rekey_work;
struct usb_encryption_descriptor *ccm1_etd;
};
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 831eb4fd197d..9a68409580d5 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -683,7 +683,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
struct vhost_dev *dev;
struct vhost_virtqueue **vqs;
- int r, i;
+ int i;
if (!n)
return -ENOMEM;
@@ -706,12 +706,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
n->vqs[i].vhost_hlen = 0;
n->vqs[i].sock_hlen = 0;
}
- r = vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
- if (r < 0) {
- kfree(n);
- kfree(vqs);
- return r;
- }
+ vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index e663921eebb6..1e4c75c5b36b 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1417,18 +1417,13 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
vqs[i] = &vs->vqs[i].vq;
vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
}
- r = vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
+ vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
tcm_vhost_init_inflight(vs, NULL);
- if (r < 0)
- goto err_init;
-
f->private_data = vs;
return 0;
-err_init:
- kfree(vqs);
err_vqs:
vhost_scsi_free(vs);
err_vs:
@@ -2168,15 +2163,15 @@ static int tcm_vhost_register_configfs(void)
/*
* Setup default attribute lists for various fabric->tf_cit_tmpl
*/
- TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
/*
* Register the fabric for use within TCM
*/
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 339eae85859a..c2a54fbf7f99 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -104,7 +104,6 @@ static int vhost_test_open(struct inode *inode, struct file *f)
struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
struct vhost_dev *dev;
struct vhost_virtqueue **vqs;
- int r;
if (!n)
return -ENOMEM;
@@ -117,12 +116,7 @@ static int vhost_test_open(struct inode *inode, struct file *f)
dev = &n->dev;
vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
- r = vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
- if (r < 0) {
- kfree(vqs);
- kfree(n);
- return r;
- }
+ vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
f->private_data = n;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 69068e0d8f31..78987e481bc6 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -290,7 +290,7 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
vhost_vq_free_iovecs(dev->vqs[i]);
}
-long vhost_dev_init(struct vhost_dev *dev,
+void vhost_dev_init(struct vhost_dev *dev,
struct vhost_virtqueue **vqs, int nvqs)
{
struct vhost_virtqueue *vq;
@@ -319,8 +319,6 @@ long vhost_dev_init(struct vhost_dev *dev,
vhost_poll_init(&vq->poll, vq->handle_kick,
POLLIN, dev);
}
-
- return 0;
}
EXPORT_SYMBOL_GPL(vhost_dev_init);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 4465ed5f316d..35eeb2a1bada 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -127,7 +127,7 @@ struct vhost_dev {
struct task_struct *worker;
};
-long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
+void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
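
[editor's note] Taken together, the vhost hunks above change the contract of vhost_dev_init(): its body is nothing but assignments and vhost_poll_init() calls, none of which can fail, so the long return only forced every caller (net, scsi, test) to carry dead unwind code. A before/after fragment of a caller, names abbreviated:

	/* before: a cannot-fail callee still made every caller unwind */
	r = vhost_dev_init(dev, vqs, nvqs);
	if (r < 0) {
		kfree(vqs);
		kfree(n);
		return r;
	}

	/* after: void return, dead error path gone */
	vhost_dev_init(dev, vqs, nvqs);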
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 8521051cf946..cd961622f9c1 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -131,6 +131,7 @@ static const struct platform_device_id atmel_lcdfb_devtypes[] = {
/* terminator */
}
};
+MODULE_DEVICE_TABLE(platform, atmel_lcdfb_devtypes);
static struct atmel_lcdfb_config *
atmel_lcdfb_get_config(struct platform_device *pdev)
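
[editor's note] MODULE_DEVICE_TABLE(platform, ...) exports the ID table as module alias information ("platform:<name>"), so udev/modprobe can autoload the driver when a matching platform device is registered; without it the table only served in-kernel matching. A self-contained sketch with a made-up table:

#include <linux/module.h>
#include <linux/platform_device.h>

static const struct platform_device_id demo_lcd_devtypes[] = {
	{ .name = "demo-lcdc-v1" },
	{ .name = "demo-lcdc-v2" },
	{ /* sentinel */ }
};
/* Emits "platform:demo-lcdc-v1" etc. into the module's alias list. */
MODULE_DEVICE_TABLE(platform, demo_lcd_devtypes);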
diff --git a/drivers/video/kyro/fbdev.c b/drivers/video/kyro/fbdev.c
index 50c857477e4f..65041e15fd59 100644
--- a/drivers/video/kyro/fbdev.c
+++ b/drivers/video/kyro/fbdev.c
@@ -624,15 +624,15 @@ static int kyrofb_ioctl(struct fb_info *info,
return -EINVAL;
}
case KYRO_IOCTL_UVSTRIDE:
- if (copy_to_user(argp, &deviceInfo.ulOverlayUVStride, sizeof(unsigned long)))
+ if (copy_to_user(argp, &deviceInfo.ulOverlayUVStride, sizeof(deviceInfo.ulOverlayUVStride)))
return -EFAULT;
break;
case KYRO_IOCTL_STRIDE:
- if (copy_to_user(argp, &deviceInfo.ulOverlayStride, sizeof(unsigned long)))
+ if (copy_to_user(argp, &deviceInfo.ulOverlayStride, sizeof(deviceInfo.ulOverlayStride)))
return -EFAULT;
break;
case KYRO_IOCTL_OVERLAY_OFFSET:
- if (copy_to_user(argp, &deviceInfo.ulOverlayOffset, sizeof(unsigned long)))
+ if (copy_to_user(argp, &deviceInfo.ulOverlayOffset, sizeof(deviceInfo.ulOverlayOffset)))
return -EFAULT;
break;
}
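
[editor's note] The three kyrofb hunks all make the same hardening change: size the copy from the object, not from a hand-written type, so the ioctl keeps copying the right number of bytes even if a field's type is ever changed. In miniature (info.stride is a stand-in for the deviceInfo members above):

	/* fragile: repeats the type; silently wrong if the field changes */
	if (copy_to_user(argp, &info.stride, sizeof(unsigned long)))
		return -EFAULT;

	/* robust: the size always tracks the field itself */
	if (copy_to_user(argp, &info.stride, sizeof(info.stride)))
		return -EFAULT;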
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index 9dbea2223401..7d44d669d5b6 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -91,6 +91,15 @@ extern boot_infos_t *boot_infos;
#define AVIVO_DC_LUTB_WHITE_OFFSET_GREEN 0x6cd4
#define AVIVO_DC_LUTB_WHITE_OFFSET_RED 0x6cd8
+#define FB_RIGHT_POS(p, bpp) (fb_be_math(p) ? 0 : (32 - (bpp)))
+
+static inline u32 offb_cmap_byteswap(struct fb_info *info, u32 value)
+{
+ u32 bpp = info->var.bits_per_pixel;
+
+ return cpu_to_be32(value) >> FB_RIGHT_POS(info, bpp);
+}
+
/*
* Set a single color register. The values supplied are already
* rounded down to the hardware's capabilities (according to the
@@ -120,7 +129,7 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
mask <<= info->var.transp.offset;
value |= mask;
}
- pal[regno] = value;
+ pal[regno] = offb_cmap_byteswap(info, value);
return 0;
}
@@ -301,7 +310,7 @@ static struct fb_ops offb_ops = {
static void __iomem *offb_map_reg(struct device_node *np, int index,
unsigned long offset, unsigned long size)
{
- const u32 *addrp;
+ const __be32 *addrp;
u64 asize, taddr;
unsigned int flags;
@@ -369,7 +378,11 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp
}
of_node_put(pciparent);
} else if (dp && of_device_is_compatible(dp, "qemu,std-vga")) {
- const u32 io_of_addr[3] = { 0x01000000, 0x0, 0x0 };
+#ifdef __BIG_ENDIAN
+ const __be32 io_of_addr[3] = { 0x01000000, 0x0, 0x0 };
+#else
+ const __be32 io_of_addr[3] = { 0x00000001, 0x0, 0x0 };
+#endif
u64 io_addr = of_translate_address(dp, io_of_addr);
if (io_addr != OF_BAD_ADDR) {
par->cmap_adr = ioremap(io_addr + 0x3c8, 2);
@@ -535,7 +548,7 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
unsigned int flags, rsize, addr_prop = 0;
unsigned long max_size = 0;
u64 rstart, address = OF_BAD_ADDR;
- const u32 *pp, *addrp, *up;
+ const __be32 *pp, *addrp, *up;
u64 asize;
int foreign_endian = 0;
@@ -551,25 +564,25 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
if (pp == NULL)
pp = of_get_property(dp, "depth", &len);
if (pp && len == sizeof(u32))
- depth = *pp;
+ depth = be32_to_cpup(pp);
pp = of_get_property(dp, "linux,bootx-width", &len);
if (pp == NULL)
pp = of_get_property(dp, "width", &len);
if (pp && len == sizeof(u32))
- width = *pp;
+ width = be32_to_cpup(pp);
pp = of_get_property(dp, "linux,bootx-height", &len);
if (pp == NULL)
pp = of_get_property(dp, "height", &len);
if (pp && len == sizeof(u32))
- height = *pp;
+ height = be32_to_cpup(pp);
pp = of_get_property(dp, "linux,bootx-linebytes", &len);
if (pp == NULL)
pp = of_get_property(dp, "linebytes", &len);
if (pp && len == sizeof(u32) && (*pp != 0xffffffffu))
- pitch = *pp;
+ pitch = be32_to_cpup(pp);
else
pitch = width * ((depth + 7) / 8);
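
[editor's note] All of the offb changes are one endianness fix: device-tree property cells are big-endian on the wire, so they must be typed __be32 and converted rather than dereferenced as native u32 (a no-op on big-endian PowerPC, but wrong under a little-endian kernel and flagged by sparse). The pattern, as a small hypothetical helper:

#include <linux/of.h>

static u32 demo_read_u32_prop(struct device_node *np, const char *name,
			      u32 defval)
{
	int len;
	const __be32 *pp = of_get_property(np, name, &len);

	if (!pp || len != sizeof(u32))
		return defval;
	return be32_to_cpup(pp);	/* not: *pp */
}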
diff --git a/drivers/video/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/omap2/displays-new/panel-sony-acx565akm.c
index e6d56f714ae4..d94f35dbd536 100644
--- a/drivers/video/omap2/displays-new/panel-sony-acx565akm.c
+++ b/drivers/video/omap2/displays-new/panel-sony-acx565akm.c
@@ -526,6 +526,8 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
struct omap_dss_device *in = ddata->in;
int r;
+ mutex_lock(&ddata->mutex);
+
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
in->ops.sdi->set_timings(in, &ddata->videomode);
@@ -614,10 +616,7 @@ static int acx565akm_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- mutex_lock(&ddata->mutex);
r = acx565akm_panel_power_on(dssdev);
- mutex_unlock(&ddata->mutex);
-
if (r)
return r;
diff --git a/drivers/video/sh_mobile_meram.c b/drivers/video/sh_mobile_meram.c
index e0f098562a74..a297de5cc859 100644
--- a/drivers/video/sh_mobile_meram.c
+++ b/drivers/video/sh_mobile_meram.c
@@ -569,6 +569,7 @@ EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_update);
* Power management
*/
+#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_RUNTIME)
static int sh_mobile_meram_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -611,6 +612,7 @@ static int sh_mobile_meram_resume(struct device *dev)
meram_write_reg(priv->base, common_regs[i], priv->regs[i]);
return 0;
}
+#endif /* CONFIG_PM_SLEEP || CONFIG_PM_RUNTIME */
static UNIVERSAL_DEV_PM_OPS(sh_mobile_meram_dev_pm_ops,
sh_mobile_meram_suspend,
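
[editor's note] The new #if guard exists because UNIVERSAL_DEV_PM_OPS() is built from SET_SYSTEM_SLEEP_PM_OPS()/SET_RUNTIME_PM_OPS(), which expand to nothing when CONFIG_PM_SLEEP/CONFIG_PM_RUNTIME are off, leaving the callbacks defined but unreferenced and triggering unused-function warnings. The shape of the fix, with placeholder callbacks:

#include <linux/device.h>
#include <linux/pm.h>

#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_RUNTIME)
static int demo_suspend(struct device *dev)
{
	/* save hardware state */
	return 0;
}

static int demo_resume(struct device *dev)
{
	/* restore hardware state */
	return 0;
}
#endif

/* References demo_suspend/demo_resume only when some PM option is on. */
static UNIVERSAL_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume, NULL);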
diff --git a/drivers/video/vt8500lcdfb.c b/drivers/video/vt8500lcdfb.c
index b30e5a439d1f..a8f2b280f796 100644
--- a/drivers/video/vt8500lcdfb.c
+++ b/drivers/video/vt8500lcdfb.c
@@ -293,8 +293,7 @@ static int vt8500lcd_probe(struct platform_device *pdev)
+ sizeof(u32) * 16, GFP_KERNEL);
if (!fbi) {
dev_err(&pdev->dev, "Failed to initialize framebuffer device\n");
- ret = -ENOMEM;
- goto failed;
+ return -ENOMEM;
}
strcpy(fbi->fb.fix.id, "VT8500 LCD");
@@ -327,15 +326,13 @@ static int vt8500lcd_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "no I/O memory resource defined\n");
- ret = -ENODEV;
- goto failed_fbi;
+ return -ENODEV;
}
res = request_mem_region(res->start, resource_size(res), "vt8500lcd");
if (res == NULL) {
dev_err(&pdev->dev, "failed to request I/O memory\n");
- ret = -EBUSY;
- goto failed_fbi;
+ return -EBUSY;
}
fbi->regbase = ioremap(res->start, resource_size(res));
@@ -346,17 +343,19 @@ static int vt8500lcd_probe(struct platform_device *pdev)
}
disp_timing = of_get_display_timings(pdev->dev.of_node);
- if (!disp_timing)
- return -EINVAL;
+ if (!disp_timing) {
+ ret = -EINVAL;
+ goto failed_free_io;
+ }
ret = of_get_fb_videomode(pdev->dev.of_node, &of_mode,
OF_USE_NATIVE_MODE);
if (ret)
- return ret;
+ goto failed_free_io;
ret = of_property_read_u32(pdev->dev.of_node, "bits-per-pixel", &bpp);
if (ret)
- return ret;
+ goto failed_free_io;
/* try allocating the framebuffer */
fb_mem_len = of_mode.xres * of_mode.yres * 2 * (bpp / 8);
@@ -364,7 +363,8 @@ static int vt8500lcd_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!fb_mem_virt) {
pr_err("%s: Failed to allocate framebuffer\n", __func__);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto failed_free_io;
}
fbi->fb.fix.smem_start = fb_mem_phys;
@@ -447,9 +447,6 @@ failed_free_io:
iounmap(fbi->regbase);
failed_free_res:
release_mem_region(res->start, resource_size(res));
-failed_fbi:
- kfree(fbi);
-failed:
return ret;
}
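
[editor's note] The vt8500lcd probe fixes are all about keeping one consistent unwind path: exits before anything is acquired may return directly (and the fbi allocation is managed, so its kfree label could go entirely), but once request_mem_region()/ioremap() have succeeded, every later failure must goto a label that releases them in reverse order; the old code leaked the I/O region and mapping on the DT/videomode error paths. The canonical shape, with a hypothetical demo_hw_init():

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int demo_hw_init(void __iomem *base)
{
	return 0;	/* stand-in for real controller setup */
}

static int demo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;		/* nothing held yet: plain return */

	if (!request_mem_region(res->start, resource_size(res), "demo"))
		return -EBUSY;

	base = ioremap(res->start, resource_size(res));
	if (!base) {
		ret = -ENOMEM;
		goto err_release;
	}

	ret = demo_hw_init(base);
	if (ret)
		goto err_unmap;		/* never "return ret" from here on */

	return 0;

err_unmap:
	iounmap(base);
err_release:
	release_mem_region(res->start, resource_size(res));
	return ret;
}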
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index c444654fc33f..5c4a95b516cf 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -285,7 +285,7 @@ static void update_balloon_size(struct virtio_balloon *vb)
{
__le32 actual = cpu_to_le32(vb->num_pages);
- virtio_cwrite(vb->vdev, struct virtio_balloon_config, num_pages,
+ virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
&actual);
}
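
[editor's note] The virtio_balloon change is a real bug fix, not a rename: virtio_cwrite() derives both the config-space offset and the access size from the struct type plus member name, so naming num_pages wrote the guest's current size into the host's target field. Roughly, with a stand-in config struct:

#include <linux/virtio_config.h>

struct demo_balloon_config {
	__le32 num_pages;	/* host-requested target */
	__le32 actual;		/* guest-reported current size */
};

static void demo_report_pages(struct virtio_device *vdev, u32 pages)
{
	__le32 actual = cpu_to_le32(pages);

	/* Writes sizeof(actual) bytes at
	 * offsetof(struct demo_balloon_config, actual). */
	virtio_cwrite(vdev, struct demo_balloon_config, actual, &actual);
}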
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index a6a2cebb2587..cafa973c43be 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -19,7 +19,6 @@
#include <linux/watchdog.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
-#include <linux/miscdevice.h>
#define PM_RSTC 0x1c
#define PM_WDOG 0x24
diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c
index 833e81311848..d1d07f2f69df 100644
--- a/drivers/watchdog/ep93xx_wdt.c
+++ b/drivers/watchdog/ep93xx_wdt.c
@@ -28,7 +28,6 @@
#include <linux/platform_device.h>
#include <linux/module.h>
-#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/timer.h>
#include <linux/io.h>
diff --git a/drivers/watchdog/ie6xx_wdt.c b/drivers/watchdog/ie6xx_wdt.c
index 70a240297c6d..07f88f54e5c0 100644
--- a/drivers/watchdog/ie6xx_wdt.c
+++ b/drivers/watchdog/ie6xx_wdt.c
@@ -28,7 +28,6 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/watchdog.h>
-#include <linux/miscdevice.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index 2de486a7eea1..3aa50cfa335f 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -17,7 +17,6 @@
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/init.h>
#include <linux/platform_device.h>
diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
index a1a3638c579c..20dc73844737 100644
--- a/drivers/watchdog/kempld_wdt.c
+++ b/drivers/watchdog/kempld_wdt.c
@@ -26,7 +26,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>
#include <linux/platform_device.h>
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index 6d4f3998e1f6..bdb3f4a5b27c 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -19,7 +19,6 @@
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/init.h>
#include <linux/bitops.h>
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 44edca66d564..f7722a424676 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -16,7 +16,6 @@
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
#include <linux/init.h>
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 1bdcc313e1d9..5bec20f5dc2d 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -23,7 +23,6 @@
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/init.h>
#include <linux/platform_device.h>
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
index 53d37fea183e..d92c2d5859ce 100644
--- a/drivers/watchdog/rt2880_wdt.c
+++ b/drivers/watchdog/rt2880_wdt.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/watchdog.h>
-#include <linux/miscdevice.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
diff --git a/drivers/watchdog/sc1200wdt.c b/drivers/watchdog/sc1200wdt.c
index 3b9fff9dcf65..131193a7acdf 100644
--- a/drivers/watchdog/sc1200wdt.c
+++ b/drivers/watchdog/sc1200wdt.c
@@ -409,8 +409,9 @@ static int __init sc1200wdt_init(void)
#if defined CONFIG_PNP
/* now that the user has specified an IO port and we haven't detected
* any devices, disable pnp support */
+ if (isapnp)
+ pnp_unregister_driver(&scl200wdt_pnp_driver);
isapnp = 0;
- pnp_unregister_driver(&scl200wdt_pnp_driver);
#endif
if (!request_region(io, io_len, SC1200_MODULE_NAME)) {
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index f9b8e06f3558..af3528f84d65 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -26,7 +26,6 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
-#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/pm_runtime.h>
#include <linux/fs.h>
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index ef2638fee4a8..c04a1aa158e2 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -42,7 +42,6 @@
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/timer.h>
-#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c
index d667f6b51d35..bb64ae3f47da 100644
--- a/drivers/watchdog/stmp3xxx_rtc_wdt.c
+++ b/drivers/watchdog/stmp3xxx_rtc_wdt.c
@@ -12,7 +12,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/platform_device.h>
#include <linux/stmp3xxx_rtc_wdt.h>
diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c
index 0fd0e8ae62a8..6a447e321dd0 100644
--- a/drivers/watchdog/txx9wdt.c
+++ b/drivers/watchdog/txx9wdt.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
-#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/init.h>
#include <linux/platform_device.h>
diff --git a/drivers/watchdog/ux500_wdt.c b/drivers/watchdog/ux500_wdt.c
index e029b5768f2c..5aed9d7ad47e 100644
--- a/drivers/watchdog/ux500_wdt.c
+++ b/drivers/watchdog/ux500_wdt.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
-#include <linux/miscdevice.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>
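
[editor's note] The long run of one-line watchdog patches above all drop the same stale #include: these drivers were converted to the watchdog core, which registers the /dev/watchdog character device itself, so nothing in them references miscdevice anymore. For contrast, a minimal core-based skeleton (all demo_* names hypothetical):

#include <linux/module.h>
#include <linux/watchdog.h>

static int demo_wdt_start(struct watchdog_device *wdd)
{
	/* kick the hardware timer */
	return 0;
}

static int demo_wdt_stop(struct watchdog_device *wdd)
{
	return 0;
}

static const struct watchdog_ops demo_wdt_ops = {
	.owner = THIS_MODULE,
	.start = demo_wdt_start,
	.stop  = demo_wdt_stop,
};

static const struct watchdog_info demo_wdt_info = {
	.options  = WDIOF_KEEPALIVEPING,
	.identity = "demo-wdt",
};

static struct watchdog_device demo_wdd = {
	.info = &demo_wdt_info,
	.ops  = &demo_wdt_ops,
};

/* probe: watchdog_register_device(&demo_wdd);
 * remove: watchdog_unregister_device(&demo_wdd);
 * the core creates and owns the char device. */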
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 55ea73f7c70b..4c02e2b94103 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -350,17 +350,19 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
pfn = page_to_pfn(page);
- set_phys_to_machine(pfn, frame_list[i]);
-
#ifdef CONFIG_XEN_HAVE_PVMMU
- /* Link back into the page tables if not highmem. */
- if (xen_pv_domain() && !PageHighMem(page)) {
- int ret;
- ret = HYPERVISOR_update_va_mapping(
- (unsigned long)__va(pfn << PAGE_SHIFT),
- mfn_pte(frame_list[i], PAGE_KERNEL),
- 0);
- BUG_ON(ret);
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ set_phys_to_machine(pfn, frame_list[i]);
+
+ /* Link back into the page tables if not highmem. */
+ if (!PageHighMem(page)) {
+ int ret;
+ ret = HYPERVISOR_update_va_mapping(
+ (unsigned long)__va(pfn << PAGE_SHIFT),
+ mfn_pte(frame_list[i], PAGE_KERNEL),
+ 0);
+ BUG_ON(ret);
+ }
}
#endif
@@ -378,7 +380,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
enum bp_state state = BP_DONE;
unsigned long pfn, i;
struct page *page;
- struct page *scratch_page;
int ret;
struct xen_memory_reservation reservation = {
.address_bits = 0,
@@ -411,27 +412,29 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
scrub_page(page);
+#ifdef CONFIG_XEN_HAVE_PVMMU
/*
* Ballooned out frames are effectively replaced with
* a scratch frame. Ensure direct mappings and the
* p2m are consistent.
*/
- scratch_page = get_balloon_scratch_page();
-#ifdef CONFIG_XEN_HAVE_PVMMU
- if (xen_pv_domain() && !PageHighMem(page)) {
- ret = HYPERVISOR_update_va_mapping(
- (unsigned long)__va(pfn << PAGE_SHIFT),
- pfn_pte(page_to_pfn(scratch_page),
- PAGE_KERNEL_RO), 0);
- BUG_ON(ret);
- }
-#endif
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
unsigned long p;
+ struct page *scratch_page = get_balloon_scratch_page();
+
+ if (!PageHighMem(page)) {
+ ret = HYPERVISOR_update_va_mapping(
+ (unsigned long)__va(pfn << PAGE_SHIFT),
+ pfn_pte(page_to_pfn(scratch_page),
+ PAGE_KERNEL_RO), 0);
+ BUG_ON(ret);
+ }
p = page_to_pfn(scratch_page);
__set_phys_to_machine(pfn, pfn_to_mfn(p));
+
+ put_balloon_scratch_page();
}
- put_balloon_scratch_page();
+#endif
balloon_append(pfn_to_page(pfn));
}
@@ -627,15 +630,17 @@ static int __init balloon_init(void)
if (!xen_domain())
return -ENODEV;
- for_each_online_cpu(cpu)
- {
- per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
- if (per_cpu(balloon_scratch_page, cpu) == NULL) {
- pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
- return -ENOMEM;
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ for_each_online_cpu(cpu)
+ {
+ per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
+ if (per_cpu(balloon_scratch_page, cpu) == NULL) {
+ pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
+ return -ENOMEM;
+ }
}
+ register_cpu_notifier(&balloon_cpu_notifier);
}
- register_cpu_notifier(&balloon_cpu_notifier);
pr_info("Initialising balloon driver\n");
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 62ccf5424ba8..aa846a48f400 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -930,9 +930,10 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
ret = m2p_add_override(mfn, pages[i], kmap_ops ?
&kmap_ops[i] : NULL);
if (ret)
- return ret;
+ goto out;
}
+ out:
if (lazy)
arch_leave_lazy_mmu_mode();
@@ -969,9 +970,10 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
ret = m2p_remove_override(pages[i], kmap_ops ?
&kmap_ops[i] : NULL);
if (ret)
- return ret;
+ goto out;
}
+ out:
if (lazy)
arch_leave_lazy_mmu_mode();
@@ -1174,7 +1176,8 @@ static int gnttab_setup(void)
gnttab_shared.addr = xen_remap(xen_hvm_resume_frames,
PAGE_SIZE * max_nr_gframes);
if (gnttab_shared.addr == NULL) {
- pr_warn("Failed to ioremap gnttab share frames!\n");
+ pr_warn("Failed to ioremap gnttab share frames (addr=0x%08lx)!\n",
+ xen_hvm_resume_frames);
return -ENOMEM;
}
}
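
[editor's note] Both grant-table hunks fix the same structural bug: the loop runs inside arch_enter_lazy_mmu_mode(), and an early return on error skipped arch_leave_lazy_mmu_mode() entirely. The corrected control flow in isolation (demo_do_one() is a stand-in for the m2p override call):

#include <linux/mm.h>

static int demo_do_one(unsigned int i)
{
	return 0;	/* stand-in for the per-entry override */
}

static int demo_apply_overrides(bool lazy, unsigned int count)
{
	unsigned int i;
	int ret = 0;

	if (lazy)
		arch_enter_lazy_mmu_mode();

	for (i = 0; i < count; i++) {
		ret = demo_do_one(i);
		if (ret)
			goto out;	/* a bare "return ret" leaks lazy mode */
	}
out:
	if (lazy)
		arch_leave_lazy_mmu_mode();
	return ret;
}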
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index d15f6e80479f..188825122aae 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -59,12 +59,12 @@ static int xen_add_device(struct device *dev)
add.flags = XEN_PCI_DEV_EXTFN;
#ifdef CONFIG_ACPI
- handle = DEVICE_ACPI_HANDLE(&pci_dev->dev);
+ handle = ACPI_HANDLE(&pci_dev->dev);
if (!handle && pci_dev->bus->bridge)
- handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge);
+ handle = ACPI_HANDLE(pci_dev->bus->bridge);
#ifdef CONFIG_PCI_IOV
if (!handle && pci_dev->is_virtfn)
- handle = DEVICE_ACPI_HANDLE(physfn->bus->bridge);
+ handle = ACPI_HANDLE(physfn->bus->bridge);
#endif
if (handle) {
acpi_status status;
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 8e74590fa1bb..569a13b9e856 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -533,12 +533,17 @@ static void privcmd_close(struct vm_area_struct *vma)
{
struct page **pages = vma->vm_private_data;
int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ int rc;
if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
return;
- xen_unmap_domain_mfn_range(vma, numpgs, pages);
- free_xenballooned_pages(numpgs, pages);
+ rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
+ if (rc == 0)
+ free_xenballooned_pages(numpgs, pages);
+ else
+ pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
+ numpgs, rc);
kfree(pages);
}
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index a224bc74b6b9..1eac0731c349 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -555,6 +555,11 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
sg_dma_len(sgl) = 0;
return 0;
}
+ xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
+ map & ~PAGE_MASK,
+ sg->length,
+ dir,
+ attrs);
sg->dma_address = xen_phys_to_bus(map);
} else {
/* we are not interested in the dma_addr returned by
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index f039b104a98e..b03dd23feda8 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -43,23 +43,6 @@
#include "fid.h"
/**
- * v9fs_dentry_delete - called when dentry refcount equals 0
- * @dentry: dentry in question
- *
- * By returning 1 here we should remove cacheing of unused
- * dentry components.
- *
- */
-
-static int v9fs_dentry_delete(const struct dentry *dentry)
-{
- p9_debug(P9_DEBUG_VFS, " dentry: %s (%p)\n",
- dentry->d_name.name, dentry);
-
- return 1;
-}
-
-/**
* v9fs_cached_dentry_delete - called when dentry refcount equals 0
* @dentry: dentry in question
*
@@ -134,6 +117,6 @@ const struct dentry_operations v9fs_cached_dentry_operations = {
};
const struct dentry_operations v9fs_dentry_operations = {
- .d_delete = v9fs_dentry_delete,
+ .d_delete = always_delete_dentry,
.d_release = v9fs_dentry_release,
};
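
[editor's note] The 9p hunk works because the VFS now exports always_delete_dentry(), the generic "return 1" d_delete callback, so filesystems that never want unused dentries cached can drop their private copies. A sketch of the resulting operations table (demo_* names hypothetical):

#include <linux/dcache.h>

const struct dentry_operations demo_dentry_operations = {
	/* drop dentries as soon as their refcount hits zero */
	.d_delete  = always_delete_dentry,
	/* .d_release = demo_dentry_release,  -- per-fs cleanup hook */
};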
diff --git a/fs/affs/Changes b/fs/affs/Changes
index a29409c1ffe0..b41c2c9792ff 100644
--- a/fs/affs/Changes
+++ b/fs/affs/Changes
@@ -91,7 +91,7 @@ more 2.4 fixes: [Roman Zippel]
Version 3.11
------------
-- Converted to use 2.3.x page cache [Dave Jones <dave@powertweak.com>]
+- Converted to use 2.3.x page cache [Dave Jones]
- Corruption in truncate() bugfix [Ken Tyler <kent@werple.net.au>]
Version 3.10
diff --git a/fs/aio.c b/fs/aio.c
index 823efcbb6ccd..062a5f6a1448 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -80,6 +80,8 @@ struct kioctx {
struct percpu_ref users;
atomic_t dead;
+ struct percpu_ref reqs;
+
unsigned long user_id;
struct __percpu kioctx_cpu *cpu;
@@ -107,7 +109,6 @@ struct kioctx {
struct page **ring_pages;
long nr_pages;
- struct rcu_head rcu_head;
struct work_struct free_work;
struct {
@@ -243,15 +244,22 @@ static void aio_free_ring(struct kioctx *ctx)
int i;
for (i = 0; i < ctx->nr_pages; i++) {
+ struct page *page;
pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
page_count(ctx->ring_pages[i]));
- put_page(ctx->ring_pages[i]);
+ page = ctx->ring_pages[i];
+ if (!page)
+ continue;
+ ctx->ring_pages[i] = NULL;
+ put_page(page);
}
put_aio_ring_file(ctx);
- if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages)
+ if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
kfree(ctx->ring_pages);
+ ctx->ring_pages = NULL;
+ }
}
static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
@@ -277,18 +285,38 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
unsigned long flags;
int rc;
+ rc = 0;
+
+ /* Make sure the old page hasn't already been changed */
+ spin_lock(&mapping->private_lock);
+ ctx = mapping->private_data;
+ if (ctx) {
+ pgoff_t idx;
+ spin_lock_irqsave(&ctx->completion_lock, flags);
+ idx = old->index;
+ if (idx < (pgoff_t)ctx->nr_pages) {
+ if (ctx->ring_pages[idx] != old)
+ rc = -EAGAIN;
+ } else
+ rc = -EINVAL;
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ } else
+ rc = -EINVAL;
+ spin_unlock(&mapping->private_lock);
+
+ if (rc != 0)
+ return rc;
+
/* Writeback must be complete */
BUG_ON(PageWriteback(old));
- put_page(old);
+ get_page(new);
- rc = migrate_page_move_mapping(mapping, new, old, NULL, mode);
+ rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
if (rc != MIGRATEPAGE_SUCCESS) {
- get_page(old);
+ put_page(new);
return rc;
}
- get_page(new);
-
/* We can potentially race against kioctx teardown here. Use the
* address_space's private data lock to protect the mapping's
* private_data.
@@ -300,13 +328,24 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
spin_lock_irqsave(&ctx->completion_lock, flags);
migrate_page_copy(new, old);
idx = old->index;
- if (idx < (pgoff_t)ctx->nr_pages)
- ctx->ring_pages[idx] = new;
+ if (idx < (pgoff_t)ctx->nr_pages) {
+ /* And only do the move if things haven't changed */
+ if (ctx->ring_pages[idx] == old)
+ ctx->ring_pages[idx] = new;
+ else
+ rc = -EAGAIN;
+ } else
+ rc = -EINVAL;
spin_unlock_irqrestore(&ctx->completion_lock, flags);
} else
rc = -EBUSY;
spin_unlock(&mapping->private_lock);
+ if (rc == MIGRATEPAGE_SUCCESS)
+ put_page(old);
+ else
+ put_page(new);
+
return rc;
}
#endif
@@ -323,7 +362,7 @@ static int aio_setup_ring(struct kioctx *ctx)
struct aio_ring *ring;
unsigned nr_events = ctx->max_reqs;
struct mm_struct *mm = current->mm;
- unsigned long size, populate;
+ unsigned long size, unused;
int nr_pages;
int i;
struct file *file;
@@ -344,6 +383,20 @@ static int aio_setup_ring(struct kioctx *ctx)
return -EAGAIN;
}
+ ctx->aio_ring_file = file;
+ nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
+ / sizeof(struct io_event);
+
+ ctx->ring_pages = ctx->internal_pages;
+ if (nr_pages > AIO_RING_PAGES) {
+ ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
+ GFP_KERNEL);
+ if (!ctx->ring_pages) {
+ put_aio_ring_file(ctx);
+ return -ENOMEM;
+ }
+ }
+
for (i = 0; i < nr_pages; i++) {
struct page *page;
page = find_or_create_page(file->f_inode->i_mapping,
@@ -355,17 +408,14 @@ static int aio_setup_ring(struct kioctx *ctx)
SetPageUptodate(page);
SetPageDirty(page);
unlock_page(page);
+
+ ctx->ring_pages[i] = page;
}
- ctx->aio_ring_file = file;
- nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
- / sizeof(struct io_event);
+ ctx->nr_pages = i;
- ctx->ring_pages = ctx->internal_pages;
- if (nr_pages > AIO_RING_PAGES) {
- ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
- GFP_KERNEL);
- if (!ctx->ring_pages)
- return -ENOMEM;
+ if (unlikely(i != nr_pages)) {
+ aio_free_ring(ctx);
+ return -EAGAIN;
}
ctx->mmap_size = nr_pages * PAGE_SIZE;
@@ -374,9 +424,9 @@ static int aio_setup_ring(struct kioctx *ctx)
down_write(&mm->mmap_sem);
ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_POPULATE, 0, &populate);
+ MAP_SHARED, 0, &unused);
+ up_write(&mm->mmap_sem);
if (IS_ERR((void *)ctx->mmap_base)) {
- up_write(&mm->mmap_sem);
ctx->mmap_size = 0;
aio_free_ring(ctx);
return -EAGAIN;
@@ -384,27 +434,6 @@ static int aio_setup_ring(struct kioctx *ctx)
pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
- /* We must do this while still holding mmap_sem for write, as we
- * need to be protected against userspace attempting to mremap()
- * or munmap() the ring buffer.
- */
- ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages,
- 1, 0, ctx->ring_pages, NULL);
-
- /* Dropping the reference here is safe as the page cache will hold
- * onto the pages for us. It is also required so that page migration
- * can unmap the pages and get the right reference count.
- */
- for (i = 0; i < ctx->nr_pages; i++)
- put_page(ctx->ring_pages[i]);
-
- up_write(&mm->mmap_sem);
-
- if (unlikely(ctx->nr_pages != nr_pages)) {
- aio_free_ring(ctx);
- return -EAGAIN;
- }
-
ctx->user_id = ctx->mmap_base;
ctx->nr_events = nr_events; /* trusted copy */
@@ -463,26 +492,34 @@ static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb)
return cancel(kiocb);
}
-static void free_ioctx_rcu(struct rcu_head *head)
+static void free_ioctx(struct work_struct *work)
{
- struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
+ struct kioctx *ctx = container_of(work, struct kioctx, free_work);
+
+ pr_debug("freeing %p\n", ctx);
+ aio_free_ring(ctx);
free_percpu(ctx->cpu);
kmem_cache_free(kioctx_cachep, ctx);
}
+static void free_ioctx_reqs(struct percpu_ref *ref)
+{
+ struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
+
+ INIT_WORK(&ctx->free_work, free_ioctx);
+ schedule_work(&ctx->free_work);
+}
+
/*
* When this function runs, the kioctx has been removed from the "hash table"
* and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
* now it's safe to cancel any that need to be.
*/
-static void free_ioctx(struct work_struct *work)
+static void free_ioctx_users(struct percpu_ref *ref)
{
- struct kioctx *ctx = container_of(work, struct kioctx, free_work);
- struct aio_ring *ring;
+ struct kioctx *ctx = container_of(ref, struct kioctx, users);
struct kiocb *req;
- unsigned cpu, avail;
- DEFINE_WAIT(wait);
spin_lock_irq(&ctx->ctx_lock);
@@ -496,54 +533,8 @@ static void free_ioctx(struct work_struct *work)
spin_unlock_irq(&ctx->ctx_lock);
- for_each_possible_cpu(cpu) {
- struct kioctx_cpu *kcpu = per_cpu_ptr(ctx->cpu, cpu);
-
- atomic_add(kcpu->reqs_available, &ctx->reqs_available);
- kcpu->reqs_available = 0;
- }
-
- while (1) {
- prepare_to_wait(&ctx->wait, &wait, TASK_UNINTERRUPTIBLE);
-
- ring = kmap_atomic(ctx->ring_pages[0]);
- avail = (ring->head <= ring->tail)
- ? ring->tail - ring->head
- : ctx->nr_events - ring->head + ring->tail;
-
- atomic_add(avail, &ctx->reqs_available);
- ring->head = ring->tail;
- kunmap_atomic(ring);
-
- if (atomic_read(&ctx->reqs_available) >= ctx->nr_events - 1)
- break;
-
- schedule();
- }
- finish_wait(&ctx->wait, &wait);
-
- WARN_ON(atomic_read(&ctx->reqs_available) > ctx->nr_events - 1);
-
- aio_free_ring(ctx);
-
- pr_debug("freeing %p\n", ctx);
-
- /*
- * Here the call_rcu() is between the wait_event() for reqs_active to
- * hit 0, and freeing the ioctx.
- *
- * aio_complete() decrements reqs_active, but it has to touch the ioctx
- * after to issue a wakeup so we use rcu.
- */
- call_rcu(&ctx->rcu_head, free_ioctx_rcu);
-}
-
-static void free_ioctx_ref(struct percpu_ref *ref)
-{
- struct kioctx *ctx = container_of(ref, struct kioctx, users);
-
- INIT_WORK(&ctx->free_work, free_ioctx);
- schedule_work(&ctx->free_work);
+ percpu_ref_kill(&ctx->reqs);
+ percpu_ref_put(&ctx->reqs);
}
static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
@@ -602,6 +593,16 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
}
}
+static void aio_nr_sub(unsigned nr)
+{
+ spin_lock(&aio_nr_lock);
+ if (WARN_ON(aio_nr - nr > aio_nr))
+ aio_nr = 0;
+ else
+ aio_nr -= nr;
+ spin_unlock(&aio_nr_lock);
+}
+
/* ioctx_alloc
* Allocates and initializes an ioctx. Returns an ERR_PTR if it failed.
*/
@@ -639,8 +640,11 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
ctx->max_reqs = nr_events;
- if (percpu_ref_init(&ctx->users, free_ioctx_ref))
- goto out_freectx;
+ if (percpu_ref_init(&ctx->users, free_ioctx_users))
+ goto err;
+
+ if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs))
+ goto err;
spin_lock_init(&ctx->ctx_lock);
spin_lock_init(&ctx->completion_lock);
@@ -651,10 +655,10 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
ctx->cpu = alloc_percpu(struct kioctx_cpu);
if (!ctx->cpu)
- goto out_freeref;
+ goto err;
if (aio_setup_ring(ctx) < 0)
- goto out_freepcpu;
+ goto err;
atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
@@ -666,32 +670,31 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
if (aio_nr + nr_events > (aio_max_nr * 2UL) ||
aio_nr + nr_events < aio_nr) {
spin_unlock(&aio_nr_lock);
- goto out_cleanup;
+ err = -EAGAIN;
+ goto err_ctx;
}
aio_nr += ctx->max_reqs;
spin_unlock(&aio_nr_lock);
- percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */
+ percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */
+ percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */
err = ioctx_add_table(ctx, mm);
if (err)
- goto out_cleanup_put;
+ goto err_cleanup;
pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
ctx, ctx->user_id, mm, ctx->nr_events);
return ctx;
-out_cleanup_put:
- percpu_ref_put(&ctx->users);
-out_cleanup:
- err = -EAGAIN;
+err_cleanup:
+ aio_nr_sub(ctx->max_reqs);
+err_ctx:
aio_free_ring(ctx);
-out_freepcpu:
+err:
free_percpu(ctx->cpu);
-out_freeref:
+ free_percpu(ctx->reqs.pcpu_count);
free_percpu(ctx->users.pcpu_count);
-out_freectx:
- put_aio_ring_file(ctx);
kmem_cache_free(kioctx_cachep, ctx);
pr_debug("error allocating ioctx %d\n", err);
return ERR_PTR(err);
@@ -726,10 +729,7 @@ static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
* -EAGAIN with no ioctxs actually in use (as far as userspace
* could tell).
*/
- spin_lock(&aio_nr_lock);
- BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
- aio_nr -= ctx->max_reqs;
- spin_unlock(&aio_nr_lock);
+ aio_nr_sub(ctx->max_reqs);
if (ctx->mmap_size)
vm_munmap(ctx->mmap_base, ctx->mmap_size);
@@ -861,6 +861,8 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
if (unlikely(!req))
goto out_put;
+ percpu_ref_get(&ctx->reqs);
+
req->ki_ctx = ctx;
return req;
out_put:
@@ -930,12 +932,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
return;
}
- /*
- * Take rcu_read_lock() in case the kioctx is being destroyed, as we
- * need to issue a wakeup after incrementing reqs_available.
- */
- rcu_read_lock();
-
if (iocb->ki_list.next) {
unsigned long flags;
@@ -1010,7 +1006,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
if (waitqueue_active(&ctx->wait))
wake_up(&ctx->wait);
- rcu_read_unlock();
+ percpu_ref_put(&ctx->reqs);
}
EXPORT_SYMBOL(aio_complete);
@@ -1421,6 +1417,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
return 0;
out_put_req:
put_reqs_available(ctx, 1);
+ percpu_ref_put(&ctx->reqs);
kiocb_free(req);
return ret;
}
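
[editor's note] The aio rework replaces the rcu_head/wait-loop teardown with two percpu refcounts: "users" gates new submitters and, when it drops, kills "reqs"; "reqs" counts in-flight kiocbs (one get per aio_get_req(), one put per aio_complete() or failed submit) and its release schedules the actual free on a workqueue, since percpu_ref release callbacks can fire in atomic context. Reduced to a skeleton with illustrative demo_* names:

#include <linux/percpu-refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_ctx {
	struct percpu_ref users;	/* gates new submitters */
	struct percpu_ref reqs;		/* tracks in-flight requests */
	struct work_struct free_work;
};

static void demo_free(struct work_struct *work)
{
	struct demo_ctx *ctx = container_of(work, struct demo_ctx, free_work);

	kfree(ctx);
}

static void demo_reqs_released(struct percpu_ref *ref)
{
	struct demo_ctx *ctx = container_of(ref, struct demo_ctx, reqs);

	/* May run in atomic context: defer the real teardown. */
	INIT_WORK(&ctx->free_work, demo_free);
	schedule_work(&ctx->free_work);
}

static void demo_users_released(struct percpu_ref *ref)
{
	struct demo_ctx *ctx = container_of(ref, struct demo_ctx, users);

	/* No new requests can start; drop the base ref on "reqs" so the
	 * context is freed once the last in-flight request completes. */
	percpu_ref_kill(&ctx->reqs);
	percpu_ref_put(&ctx->reqs);
}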
diff --git a/fs/bio.c b/fs/bio.c
index 2bdb4e25ee77..33d79a4eb92d 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -601,7 +601,7 @@ EXPORT_SYMBOL(bio_get_nr_vecs);
static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
*page, unsigned int len, unsigned int offset,
- unsigned short max_sectors)
+ unsigned int max_sectors)
{
int retried_segments = 0;
struct bio_vec *bvec;
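
[editor's note] Widening max_sectors matters because queue limits can exceed USHRT_MAX; passed through an unsigned short they wrap, and 65536 in particular truncates to 0, after which __bio_add_page() rejects every page. A standalone userspace demo of the wrap:

#include <stdio.h>

int main(void)
{
	unsigned int max_sectors = 65536;	/* 32 MiB in 512-byte sectors */
	unsigned short truncated = max_sectors;

	printf("%u -> %u\n", max_sectors, truncated);	/* prints "65536 -> 0" */
	return 0;
}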
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index f9d5094e1029..aa976eced2d2 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -9,12 +9,17 @@ config BTRFS_FS
select XOR_BLOCKS
help
- Btrfs is a new filesystem with extents, writable snapshotting,
- support for multiple devices and many more features.
+ Btrfs is a general purpose copy-on-write filesystem with extents,
+ writable snapshotting, support for multiple devices and many more
+ features focused on fault tolerance, repair and easy administration.
- Btrfs is highly experimental, and THE DISK FORMAT IS NOT YET
- FINALIZED. You should say N here unless you are interested in
- testing Btrfs with non-critical data.
+ The filesystem disk format is no longer unstable, and it's not
+ expected to change unless there are strong reasons to do so. If there
+ is a format change, file systems with an unchanged format will

+ continue to be mountable and usable by newer kernels.
+
+ For more information, please see the web pages at
+ http://btrfs.wiki.kernel.org.
To compile this file system support as a module, choose M here. The
module will be called btrfs.
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 8aec751fa464..c1e0b0caf9cc 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -495,6 +495,7 @@ static int __btrfs_start_workers(struct btrfs_workers *workers)
spin_lock_irq(&workers->lock);
if (workers->stopping) {
spin_unlock_irq(&workers->lock);
+ ret = -EINVAL;
goto fail_kthread;
}
list_add_tail(&worker->worker_list, &workers->idle_list);
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index e0aab4456974..131d82800b3a 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -77,6 +77,15 @@
* the integrity of (super)-block write requests, do not
* enable the config option BTRFS_FS_CHECK_INTEGRITY to
* include and compile the integrity check tool.
+ *
+ * Expect millions of lines of information in the kernel log with an
+ * enabled check_int_print_mask. Therefore set LOG_BUF_SHIFT in the
+ * kernel config to at least 26 (which is 64MB). Usually the value is
+ * limited to 21 (which is 2MB) in init/Kconfig. The file needs to be
+ * changed like this before LOG_BUF_SHIFT can be set to a high value:
+ * config LOG_BUF_SHIFT
+ * int "Kernel log buffer size (16 => 64KB, 17 => 128KB)"
+ * range 12 30
*/
#include <linux/sched.h>
@@ -124,6 +133,7 @@
#define BTRFSIC_PRINT_MASK_INITIAL_DATABASE 0x00000400
#define BTRFSIC_PRINT_MASK_NUM_COPIES 0x00000800
#define BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS 0x00001000
+#define BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE 0x00002000
struct btrfsic_dev_state;
struct btrfsic_state;
@@ -323,7 +333,6 @@ static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx);
static int btrfsic_read_block(struct btrfsic_state *state,
struct btrfsic_block_data_ctx *block_ctx);
static void btrfsic_dump_database(struct btrfsic_state *state);
-static void btrfsic_complete_bio_end_io(struct bio *bio, int err);
static int btrfsic_test_for_metadata(struct btrfsic_state *state,
char **datav, unsigned int num_pages);
static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
@@ -1677,7 +1686,6 @@ static int btrfsic_read_block(struct btrfsic_state *state,
for (i = 0; i < num_pages;) {
struct bio *bio;
unsigned int j;
- DECLARE_COMPLETION_ONSTACK(complete);
bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i);
if (!bio) {
@@ -1688,8 +1696,6 @@ static int btrfsic_read_block(struct btrfsic_state *state,
}
bio->bi_bdev = block_ctx->dev->bdev;
bio->bi_sector = dev_bytenr >> 9;
- bio->bi_end_io = btrfsic_complete_bio_end_io;
- bio->bi_private = &complete;
for (j = i; j < num_pages; j++) {
ret = bio_add_page(bio, block_ctx->pagev[j],
@@ -1702,12 +1708,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
"btrfsic: error, failed to add a single page!\n");
return -1;
}
- submit_bio(READ, bio);
-
- /* this will also unplug the queue */
- wait_for_completion(&complete);
-
- if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+ if (submit_bio_wait(READ, bio)) {
printk(KERN_INFO
"btrfsic: read error at logical %llu dev %s!\n",
block_ctx->start, block_ctx->dev->name);
@@ -1730,11 +1731,6 @@ static int btrfsic_read_block(struct btrfsic_state *state,
return block_ctx->len;
}
-static void btrfsic_complete_bio_end_io(struct bio *bio, int err)
-{
- complete((struct completion *)bio->bi_private);
-}
-
static void btrfsic_dump_database(struct btrfsic_state *state)
{
struct list_head *elem_all;
@@ -2998,14 +2994,12 @@ int btrfsic_submit_bh(int rw, struct buffer_head *bh)
return submit_bh(rw, bh);
}
-void btrfsic_submit_bio(int rw, struct bio *bio)
+static void __btrfsic_submit_bio(int rw, struct bio *bio)
{
struct btrfsic_dev_state *dev_state;
- if (!btrfsic_is_initialized) {
- submit_bio(rw, bio);
+ if (!btrfsic_is_initialized)
return;
- }
mutex_lock(&btrfsic_mutex);
/* since btrfsic_submit_bio() is also called before
@@ -3015,6 +3009,7 @@ void btrfsic_submit_bio(int rw, struct bio *bio)
(rw & WRITE) && NULL != bio->bi_io_vec) {
unsigned int i;
u64 dev_bytenr;
+ u64 cur_bytenr;
int bio_is_patched;
char **mapped_datav;
@@ -3033,6 +3028,7 @@ void btrfsic_submit_bio(int rw, struct bio *bio)
GFP_NOFS);
if (!mapped_datav)
goto leave;
+ cur_bytenr = dev_bytenr;
for (i = 0; i < bio->bi_vcnt; i++) {
BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_CACHE_SIZE);
mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page);
@@ -3044,16 +3040,13 @@ void btrfsic_submit_bio(int rw, struct bio *bio)
kfree(mapped_datav);
goto leave;
}
- if ((BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
- BTRFSIC_PRINT_MASK_VERBOSE) ==
- (dev_state->state->print_mask &
- (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
- BTRFSIC_PRINT_MASK_VERBOSE)))
+ if (dev_state->state->print_mask &
+ BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE)
printk(KERN_INFO
- "#%u: page=%p, len=%u, offset=%u\n",
- i, bio->bi_io_vec[i].bv_page,
- bio->bi_io_vec[i].bv_len,
+ "#%u: bytenr=%llu, len=%u, offset=%u\n",
+ i, cur_bytenr, bio->bi_io_vec[i].bv_len,
bio->bi_io_vec[i].bv_offset);
+ cur_bytenr += bio->bi_io_vec[i].bv_len;
}
btrfsic_process_written_block(dev_state, dev_bytenr,
mapped_datav, bio->bi_vcnt,
@@ -3097,10 +3090,20 @@ void btrfsic_submit_bio(int rw, struct bio *bio)
}
leave:
mutex_unlock(&btrfsic_mutex);
+}
+void btrfsic_submit_bio(int rw, struct bio *bio)
+{
+ __btrfsic_submit_bio(rw, bio);
submit_bio(rw, bio);
}
+int btrfsic_submit_bio_wait(int rw, struct bio *bio)
+{
+ __btrfsic_submit_bio(rw, bio);
+ return submit_bio_wait(rw, bio);
+}
+
int btrfsic_mount(struct btrfs_root *root,
struct btrfs_fs_devices *fs_devices,
int including_extent_data, u32 print_mask)
diff --git a/fs/btrfs/check-integrity.h b/fs/btrfs/check-integrity.h
index 8b59175cc502..13b8566c97ab 100644
--- a/fs/btrfs/check-integrity.h
+++ b/fs/btrfs/check-integrity.h
@@ -22,9 +22,11 @@
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
int btrfsic_submit_bh(int rw, struct buffer_head *bh);
void btrfsic_submit_bio(int rw, struct bio *bio);
+int btrfsic_submit_bio_wait(int rw, struct bio *bio);
#else
#define btrfsic_submit_bh submit_bh
#define btrfsic_submit_bio submit_bio
+#define btrfsic_submit_bio_wait submit_bio_wait
#endif
int btrfsic_mount(struct btrfs_root *root,
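
[editor's note] check-integrity here (and extent_io and scrub later in this series) are all converted from the open-coded DECLARE_COMPLETION_ONSTACK + bi_end_io + wait_for_completion() dance to submit_bio_wait(), which submits the bio, sleeps until it completes, and returns non-zero on I/O error. A sketch of a synchronous single-page read using it (bi_sector is the pre-immutable-biovec field name used throughout this tree):

#include <linux/bio.h>
#include <linux/fs.h>

static int demo_read_block(struct block_device *bdev, sector_t sector,
			   struct page *page, unsigned int len)
{
	struct bio *bio = bio_alloc(GFP_NOFS, 1);
	int ret;

	if (!bio)
		return -ENOMEM;
	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, len, 0);	/* always fits in a fresh bio */

	/* submits, waits for completion, reports the I/O status */
	ret = submit_bio_wait(READ, bio) ? -EIO : 0;
	bio_put(bio);
	return ret;
}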
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index f9aeb2759a64..54ab86127f7a 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3613,9 +3613,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
struct btrfs_ordered_sum *sums);
int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
struct bio *bio, u64 file_start, int contig);
-int btrfs_csum_truncate(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_path *path,
- u64 isize);
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
struct list_head *list, int search_commit);
/* inode.c */
@@ -3744,9 +3741,6 @@ void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info);
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
int skip_pinned);
-int btrfs_replace_extent_cache(struct inode *inode, struct extent_map *replace,
- u64 start, u64 end, int skip_pinned,
- int modified);
extern const struct file_operations btrfs_file_operations;
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode,
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 342f9fd411e3..2cfc3dfff64f 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -366,7 +366,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
dev_replace->tgtdev = tgt_device;
printk_in_rcu(KERN_INFO
- "btrfs: dev_replace from %s (devid %llu) to %s) started\n",
+ "btrfs: dev_replace from %s (devid %llu) to %s started\n",
src_device->missing ? "<missing disk>" :
rcu_str_deref(src_device->name),
src_device->devid,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 4c4ed0bb3da1..8072cfa8a3b1 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3517,7 +3517,6 @@ int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
int btrfs_commit_super(struct btrfs_root *root)
{
struct btrfs_trans_handle *trans;
- int ret;
mutex_lock(&root->fs_info->cleaner_mutex);
btrfs_run_delayed_iputs(root);
@@ -3531,25 +3530,7 @@ int btrfs_commit_super(struct btrfs_root *root)
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = btrfs_commit_transaction(trans, root);
- if (ret)
- return ret;
- /* run commit again to drop the original snapshot */
- trans = btrfs_join_transaction(root);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
- ret = btrfs_commit_transaction(trans, root);
- if (ret)
- return ret;
- ret = btrfs_write_and_wait_transaction(NULL, root);
- if (ret) {
- btrfs_error(root->fs_info, ret,
- "Failed to sync btree inode to disk.");
- return ret;
- }
-
- ret = write_ctree_super(NULL, root, 0);
- return ret;
+ return btrfs_commit_transaction(trans, root);
}
int close_ctree(struct btrfs_root *root)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 45d98d01028f..9c01509dd8ab 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -767,20 +767,19 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- if (metadata) {
- key.objectid = bytenr;
- key.type = BTRFS_METADATA_ITEM_KEY;
- key.offset = offset;
- } else {
- key.objectid = bytenr;
- key.type = BTRFS_EXTENT_ITEM_KEY;
- key.offset = offset;
- }
-
if (!trans) {
path->skip_locking = 1;
path->search_commit_root = 1;
}
+
+search_again:
+ key.objectid = bytenr;
+ key.offset = offset;
+ if (metadata)
+ key.type = BTRFS_METADATA_ITEM_KEY;
+ else
+ key.type = BTRFS_EXTENT_ITEM_KEY;
+
again:
ret = btrfs_search_slot(trans, root->fs_info->extent_root,
&key, path, 0, 0);
@@ -788,7 +787,6 @@ again:
goto out_free;
if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
- metadata = 0;
if (path->slots[0]) {
path->slots[0]--;
btrfs_item_key_to_cpu(path->nodes[0], &key,
@@ -855,7 +853,7 @@ again:
mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
btrfs_put_delayed_ref(&head->node);
- goto again;
+ goto search_again;
}
if (head->extent_op && head->extent_op->update_flags)
extent_flags |= head->extent_op->flags_to_set;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 856bc2b2192c..ff43802a7c88 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1952,11 +1952,6 @@ static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
return err;
}
-static void repair_io_failure_callback(struct bio *bio, int err)
-{
- complete(bio->bi_private);
-}
-
/*
* this bypasses the standard btrfs submit functions deliberately, as
* the standard behavior is to write all copies in a raid setup. here we only
@@ -1973,13 +1968,13 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
{
struct bio *bio;
struct btrfs_device *dev;
- DECLARE_COMPLETION_ONSTACK(compl);
u64 map_length = 0;
u64 sector;
struct btrfs_bio *bbio = NULL;
struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
int ret;
+ ASSERT(!(fs_info->sb->s_flags & MS_RDONLY));
BUG_ON(!mirror_num);
/* we can't repair anything in raid56 yet */
@@ -1989,8 +1984,6 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
if (!bio)
return -EIO;
- bio->bi_private = &compl;
- bio->bi_end_io = repair_io_failure_callback;
bio->bi_size = 0;
map_length = length;
@@ -2011,10 +2004,8 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
}
bio->bi_bdev = dev->bdev;
bio_add_page(bio, page, length, start - page_offset(page));
- btrfsic_submit_bio(WRITE_SYNC, bio);
- wait_for_completion(&compl);
- if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+ if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
/* try to remap that extent elsewhere? */
bio_put(bio);
btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
@@ -2036,6 +2027,9 @@ int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
int ret = 0;
+ if (root->fs_info->sb->s_flags & MS_RDONLY)
+ return -EROFS;
+
for (i = 0; i < num_pages; i++) {
struct page *p = extent_buffer_page(eb, i);
ret = repair_io_failure(root->fs_info, start, PAGE_CACHE_SIZE,
@@ -2057,12 +2051,12 @@ static int clean_io_failure(u64 start, struct page *page)
u64 private;
u64 private_failure;
struct io_failure_record *failrec;
- struct btrfs_fs_info *fs_info;
+ struct inode *inode = page->mapping->host;
+ struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
struct extent_state *state;
int num_copies;
int did_repair = 0;
int ret;
- struct inode *inode = page->mapping->host;
private = 0;
ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
@@ -2085,6 +2079,8 @@ static int clean_io_failure(u64 start, struct page *page)
did_repair = 1;
goto out;
}
+ if (fs_info->sb->s_flags & MS_RDONLY)
+ goto out;
spin_lock(&BTRFS_I(inode)->io_tree.lock);
state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
@@ -2094,7 +2090,6 @@ static int clean_io_failure(u64 start, struct page *page)
if (state && state->start <= failrec->start &&
state->end >= failrec->start + failrec->len - 1) {
- fs_info = BTRFS_I(inode)->root->fs_info;
num_copies = btrfs_num_copies(fs_info, failrec->logical,
failrec->len);
if (num_copies > 1) {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index da8d2f696ac5..f1a77449d032 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2129,7 +2129,8 @@ static noinline bool record_extent_backrefs(struct btrfs_path *path,
old->extent_offset, fs_info,
path, record_one_backref,
old);
- BUG_ON(ret < 0 && ret != -ENOENT);
+ if (ret < 0 && ret != -ENOENT)
+ return false;
/* no backref to be processed for this extent */
if (!old->count) {
@@ -6186,8 +6187,7 @@ insert:
write_unlock(&em_tree->lock);
out:
- if (em)
- trace_btrfs_get_extent(root, em);
+ trace_btrfs_get_extent(root, em);
if (path)
btrfs_free_path(path);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a111622598b0..21da5762b0b1 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2121,7 +2121,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
if (err == -EINTR)
- goto out;
+ goto out_drop_write;
dentry = lookup_one_len(vol_args->name, parent, namelen);
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
@@ -2284,6 +2284,7 @@ out_dput:
dput(dentry);
out_unlock_dir:
mutex_unlock(&dir->i_mutex);
+out_drop_write:
mnt_drop_write_file(file);
out:
kfree(vol_args);
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 25a8f3812f14..69582d5b69d1 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -638,6 +638,7 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr)
WARN_ON(nr < 0);
}
}
+ list_splice_tail(&splice, &fs_info->ordered_roots);
spin_unlock(&fs_info->ordered_root_lock);
}
@@ -803,7 +804,7 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
btrfs_put_ordered_extent(ordered);
break;
}
- if (ordered->file_offset + ordered->len < start) {
+ if (ordered->file_offset + ordered->len <= start) {
btrfs_put_ordered_extent(ordered);
break;
}
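
[editor's note] The "<" to "<=" change in ordered-data encodes the usual half-open convention: an ordered extent covers [file_offset, file_offset + len), so it lies entirely before the waited-for range exactly when file_offset + len <= start; with "<", an extent ending exactly at start was treated as overlapping and waited on needlessly. As a predicate:

#include <linux/types.h>

/* The extent occupies [file_offset, file_offset + len). */
static bool extent_entirely_before(u64 file_offset, u64 len, u64 start)
{
	return file_offset + len <= start;	/* "<" missed end == start */
}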
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index ce459a7cb16d..429c73c374b8 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -571,7 +571,9 @@ static int is_cowonly_root(u64 root_objectid)
root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
root_objectid == BTRFS_DEV_TREE_OBJECTID ||
root_objectid == BTRFS_TREE_LOG_OBJECTID ||
- root_objectid == BTRFS_CSUM_TREE_OBJECTID)
+ root_objectid == BTRFS_CSUM_TREE_OBJECTID ||
+ root_objectid == BTRFS_UUID_TREE_OBJECTID ||
+ root_objectid == BTRFS_QUOTA_TREE_OBJECTID)
return 1;
return 0;
}
@@ -1264,10 +1266,10 @@ static int __must_check __add_reloc_root(struct btrfs_root *root)
}
/*
- * helper to update/delete the 'address of tree root -> reloc tree'
+ * helper to delete the 'address of tree root -> reloc tree'
* mapping
*/
-static int __update_reloc_root(struct btrfs_root *root, int del)
+static void __del_reloc_root(struct btrfs_root *root)
{
struct rb_node *rb_node;
struct mapping_node *node = NULL;
@@ -1275,7 +1277,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
spin_lock(&rc->reloc_root_tree.lock);
rb_node = tree_search(&rc->reloc_root_tree.rb_root,
- root->commit_root->start);
+ root->node->start);
if (rb_node) {
node = rb_entry(rb_node, struct mapping_node, rb_node);
rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
@@ -1283,23 +1285,45 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
spin_unlock(&rc->reloc_root_tree.lock);
if (!node)
- return 0;
+ return;
BUG_ON((struct btrfs_root *)node->data != root);
- if (!del) {
- spin_lock(&rc->reloc_root_tree.lock);
- node->bytenr = root->node->start;
- rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
- node->bytenr, &node->rb_node);
- spin_unlock(&rc->reloc_root_tree.lock);
- if (rb_node)
- backref_tree_panic(rb_node, -EEXIST, node->bytenr);
- } else {
- spin_lock(&root->fs_info->trans_lock);
- list_del_init(&root->root_list);
- spin_unlock(&root->fs_info->trans_lock);
- kfree(node);
+ spin_lock(&root->fs_info->trans_lock);
+ list_del_init(&root->root_list);
+ spin_unlock(&root->fs_info->trans_lock);
+ kfree(node);
+}
+
+/*
+ * helper to update the 'address of tree root -> reloc tree'
+ * mapping
+ */
+static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
+{
+ struct rb_node *rb_node;
+ struct mapping_node *node = NULL;
+ struct reloc_control *rc = root->fs_info->reloc_ctl;
+
+ spin_lock(&rc->reloc_root_tree.lock);
+ rb_node = tree_search(&rc->reloc_root_tree.rb_root,
+ root->node->start);
+ if (rb_node) {
+ node = rb_entry(rb_node, struct mapping_node, rb_node);
+ rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
}
+ spin_unlock(&rc->reloc_root_tree.lock);
+
+ if (!node)
+ return 0;
+ BUG_ON((struct btrfs_root *)node->data != root);
+
+ spin_lock(&rc->reloc_root_tree.lock);
+ node->bytenr = new_bytenr;
+ rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
+ node->bytenr, &node->rb_node);
+ spin_unlock(&rc->reloc_root_tree.lock);
+ if (rb_node)
+ backref_tree_panic(rb_node, -EEXIST, node->bytenr);
return 0;
}
@@ -1420,7 +1444,6 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
{
struct btrfs_root *reloc_root;
struct btrfs_root_item *root_item;
- int del = 0;
int ret;
if (!root->reloc_root)
@@ -1432,11 +1455,9 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
if (root->fs_info->reloc_ctl->merge_reloc_tree &&
btrfs_root_refs(root_item) == 0) {
root->reloc_root = NULL;
- del = 1;
+ __del_reloc_root(reloc_root);
}
- __update_reloc_root(reloc_root, del);
-
if (reloc_root->commit_root != reloc_root->node) {
btrfs_set_root_node(root_item, reloc_root->node);
free_extent_buffer(reloc_root->commit_root);
@@ -2287,7 +2308,7 @@ void free_reloc_roots(struct list_head *list)
while (!list_empty(list)) {
reloc_root = list_entry(list->next, struct btrfs_root,
root_list);
- __update_reloc_root(reloc_root, 1);
+ __del_reloc_root(reloc_root);
free_extent_buffer(reloc_root->node);
free_extent_buffer(reloc_root->commit_root);
kfree(reloc_root);
@@ -2332,7 +2353,7 @@ again:
ret = merge_reloc_root(rc, root);
if (ret) {
- __update_reloc_root(reloc_root, 1);
+ __del_reloc_root(reloc_root);
free_extent_buffer(reloc_root->node);
free_extent_buffer(reloc_root->commit_root);
kfree(reloc_root);
@@ -2388,6 +2409,13 @@ out:
btrfs_std_error(root->fs_info, ret);
if (!list_empty(&reloc_roots))
free_reloc_roots(&reloc_roots);
+
+ /* new reloc root may be added */
+ mutex_lock(&root->fs_info->reloc_mutex);
+ list_splice_init(&rc->reloc_roots, &reloc_roots);
+ mutex_unlock(&root->fs_info->reloc_mutex);
+ if (!list_empty(&reloc_roots))
+ free_reloc_roots(&reloc_roots);
}
BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
@@ -4522,6 +4550,11 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
+ if (buf == root->node)
+ __update_reloc_root(root, cow->start);
+ }
+
level = btrfs_header_level(buf);
if (btrfs_header_generation(buf) <=
btrfs_root_last_snapshot(&root->root_item))
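
The relocation hunks above split the old dual-purpose helper into __del_reloc_root() and a new __update_reloc_root() that re-keys a mapping_node in the reloc rb-tree when a root's block moves. Re-keying an ordered container always follows the same discipline: unlink under the lock, update the key, reinsert, since mutating the key of a linked node would corrupt the ordering. A minimal userspace sketch of that discipline over a sorted singly linked list (illustrative names, not btrfs code):

    #include <stdio.h>

    struct mapping { unsigned long key; struct mapping *next; };

    /* Insert node into the list, keeping ascending key order. */
    static void insert_sorted(struct mapping **head, struct mapping *node)
    {
        while (*head && (*head)->key < node->key)
            head = &(*head)->next;
        node->next = *head;
        *head = node;
    }

    /* Unlink and return the node with the given key, or NULL. */
    static struct mapping *erase(struct mapping **head, unsigned long key)
    {
        while (*head && (*head)->key != key)
            head = &(*head)->next;
        struct mapping *node = *head;
        if (node)
            *head = node->next;
        return node;
    }

    /* Re-key: erase, mutate the key, reinsert; never edit in place. */
    static void rekey(struct mapping **head, unsigned long old_key,
                      unsigned long new_key)
    {
        struct mapping *node = erase(head, old_key);
        if (!node)
            return;
        node->key = new_key;
        insert_sorted(head, node);
    }

    int main(void)
    {
        struct mapping a = { 10, NULL }, b = { 20, NULL };
        struct mapping *head = NULL;

        insert_sorted(&head, &a);
        insert_sorted(&head, &b);
        rekey(&head, 10, 30);            /* like bytenr -> new_bytenr */
        for (struct mapping *m = head; m; m = m->next)
            printf("%lu\n", m->key);     /* prints 20 then 30 */
        return 0;
    }
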
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 2544805544f0..1fd3f33c330a 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -208,7 +208,6 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
int is_metadata, int have_csum,
const u8 *csum, u64 generation,
u16 csum_size);
-static void scrub_complete_bio_end_io(struct bio *bio, int err);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
struct scrub_block *sblock_good,
int force_write);
@@ -938,8 +937,10 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
BTRFS_DEV_STAT_CORRUPTION_ERRS);
}
- if (sctx->readonly && !sctx->is_dev_replace)
- goto did_not_correct_error;
+ if (sctx->readonly) {
+ ASSERT(!sctx->is_dev_replace);
+ goto out;
+ }
if (!is_metadata && !have_csum) {
struct scrub_fixup_nodatasum *fixup_nodatasum;
@@ -1292,7 +1293,6 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
for (page_num = 0; page_num < sblock->page_count; page_num++) {
struct bio *bio;
struct scrub_page *page = sblock->pagev[page_num];
- DECLARE_COMPLETION_ONSTACK(complete);
if (page->dev->bdev == NULL) {
page->io_error = 1;
@@ -1309,18 +1309,11 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
}
bio->bi_bdev = page->dev->bdev;
bio->bi_sector = page->physical >> 9;
- bio->bi_end_io = scrub_complete_bio_end_io;
- bio->bi_private = &complete;
bio_add_page(bio, page->page, PAGE_SIZE, 0);
- btrfsic_submit_bio(READ, bio);
-
- /* this will also unplug the queue */
- wait_for_completion(&complete);
-
- page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
- if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+ if (btrfsic_submit_bio_wait(READ, bio))
sblock->no_io_error_seen = 0;
+
bio_put(bio);
}
@@ -1389,11 +1382,6 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
sblock->checksum_error = 1;
}
-static void scrub_complete_bio_end_io(struct bio *bio, int err)
-{
- complete((struct completion *)bio->bi_private);
-}
-
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
struct scrub_block *sblock_good,
int force_write)
@@ -1428,7 +1416,6 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
sblock_bad->checksum_error || page_bad->io_error) {
struct bio *bio;
int ret;
- DECLARE_COMPLETION_ONSTACK(complete);
if (!page_bad->dev->bdev) {
printk_ratelimited(KERN_WARNING
@@ -1441,19 +1428,14 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
return -EIO;
bio->bi_bdev = page_bad->dev->bdev;
bio->bi_sector = page_bad->physical >> 9;
- bio->bi_end_io = scrub_complete_bio_end_io;
- bio->bi_private = &complete;
ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
if (PAGE_SIZE != ret) {
bio_put(bio);
return -EIO;
}
- btrfsic_submit_bio(WRITE, bio);
- /* this will also unplug the queue */
- wait_for_completion(&complete);
- if (!bio_flagged(bio, BIO_UPTODATE)) {
+ if (btrfsic_submit_bio_wait(WRITE, bio)) {
btrfs_dev_stat_inc_and_print(page_bad->dev,
BTRFS_DEV_STAT_WRITE_ERRS);
btrfs_dev_replace_stats_inc(
@@ -3373,7 +3355,6 @@ static int write_page_nocow(struct scrub_ctx *sctx,
struct bio *bio;
struct btrfs_device *dev;
int ret;
- DECLARE_COMPLETION_ONSTACK(compl);
dev = sctx->wr_ctx.tgtdev;
if (!dev)
@@ -3390,8 +3371,6 @@ static int write_page_nocow(struct scrub_ctx *sctx,
spin_unlock(&sctx->stat_lock);
return -ENOMEM;
}
- bio->bi_private = &compl;
- bio->bi_end_io = scrub_complete_bio_end_io;
bio->bi_size = 0;
bio->bi_sector = physical_for_dev_replace >> 9;
bio->bi_bdev = dev->bdev;
@@ -3402,10 +3381,8 @@ leave_with_eio:
btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
return -EIO;
}
- btrfsic_submit_bio(WRITE_SYNC, bio);
- wait_for_completion(&compl);
- if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+ if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
goto leave_with_eio;
bio_put(bio);
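
The scrub.c conversion above removes three open-coded completion dances in favour of btrfsic_submit_bio_wait(), which mirrors the generic submit_bio_wait() helper added to the block layer in the same release. That helper is built from exactly the pieces these call sites used to carry themselves: an on-stack completion, an end_io callback that signals it, and a wait. Its shape, shown as a sketch of the 3.13-era block-layer code rather than a verbatim copy:

    struct submit_bio_ret {
        struct completion event;
        int error;
    };

    static void submit_bio_wait_endio(struct bio *bio, int error)
    {
        struct submit_bio_ret *ret = bio->bi_private;

        ret->error = error;              /* record the I/O result ... */
        complete(&ret->event);           /* ... and wake the submitter */
    }

    int submit_bio_wait(int rw, struct bio *bio)
    {
        struct submit_bio_ret ret;

        rw |= REQ_SYNC;                  /* synchronous: don't let it sit */
        init_completion(&ret.event);
        bio->bi_private = &ret;
        bio->bi_end_io = submit_bio_wait_endio;
        submit_bio(rw, bio);
        wait_for_completion(&ret.event); /* sleep until end_io fires */

        return ret.error;                /* 0 on success, -Exxx on error */
    }
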
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 6837fe87f3a6..945d1db98f26 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4723,8 +4723,8 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
}
if (!access_ok(VERIFY_READ, arg->clone_sources,
- sizeof(*arg->clone_sources *
- arg->clone_sources_count))) {
+ sizeof(*arg->clone_sources) *
+ arg->clone_sources_count)) {
ret = -EFAULT;
goto out;
}
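
The send.c fix above is worth dwelling on: with the parenthesis misplaced, sizeof() was applied to the whole multiplication expression, so the operand's type rather than the intended byte count determined the result, and the code compiled without complaint. A self-contained illustration (hypothetical struct, not the real ioctl argument):

    #include <stdio.h>

    struct clone_arg { unsigned long long *clone_sources; unsigned count; };

    int main(void)
    {
        struct clone_arg arg = { 0, 16 };

        /* Buggy form: sizeof() of the *product* expression, which is
         * just sizeof(unsigned long long) regardless of count. */
        printf("%zu\n", sizeof(*arg.clone_sources * arg.count));   /* 8 */

        /* Fixed form: element size times element count. */
        printf("%zu\n", sizeof(*arg.clone_sources) * arg.count);   /* 128 */
        return 0;
    }
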
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 2d8ac1bf0cf9..d71a11d13dfa 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -432,7 +432,6 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
} else {
printk(KERN_INFO "btrfs: setting nodatacow\n");
}
- info->compress_type = BTRFS_COMPRESS_NONE;
btrfs_clear_opt(info->mount_opt, COMPRESS);
btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
btrfs_set_opt(info->mount_opt, NODATACOW);
@@ -461,7 +460,6 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
btrfs_set_fs_incompat(info, COMPRESS_LZO);
} else if (strncmp(args[0].from, "no", 2) == 0) {
compress_type = "no";
- info->compress_type = BTRFS_COMPRESS_NONE;
btrfs_clear_opt(info->mount_opt, COMPRESS);
btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
compress_force = false;
@@ -474,9 +472,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
btrfs_set_opt(info->mount_opt, FORCE_COMPRESS);
pr_info("btrfs: force %s compression\n",
compress_type);
- } else
+ } else if (btrfs_test_opt(root, COMPRESS)) {
pr_info("btrfs: use %s compression\n",
compress_type);
+ }
break;
case Opt_ssd:
printk(KERN_INFO "btrfs: use ssd allocation scheme\n");
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 57c16b46afbd..c6a872a8a468 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1480,7 +1480,7 @@ static void do_async_commit(struct work_struct *work)
* We've got freeze protection passed with the transaction.
* Tell lockdep about it.
*/
- if (ac->newtrans->type < TRANS_JOIN_NOLOCK)
+ if (ac->newtrans->type & __TRANS_FREEZABLE)
rwsem_acquire_read(
&ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
0, 1, _THIS_IP_);
@@ -1521,7 +1521,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
* Tell lockdep we've released the freeze rwsem, since the
* async commit thread will be the one to unlock it.
*/
- if (trans->type < TRANS_JOIN_NOLOCK)
+ if (ac->newtrans->type & __TRANS_FREEZABLE)
rwsem_release(
&root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
1, _THIS_IP_);
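
The transaction.c change replaces an ordinal comparison with a flag test. After btrfs turned transaction types into a bitmask, 'type < TRANS_JOIN_NOLOCK' only accidentally tracked which types hold freeze protection: a non-freezable type whose bit happens to be numerically small still passes the '<' test. A toy encoding in the same spirit (values are illustrative, not the real __TRANS_* definitions):

    #include <stdio.h>

    /* Illustrative flag layout in the spirit of btrfs's __TRANS_* bits. */
    #define FREEZABLE   (1U << 0)
    #define ATTACH      (1U << 10)               /* not freezable */
    #define JOIN        ((1U << 11) | FREEZABLE)
    #define JOIN_NOLOCK (1U << 12)               /* not freezable */

    int main(void)
    {
        unsigned types[] = { JOIN, ATTACH, JOIN_NOLOCK };

        for (int i = 0; i < 3; i++) {
            unsigned t = types[i];
            /* The ordinal test and the flag test disagree for ATTACH. */
            printf("type %#06x: '< JOIN_NOLOCK' = %d, '& FREEZABLE' = %d\n",
                   t, t < JOIN_NOLOCK, !!(t & FREEZABLE));
        }
        return 0;
    }
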
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 744553c83fe2..9f7fc51ca334 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3697,7 +3697,8 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
ret = btrfs_truncate_inode_items(trans, log,
inode, 0, 0);
} else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
- &BTRFS_I(inode)->runtime_flags)) {
+ &BTRFS_I(inode)->runtime_flags) ||
+ inode_only == LOG_INODE_EXISTS) {
if (inode_only == LOG_INODE_ALL)
fast_search = true;
max_key.type = BTRFS_XATTR_ITEM_KEY;
@@ -3801,7 +3802,7 @@ log_extents:
err = ret;
goto out_unlock;
}
- } else {
+ } else if (inode_only == LOG_INODE_ALL) {
struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
struct extent_map *em, *n;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 0db637097862..92303f42baaa 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -5394,7 +5394,7 @@ static int bio_size_ok(struct block_device *bdev, struct bio *bio,
{
struct bio_vec *prev;
struct request_queue *q = bdev_get_queue(bdev);
- unsigned short max_sectors = queue_max_sectors(q);
+ unsigned int max_sectors = queue_max_sectors(q);
struct bvec_merge_data bvm = {
.bi_bdev = bdev,
.bi_sector = sector,
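
The volumes.c one-liner fixes a silent integer truncation: queue_max_sectors() returns an unsigned int, and storing it in an unsigned short wraps any value of 65536 or more down to its low 16 bits. A two-line demonstration:

    #include <stdio.h>

    int main(void)
    {
        unsigned int max_sectors = 65536;       /* plausible for a fast device */
        unsigned short truncated = max_sectors; /* low 16 bits only: 0 */

        printf("%u -> %u\n", max_sectors, truncated);
        return 0;
    }
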
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 6df8bd481425..ec3ba43b9faa 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -210,13 +210,17 @@ static int readpage_nounlock(struct file *filp, struct page *page)
if (err < 0) {
SetPageError(page);
goto out;
- } else if (err < PAGE_CACHE_SIZE) {
+ } else {
+ if (err < PAGE_CACHE_SIZE) {
/* zero fill remainder of page */
- zero_user_segment(page, err, PAGE_CACHE_SIZE);
+ zero_user_segment(page, err, PAGE_CACHE_SIZE);
+ } else {
+ flush_dcache_page(page);
+ }
}
SetPageUptodate(page);
- if (err == 0)
+ if (err >= 0)
ceph_readpage_to_fscache(inode, page);
out:
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index 7db2e6ca4b8f..8c44fdd4e1c3 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -324,6 +324,9 @@ void ceph_invalidate_fscache_page(struct inode* inode, struct page *page)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ if (!PageFsCache(page))
+ return;
+
fscache_wait_on_page_write(ci->fscache, page);
fscache_uncache_page(ci->fscache, page);
}
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 13976c33332e..3c0a4bd74996 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -897,7 +897,7 @@ static int __ceph_is_any_caps(struct ceph_inode_info *ci)
* caller should hold i_ceph_lock.
* caller will not hold session s_mutex if called from destroy_inode.
*/
-void __ceph_remove_cap(struct ceph_cap *cap)
+void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
{
struct ceph_mds_session *session = cap->session;
struct ceph_inode_info *ci = cap->ci;
@@ -909,6 +909,16 @@ void __ceph_remove_cap(struct ceph_cap *cap)
/* remove from session list */
spin_lock(&session->s_cap_lock);
+ /*
+ * s_cap_reconnect is protected by s_cap_lock. no one changes
+ * s_cap_gen while session is in the reconnect state.
+ */
+ if (queue_release &&
+ (!session->s_cap_reconnect ||
+ cap->cap_gen == session->s_cap_gen))
+ __queue_cap_release(session, ci->i_vino.ino, cap->cap_id,
+ cap->mseq, cap->issue_seq);
+
if (session->s_cap_iterator == cap) {
/* not yet, we are iterating over this very cap */
dout("__ceph_remove_cap delaying %p removal from session %p\n",
@@ -1023,7 +1033,6 @@ void __queue_cap_release(struct ceph_mds_session *session,
struct ceph_mds_cap_release *head;
struct ceph_mds_cap_item *item;
- spin_lock(&session->s_cap_lock);
BUG_ON(!session->s_num_cap_releases);
msg = list_first_entry(&session->s_cap_releases,
struct ceph_msg, list_head);
@@ -1052,7 +1061,6 @@ void __queue_cap_release(struct ceph_mds_session *session,
(int)CEPH_CAPS_PER_RELEASE,
(int)msg->front.iov_len);
}
- spin_unlock(&session->s_cap_lock);
}
/*
@@ -1067,12 +1075,8 @@ void ceph_queue_caps_release(struct inode *inode)
p = rb_first(&ci->i_caps);
while (p) {
struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
- struct ceph_mds_session *session = cap->session;
-
- __queue_cap_release(session, ceph_ino(inode), cap->cap_id,
- cap->mseq, cap->issue_seq);
p = rb_next(p);
- __ceph_remove_cap(cap);
+ __ceph_remove_cap(cap, true);
}
}
@@ -2791,7 +2795,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
}
spin_unlock(&mdsc->cap_dirty_lock);
}
- __ceph_remove_cap(cap);
+ __ceph_remove_cap(cap, false);
}
/* else, we already released it */
@@ -2931,9 +2935,12 @@ void ceph_handle_caps(struct ceph_mds_session *session,
if (!inode) {
dout(" i don't have ino %llx\n", vino.ino);
- if (op == CEPH_CAP_OP_IMPORT)
+ if (op == CEPH_CAP_OP_IMPORT) {
+ spin_lock(&session->s_cap_lock);
__queue_cap_release(session, vino.ino, cap_id,
mseq, seq);
+ spin_unlock(&session->s_cap_lock);
+ }
goto flush_cap_releases;
}
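
The caps.c hunks hoist s_cap_lock out of __queue_cap_release() and into its callers, following the common kernel convention that a double-underscore prefix marks the "caller already holds the lock" variant. The pairing, reduced to a runnable userspace skeleton (hypothetical names):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int pending;                  /* protected by 'lock' */

    /* Double-underscore variant: caller must already hold 'lock'. */
    static void __queue_release(void)
    {
        pending++;
    }

    /* Public variant takes the lock itself. */
    static void queue_release(void)
    {
        pthread_mutex_lock(&lock);
        __queue_release();
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        queue_release();

        /* A caller already inside a locked section calls the __ variant
         * directly, as __ceph_remove_cap() now does. */
        pthread_mutex_lock(&lock);
        __queue_release();
        pthread_mutex_unlock(&lock);

        printf("pending = %d\n", pending);
        return 0;
    }
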
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 868b61d56cac..2a0bcaeb189a 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -352,8 +352,18 @@ more:
}
/* note next offset and last dentry name */
+ rinfo = &req->r_reply_info;
+ if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
+ frag = le32_to_cpu(rinfo->dir_dir->frag);
+ if (ceph_frag_is_leftmost(frag))
+ fi->next_offset = 2;
+ else
+ fi->next_offset = 0;
+ off = fi->next_offset;
+ }
fi->offset = fi->next_offset;
fi->last_readdir = req;
+ fi->frag = frag;
if (req->r_reply_info.dir_end) {
kfree(fi->last_name);
@@ -363,7 +373,6 @@ more:
else
fi->next_offset = 0;
} else {
- rinfo = &req->r_reply_info;
err = note_last_dentry(fi,
rinfo->dir_dname[rinfo->dir_nr-1],
rinfo->dir_dname_len[rinfo->dir_nr-1]);
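
The dir.c change resets next_offset whenever a reply comes back for a different fragment: readdir positions in ceph pack the fragment id and the intra-fragment offset into one 64-bit value, and the leftmost fragment reserves offsets 0 and 1 for "." and "..", hence the 2-versus-0 reset. The encoding, as a sketch of ceph_make_fpos():

    #include <stdio.h>
    #include <stdint.h>

    /* Sketch of ceph's readdir position encoding: fragment id in the
     * high 32 bits, offset within the fragment in the low 32 bits. */
    static uint64_t make_fpos(uint32_t frag, uint32_t off)
    {
        return ((uint64_t)frag << 32) | off;
    }

    int main(void)
    {
        /* Leftmost fragment: offsets 0 and 1 are "." and "..", so real
         * entries start at 2; any other fragment starts at 0. */
        printf("%#llx\n", (unsigned long long)make_fpos(0, 2));
        printf("%#llx\n", (unsigned long long)make_fpos(0xaaaa0000u, 0));
        return 0;
    }
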
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 8549a48115f7..278fd2891288 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -577,6 +577,8 @@ static int fill_inode(struct inode *inode,
int issued = 0, implemented;
struct timespec mtime, atime, ctime;
u32 nsplits;
+ struct ceph_inode_frag *frag;
+ struct rb_node *rb_node;
struct ceph_buffer *xattr_blob = NULL;
int err = 0;
int queue_trunc = 0;
@@ -751,15 +753,38 @@ no_change:
/* FIXME: move me up, if/when version reflects fragtree changes */
nsplits = le32_to_cpu(info->fragtree.nsplits);
mutex_lock(&ci->i_fragtree_mutex);
+ rb_node = rb_first(&ci->i_fragtree);
for (i = 0; i < nsplits; i++) {
u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
- struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);
-
- if (IS_ERR(frag))
- continue;
+ frag = NULL;
+ while (rb_node) {
+ frag = rb_entry(rb_node, struct ceph_inode_frag, node);
+ if (ceph_frag_compare(frag->frag, id) >= 0) {
+ if (frag->frag != id)
+ frag = NULL;
+ else
+ rb_node = rb_next(rb_node);
+ break;
+ }
+ rb_node = rb_next(rb_node);
+ rb_erase(&frag->node, &ci->i_fragtree);
+ kfree(frag);
+ frag = NULL;
+ }
+ if (!frag) {
+ frag = __get_or_create_frag(ci, id);
+ if (IS_ERR(frag))
+ continue;
+ }
frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
dout(" frag %x split by %d\n", frag->frag, frag->split_by);
}
+ while (rb_node) {
+ frag = rb_entry(rb_node, struct ceph_inode_frag, node);
+ rb_node = rb_next(rb_node);
+ rb_erase(&frag->node, &ci->i_fragtree);
+ kfree(frag);
+ }
mutex_unlock(&ci->i_fragtree_mutex);
/* were we issued a capability? */
@@ -953,7 +978,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
struct ceph_mds_reply_inode *ininfo;
struct ceph_vino vino;
struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
- int i = 0;
int err = 0;
dout("fill_trace %p is_dentry %d is_target %d\n", req,
@@ -1014,6 +1038,29 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
}
}
+ if (rinfo->head->is_target) {
+ vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
+ vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
+
+ in = ceph_get_inode(sb, vino);
+ if (IS_ERR(in)) {
+ err = PTR_ERR(in);
+ goto done;
+ }
+ req->r_target_inode = in;
+
+ err = fill_inode(in, &rinfo->targeti, NULL,
+ session, req->r_request_started,
+ (le32_to_cpu(rinfo->head->result) == 0) ?
+ req->r_fmode : -1,
+ &req->r_caps_reservation);
+ if (err < 0) {
+ pr_err("fill_inode badness %p %llx.%llx\n",
+ in, ceph_vinop(in));
+ goto done;
+ }
+ }
+
/*
* ignore null lease/binding on snapdir ENOENT, or else we
* will have trouble splicing in the virtual snapdir later
@@ -1083,7 +1130,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
ceph_dentry(req->r_old_dentry)->offset);
dn = req->r_old_dentry; /* use old_dentry */
- in = dn->d_inode;
}
/* null dentry? */
@@ -1105,44 +1151,28 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
}
/* attach proper inode */
- ininfo = rinfo->targeti.in;
- vino.ino = le64_to_cpu(ininfo->ino);
- vino.snap = le64_to_cpu(ininfo->snapid);
- in = dn->d_inode;
- if (!in) {
- in = ceph_get_inode(sb, vino);
- if (IS_ERR(in)) {
- pr_err("fill_trace bad get_inode "
- "%llx.%llx\n", vino.ino, vino.snap);
- err = PTR_ERR(in);
- d_drop(dn);
- goto done;
- }
+ if (!dn->d_inode) {
+ ihold(in);
dn = splice_dentry(dn, in, &have_lease, true);
if (IS_ERR(dn)) {
err = PTR_ERR(dn);
goto done;
}
req->r_dentry = dn; /* may have spliced */
- ihold(in);
- } else if (ceph_ino(in) == vino.ino &&
- ceph_snap(in) == vino.snap) {
- ihold(in);
- } else {
+ } else if (dn->d_inode && dn->d_inode != in) {
dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
- dn, in, ceph_ino(in), ceph_snap(in),
- vino.ino, vino.snap);
+ dn, dn->d_inode, ceph_vinop(dn->d_inode),
+ ceph_vinop(in));
have_lease = false;
- in = NULL;
}
if (have_lease)
update_dentry_lease(dn, rinfo->dlease, session,
req->r_request_started);
dout(" final dn %p\n", dn);
- i++;
- } else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
- req->r_op == CEPH_MDS_OP_MKSNAP) && !req->r_aborted) {
+ } else if (!req->r_aborted &&
+ (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
+ req->r_op == CEPH_MDS_OP_MKSNAP)) {
struct dentry *dn = req->r_dentry;
/* fill out a snapdir LOOKUPSNAP dentry */
@@ -1152,52 +1182,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
ininfo = rinfo->targeti.in;
vino.ino = le64_to_cpu(ininfo->ino);
vino.snap = le64_to_cpu(ininfo->snapid);
- in = ceph_get_inode(sb, vino);
- if (IS_ERR(in)) {
- pr_err("fill_inode get_inode badness %llx.%llx\n",
- vino.ino, vino.snap);
- err = PTR_ERR(in);
- d_delete(dn);
- goto done;
- }
dout(" linking snapped dir %p to dn %p\n", in, dn);
+ ihold(in);
dn = splice_dentry(dn, in, NULL, true);
if (IS_ERR(dn)) {
err = PTR_ERR(dn);
goto done;
}
req->r_dentry = dn; /* may have spliced */
- ihold(in);
- rinfo->head->is_dentry = 1; /* fool notrace handlers */
- }
-
- if (rinfo->head->is_target) {
- vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
- vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
-
- if (in == NULL || ceph_ino(in) != vino.ino ||
- ceph_snap(in) != vino.snap) {
- in = ceph_get_inode(sb, vino);
- if (IS_ERR(in)) {
- err = PTR_ERR(in);
- goto done;
- }
- }
- req->r_target_inode = in;
-
- err = fill_inode(in,
- &rinfo->targeti, NULL,
- session, req->r_request_started,
- (le32_to_cpu(rinfo->head->result) == 0) ?
- req->r_fmode : -1,
- &req->r_caps_reservation);
- if (err < 0) {
- pr_err("fill_inode badness %p %llx.%llx\n",
- in, ceph_vinop(in));
- goto done;
- }
}
-
done:
dout("fill_trace done err=%d\n", err);
return err;
@@ -1247,11 +1240,23 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
struct qstr dname;
struct dentry *dn;
struct inode *in;
- int err = 0, i;
+ int err = 0, ret, i;
struct inode *snapdir = NULL;
struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
- u64 frag = le32_to_cpu(rhead->args.readdir.frag);
struct ceph_dentry_info *di;
+ u64 r_readdir_offset = req->r_readdir_offset;
+ u32 frag = le32_to_cpu(rhead->args.readdir.frag);
+
+ if (rinfo->dir_dir &&
+ le32_to_cpu(rinfo->dir_dir->frag) != frag) {
+ dout("readdir_prepopulate got new frag %x -> %x\n",
+ frag, le32_to_cpu(rinfo->dir_dir->frag));
+ frag = le32_to_cpu(rinfo->dir_dir->frag);
+ if (ceph_frag_is_leftmost(frag))
+ r_readdir_offset = 2;
+ else
+ r_readdir_offset = 0;
+ }
if (req->r_aborted)
return readdir_prepopulate_inodes_only(req, session);
@@ -1268,6 +1273,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
}
+ /* FIXME: release caps/leases if error occurs */
for (i = 0; i < rinfo->dir_nr; i++) {
struct ceph_vino vino;
@@ -1292,9 +1298,10 @@ retry_lookup:
err = -ENOMEM;
goto out;
}
- err = ceph_init_dentry(dn);
- if (err < 0) {
+ ret = ceph_init_dentry(dn);
+ if (ret < 0) {
dput(dn);
+ err = ret;
goto out;
}
} else if (dn->d_inode &&
@@ -1314,9 +1321,6 @@ retry_lookup:
spin_unlock(&parent->d_lock);
}
- di = dn->d_fsdata;
- di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);
-
/* inode */
if (dn->d_inode) {
in = dn->d_inode;
@@ -1329,26 +1333,39 @@ retry_lookup:
err = PTR_ERR(in);
goto out;
}
- dn = splice_dentry(dn, in, NULL, false);
- if (IS_ERR(dn))
- dn = NULL;
}
if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
req->r_request_started, -1,
&req->r_caps_reservation) < 0) {
pr_err("fill_inode badness on %p\n", in);
+ if (!dn->d_inode)
+ iput(in);
+ d_drop(dn);
goto next_item;
}
- if (dn)
- update_dentry_lease(dn, rinfo->dir_dlease[i],
- req->r_session,
- req->r_request_started);
+
+ if (!dn->d_inode) {
+ dn = splice_dentry(dn, in, NULL, false);
+ if (IS_ERR(dn)) {
+ err = PTR_ERR(dn);
+ dn = NULL;
+ goto next_item;
+ }
+ }
+
+ di = dn->d_fsdata;
+ di->offset = ceph_make_fpos(frag, i + r_readdir_offset);
+
+ update_dentry_lease(dn, rinfo->dir_dlease[i],
+ req->r_session,
+ req->r_request_started);
next_item:
if (dn)
dput(dn);
}
- req->r_did_prepopulate = true;
+ if (err == 0)
+ req->r_did_prepopulate = true;
out:
if (snapdir) {
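
Earlier in this file, the fill_inode() fragtree hunk replaces per-entry __get_or_create_frag() lookups with a single merge pass: both the incoming split list and the existing i_fragtree are sorted by fragment id, so one forward walk can reuse matching nodes, create missing ones, and prune stale ones. The same two-pointer discipline over sorted arrays (illustrative, userspace):

    #include <stdio.h>

    int main(void)
    {
        int existing[] = { 1, 3, 5, 8 };   /* current tree, sorted */
        int incoming[] = { 3, 6, 8 };      /* new authoritative list, sorted */
        int i = 0, n_exist = 4, n_in = 3;

        for (int j = 0; j < n_in; j++) {
            /* prune entries smaller than the next incoming id */
            while (i < n_exist && existing[i] < incoming[j])
                printf("drop %d\n", existing[i++]);
            if (i < n_exist && existing[i] == incoming[j])
                printf("reuse %d\n", existing[i++]);
            else
                printf("create %d\n", incoming[j]);
        }
        while (i < n_exist)
            printf("drop %d\n", existing[i++]); /* trailing stale entries */
        return 0;
    }
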
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index b7bda5d9611d..d90861f45210 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -43,6 +43,7 @@
*/
struct ceph_reconnect_state {
+ int nr_caps;
struct ceph_pagelist *pagelist;
bool flock;
};
@@ -443,6 +444,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
INIT_LIST_HEAD(&s->s_waiting);
INIT_LIST_HEAD(&s->s_unsafe);
s->s_num_cap_releases = 0;
+ s->s_cap_reconnect = 0;
s->s_cap_iterator = NULL;
INIT_LIST_HEAD(&s->s_cap_releases);
INIT_LIST_HEAD(&s->s_cap_releases_done);
@@ -642,6 +644,8 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
req->r_unsafe_dir = NULL;
}
+ complete_all(&req->r_safe_completion);
+
ceph_mdsc_put_request(req);
}
@@ -986,7 +990,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
dout("removing cap %p, ci is %p, inode is %p\n",
cap, ci, &ci->vfs_inode);
spin_lock(&ci->i_ceph_lock);
- __ceph_remove_cap(cap);
+ __ceph_remove_cap(cap, false);
if (!__ceph_is_any_real_caps(ci)) {
struct ceph_mds_client *mdsc =
ceph_sb_to_client(inode->i_sb)->mdsc;
@@ -1231,9 +1235,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
session->s_trim_caps--;
if (oissued) {
/* we aren't the only cap.. just remove us */
- __queue_cap_release(session, ceph_ino(inode), cap->cap_id,
- cap->mseq, cap->issue_seq);
- __ceph_remove_cap(cap);
+ __ceph_remove_cap(cap, true);
} else {
/* try to drop referring dentries */
spin_unlock(&ci->i_ceph_lock);
@@ -1416,7 +1418,6 @@ static void discard_cap_releases(struct ceph_mds_client *mdsc,
unsigned num;
dout("discard_cap_releases mds%d\n", session->s_mds);
- spin_lock(&session->s_cap_lock);
/* zero out the in-progress message */
msg = list_first_entry(&session->s_cap_releases,
@@ -1443,8 +1444,6 @@ static void discard_cap_releases(struct ceph_mds_client *mdsc,
msg->front.iov_len = sizeof(*head);
list_add(&msg->list_head, &session->s_cap_releases);
}
-
- spin_unlock(&session->s_cap_lock);
}
/*
@@ -1875,8 +1874,11 @@ static int __do_request(struct ceph_mds_client *mdsc,
int mds = -1;
int err = -EAGAIN;
- if (req->r_err || req->r_got_result)
+ if (req->r_err || req->r_got_result) {
+ if (req->r_aborted)
+ __unregister_request(mdsc, req);
goto out;
+ }
if (req->r_timeout &&
time_after_eq(jiffies, req->r_started + req->r_timeout)) {
@@ -2186,7 +2188,6 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
if (head->safe) {
req->r_got_safe = true;
__unregister_request(mdsc, req);
- complete_all(&req->r_safe_completion);
if (req->r_got_unsafe) {
/*
@@ -2238,8 +2239,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
if (err == 0) {
if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
- req->r_op == CEPH_MDS_OP_LSSNAP) &&
- rinfo->dir_nr)
+ req->r_op == CEPH_MDS_OP_LSSNAP))
ceph_readdir_prepopulate(req, req->r_session);
ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
}
@@ -2490,6 +2490,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
cap->seq = 0; /* reset cap seq */
cap->issue_seq = 0; /* and issue_seq */
cap->mseq = 0; /* and migrate_seq */
+ cap->cap_gen = cap->session->s_cap_gen;
if (recon_state->flock) {
rec.v2.cap_id = cpu_to_le64(cap->cap_id);
@@ -2552,6 +2553,8 @@ encode_again:
} else {
err = ceph_pagelist_append(pagelist, &rec, reclen);
}
+
+ recon_state->nr_caps++;
out_free:
kfree(path);
out_dput:
@@ -2579,6 +2582,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
struct rb_node *p;
int mds = session->s_mds;
int err = -ENOMEM;
+ int s_nr_caps;
struct ceph_pagelist *pagelist;
struct ceph_reconnect_state recon_state;
@@ -2610,20 +2614,38 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
dout("session %p state %s\n", session,
session_state_name(session->s_state));
+ spin_lock(&session->s_gen_ttl_lock);
+ session->s_cap_gen++;
+ spin_unlock(&session->s_gen_ttl_lock);
+
+ spin_lock(&session->s_cap_lock);
+ /*
+ * Notify __ceph_remove_cap() that we are composing a cap reconnect.
+ * If a cap gets released before being added to the cap reconnect,
+ * __ceph_remove_cap() should skip queuing the cap release.
+ */
+ session->s_cap_reconnect = 1;
/* drop old cap expires; we're about to reestablish that state */
discard_cap_releases(mdsc, session);
+ spin_unlock(&session->s_cap_lock);
/* traverse this session's caps */
- err = ceph_pagelist_encode_32(pagelist, session->s_nr_caps);
+ s_nr_caps = session->s_nr_caps;
+ err = ceph_pagelist_encode_32(pagelist, s_nr_caps);
if (err)
goto fail;
+ recon_state.nr_caps = 0;
recon_state.pagelist = pagelist;
recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK;
err = iterate_session_caps(session, encode_caps_cb, &recon_state);
if (err < 0)
goto fail;
+ spin_lock(&session->s_cap_lock);
+ session->s_cap_reconnect = 0;
+ spin_unlock(&session->s_cap_lock);
+
/*
* snaprealms. we provide mds with the ino, seq (version), and
* parent for all of our realms. If the mds has any newer info,
@@ -2646,11 +2668,18 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
if (recon_state.flock)
reply->hdr.version = cpu_to_le16(2);
- if (pagelist->length) {
- /* set up outbound data if we have any */
- reply->hdr.data_len = cpu_to_le32(pagelist->length);
- ceph_msg_data_add_pagelist(reply, pagelist);
+
+ /* raced with cap release? */
+ if (s_nr_caps != recon_state.nr_caps) {
+ struct page *page = list_first_entry(&pagelist->head,
+ struct page, lru);
+ __le32 *addr = kmap_atomic(page);
+ *addr = cpu_to_le32(recon_state.nr_caps);
+ kunmap_atomic(addr);
}
+
+ reply->hdr.data_len = cpu_to_le32(pagelist->length);
+ ceph_msg_data_add_pagelist(reply, pagelist);
ceph_con_send(&session->s_con, reply);
mutex_unlock(&session->s_mutex);
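
The reconnect path above streams one record per cap into a pagelist after first encoding s_nr_caps as a count prefix; since caps can now be released while the list is being walked, the final count may differ, and the patch backpatches the 32-bit prefix in the first page via kmap_atomic(). The "reserve a count, stream records, backpatch on mismatch" shape in miniature (userspace, illustrative):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned char buf[64];
        size_t off = 0;
        uint32_t count = 0, expected = 3;

        /* Reserve room for the count up front, as the reconnect message
         * does with ceph_pagelist_encode_32(). */
        memcpy(buf + off, &expected, sizeof(expected));
        off += sizeof(expected);

        for (int i = 0; i < 3; i++) {
            if (i == 1)
                continue;                  /* simulate a cap released mid-walk */
            buf[off++] = (unsigned char)i; /* encode one record */
            count++;
        }

        if (count != expected)             /* raced: backpatch the prefix */
            memcpy(buf, &count, sizeof(count));

        memcpy(&expected, buf, sizeof(expected));
        printf("encoded %u records\n", expected);
        return 0;
    }
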
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index c2a19fbbe517..4c053d099ae4 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -132,6 +132,7 @@ struct ceph_mds_session {
struct list_head s_caps; /* all caps issued by this session */
int s_nr_caps, s_trim_caps;
int s_num_cap_releases;
+ int s_cap_reconnect;
struct list_head s_cap_releases; /* waiting cap_release messages */
struct list_head s_cap_releases_done; /* ready to send */
struct ceph_cap *s_cap_iterator;
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 6014b0a3c405..ef4ac38bb614 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -741,13 +741,7 @@ extern int ceph_add_cap(struct inode *inode,
int fmode, unsigned issued, unsigned wanted,
unsigned cap, unsigned seq, u64 realmino, int flags,
struct ceph_cap_reservation *caps_reservation);
-extern void __ceph_remove_cap(struct ceph_cap *cap);
-static inline void ceph_remove_cap(struct ceph_cap *cap)
-{
- spin_lock(&cap->ci->i_ceph_lock);
- __ceph_remove_cap(cap);
- spin_unlock(&cap->ci->i_ceph_lock);
-}
+extern void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
extern void ceph_put_cap(struct ceph_mds_client *mdsc,
struct ceph_cap *cap);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index d9ea7ada1378..f918a998a087 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -384,6 +384,7 @@ struct smb_version_operations {
int (*clone_range)(const unsigned int, struct cifsFileInfo *src_file,
struct cifsFileInfo *target_file, u64 src_off, u64 len,
u64 dest_off);
+ int (*validate_negotiate)(const unsigned int, struct cifs_tcon *);
};
struct smb_version_values {
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 409b45eefe70..77492301cc2b 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -26,13 +26,15 @@
#include <linux/mount.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
-#include <linux/btrfs.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifsfs.h"
+#define CIFS_IOCTL_MAGIC 0xCF
+#define CIFS_IOC_COPYCHUNK_FILE _IOW(CIFS_IOCTL_MAGIC, 3, int)
+
static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
unsigned long srcfd, u64 off, u64 len, u64 destoff)
{
@@ -213,7 +215,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
cifs_dbg(FYI, "set compress flag rc %d\n", rc);
}
break;
- case BTRFS_IOC_CLONE:
+ case CIFS_IOC_COPYCHUNK_FILE:
rc = cifs_ioctl_clone(xid, filep, arg, 0, 0, 0);
break;
default:
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 11dde4b24f8a..757da3e54d3d 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -532,7 +532,10 @@ smb2_clone_range(const unsigned int xid,
int rc;
unsigned int ret_data_len;
struct copychunk_ioctl *pcchunk;
- char *retbuf = NULL;
+ struct copychunk_ioctl_rsp *retbuf = NULL;
+ struct cifs_tcon *tcon;
+ int chunks_copied = 0;
+ bool chunk_sizes_updated = false;
pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
@@ -547,27 +550,96 @@ smb2_clone_range(const unsigned int xid,
/* Note: request_res_key sets res_key null only if rc !=0 */
if (rc)
- return rc;
+ goto cchunk_out;
/* For now array only one chunk long, will make more flexible later */
pcchunk->ChunkCount = __constant_cpu_to_le32(1);
pcchunk->Reserved = 0;
- pcchunk->SourceOffset = cpu_to_le64(src_off);
- pcchunk->TargetOffset = cpu_to_le64(dest_off);
- pcchunk->Length = cpu_to_le32(len);
pcchunk->Reserved2 = 0;
- /* Request that server copy to target from src file identified by key */
- rc = SMB2_ioctl(xid, tlink_tcon(trgtfile->tlink),
- trgtfile->fid.persistent_fid,
- trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
- true /* is_fsctl */, (char *)pcchunk,
- sizeof(struct copychunk_ioctl), &retbuf, &ret_data_len);
+ tcon = tlink_tcon(trgtfile->tlink);
- /* BB need to special case rc = EINVAL to alter chunk size */
+ while (len > 0) {
+ pcchunk->SourceOffset = cpu_to_le64(src_off);
+ pcchunk->TargetOffset = cpu_to_le64(dest_off);
+ pcchunk->Length =
+ cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
- cifs_dbg(FYI, "rc %d data length out %d\n", rc, ret_data_len);
+ /* Request server copy to target from src identified by key */
+ rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
+ trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
+ true /* is_fsctl */, (char *)pcchunk,
+ sizeof(struct copychunk_ioctl), (char **)&retbuf,
+ &ret_data_len);
+ if (rc == 0) {
+ if (ret_data_len !=
+ sizeof(struct copychunk_ioctl_rsp)) {
+ cifs_dbg(VFS, "invalid cchunk response size\n");
+ rc = -EIO;
+ goto cchunk_out;
+ }
+ if (retbuf->TotalBytesWritten == 0) {
+ cifs_dbg(FYI, "no bytes copied\n");
+ rc = -EIO;
+ goto cchunk_out;
+ }
+ /*
+ * Check if the server claimed to write more than we asked for
+ */
+ if (le32_to_cpu(retbuf->TotalBytesWritten) >
+ le32_to_cpu(pcchunk->Length)) {
+ cifs_dbg(VFS, "invalid copy chunk response\n");
+ rc = -EIO;
+ goto cchunk_out;
+ }
+ if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
+ cifs_dbg(VFS, "invalid num chunks written\n");
+ rc = -EIO;
+ goto cchunk_out;
+ }
+ chunks_copied++;
+
+ src_off += le32_to_cpu(retbuf->TotalBytesWritten);
+ dest_off += le32_to_cpu(retbuf->TotalBytesWritten);
+ len -= le32_to_cpu(retbuf->TotalBytesWritten);
+
+ cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %d\n",
+ le32_to_cpu(retbuf->ChunksWritten),
+ le32_to_cpu(retbuf->ChunkBytesWritten),
+ le32_to_cpu(retbuf->TotalBytesWritten));
+ } else if (rc == -EINVAL) {
+ if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
+ goto cchunk_out;
+
+ cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
+ le32_to_cpu(retbuf->ChunksWritten),
+ le32_to_cpu(retbuf->ChunkBytesWritten),
+ le32_to_cpu(retbuf->TotalBytesWritten));
+
+ /*
+ * Check if this is the first request using these sizes
+ * (i.e. whether the copy succeeded once with the original sizes,
+ * and whether the server gave us different sizes after we
+ * already updated the max sizes on a previous request).
+ * If not, why is the server returning an error now?
+ */
+ if ((chunks_copied != 0) || chunk_sizes_updated)
+ goto cchunk_out;
+
+ /* Check that server is not asking us to grow size */
+ if (le32_to_cpu(retbuf->ChunkBytesWritten) <
+ tcon->max_bytes_chunk)
+ tcon->max_bytes_chunk =
+ le32_to_cpu(retbuf->ChunkBytesWritten);
+ else
+ goto cchunk_out; /* server gave us bogus size */
+
+ /* No need to change MaxChunks since already set to 1 */
+ chunk_sizes_updated = true;
+ }
+ }
+cchunk_out:
kfree(pcchunk);
return rc;
}
@@ -1247,6 +1319,7 @@ struct smb_version_operations smb30_operations = {
.create_lease_buf = smb3_create_lease_buf,
.parse_lease_buf = smb3_parse_lease_buf,
.clone_range = smb2_clone_range,
+ .validate_negotiate = smb3_validate_negotiate,
};
struct smb_version_values smb20_values = {
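
smb2_clone_range() above turns a single fire-and-forget FSCTL_SRV_COPYCHUNK_WRITE into a loop: clamp each request to the negotiated per-chunk maximum, advance both offsets by the bytes the server reports as actually written, and shrink the remaining length until it reaches zero. Stripped of the SMB framing, the loop looks like this (the server round trip is an illustrative stand-in):

    #include <stdio.h>

    /* Stand-in for one FSCTL_SRV_COPYCHUNK_WRITE round trip: the server
     * reports how many bytes it actually wrote. */
    static unsigned server_copychunk(unsigned long long src,
                                     unsigned long long dst, unsigned len)
    {
        (void)src; (void)dst;
        return len;
    }

    int main(void)
    {
        unsigned long long src_off = 0, dest_off = 0;
        unsigned long long len = 3000000;
        unsigned max_bytes_chunk = 1048576;   /* illustrative 1M default */

        while (len > 0) {
            unsigned ask = len < max_bytes_chunk ? (unsigned)len
                                                 : max_bytes_chunk;
            unsigned written = server_copychunk(src_off, dest_off, ask);

            src_off += written;
            dest_off += written;
            len -= written;
            printf("copied %u, %llu left\n", written, len);
        }
        return 0;
    }
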
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index d65270c290a1..2013234b73ad 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -454,6 +454,81 @@ neg_exit:
return rc;
}
+int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
+{
+ int rc = 0;
+ struct validate_negotiate_info_req vneg_inbuf;
+ struct validate_negotiate_info_rsp *pneg_rsp;
+ u32 rsplen;
+
+ cifs_dbg(FYI, "validate negotiate\n");
+
+ /*
+ * The validation ioctl must be signed, so there is no point sending
+ * this if we cannot sign it. We could eventually change this to
+ * selectively sign just this, the first and only signed request on
+ * a connection. This is good enough for now since a user who wants
+ * better security would also enable signing on the mount. Having
+ * validation of negotiate info for signed connections helps reduce
+ * attack vectors.
+ */
+ if (tcon->ses->server->sign == false)
+ return 0; /* validation requires signing */
+
+ vneg_inbuf.Capabilities =
+ cpu_to_le32(tcon->ses->server->vals->req_capabilities);
+ memcpy(vneg_inbuf.Guid, cifs_client_guid, SMB2_CLIENT_GUID_SIZE);
+
+ if (tcon->ses->sign)
+ vneg_inbuf.SecurityMode =
+ cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
+ else if (global_secflags & CIFSSEC_MAY_SIGN)
+ vneg_inbuf.SecurityMode =
+ cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
+ else
+ vneg_inbuf.SecurityMode = 0;
+
+ vneg_inbuf.DialectCount = cpu_to_le16(1);
+ vneg_inbuf.Dialects[0] =
+ cpu_to_le16(tcon->ses->server->vals->protocol_id);
+
+ rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
+ FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
+ (char *)&vneg_inbuf, sizeof(struct validate_negotiate_info_req),
+ (char **)&pneg_rsp, &rsplen);
+
+ if (rc != 0) {
+ cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
+ return -EIO;
+ }
+
+ if (rsplen != sizeof(struct validate_negotiate_info_rsp)) {
+ cifs_dbg(VFS, "invalid size of protocol negotiate response\n");
+ return -EIO;
+ }
+
+ /* check validate negotiate info response matches what we got earlier */
+ if (pneg_rsp->Dialect !=
+ cpu_to_le16(tcon->ses->server->vals->protocol_id))
+ goto vneg_out;
+
+ if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode))
+ goto vneg_out;
+
+ /* do not validate server guid because not saved at negprot time yet */
+
+ if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
+ SMB2_LARGE_FILES) != tcon->ses->server->capabilities)
+ goto vneg_out;
+
+ /* validate negotiate successful */
+ cifs_dbg(FYI, "validate negotiate info successful\n");
+ return 0;
+
+vneg_out:
+ cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n");
+ return -EIO;
+}
+
int
SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
const struct nls_table *nls_cp)
@@ -829,6 +904,8 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
cifs_dbg(VFS, "DFS capability contradicts DFS flag\n");
init_copy_chunk_defaults(tcon);
+ if (tcon->ses->server->ops->validate_negotiate)
+ rc = tcon->ses->server->ops->validate_negotiate(xid, tcon);
tcon_exit:
free_rsp_buf(resp_buftype, rsp);
kfree(unc_path);
@@ -1214,10 +1291,17 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base;
- if (rc != 0) {
+ if ((rc != 0) && (rc != -EINVAL)) {
if (tcon)
cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
goto ioctl_exit;
+ } else if (rc == -EINVAL) {
+ if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
+ (opcode != FSCTL_SRV_COPYCHUNK)) {
+ if (tcon)
+ cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
+ goto ioctl_exit;
+ }
}
/* check if caller wants to look at return data or just return rc */
@@ -2154,11 +2238,9 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
rc = SendReceive2(xid, ses, iov, num, &resp_buftype, 0);
rsp = (struct smb2_set_info_rsp *)iov[0].iov_base;
- if (rc != 0) {
+ if (rc != 0)
cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
- goto out;
- }
-out:
+
free_rsp_buf(resp_buftype, rsp);
kfree(iov);
return rc;
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index f88320bbb477..2022c542ea3a 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -577,13 +577,19 @@ struct copychunk_ioctl_rsp {
__le32 TotalBytesWritten;
} __packed;
-/* Response and Request are the same format */
-struct validate_negotiate_info {
+struct validate_negotiate_info_req {
__le32 Capabilities;
__u8 Guid[SMB2_CLIENT_GUID_SIZE];
__le16 SecurityMode;
__le16 DialectCount;
- __le16 Dialect[1];
+ __le16 Dialects[1]; /* dialect (someday maybe list) client asked for */
+} __packed;
+
+struct validate_negotiate_info_rsp {
+ __le32 Capabilities;
+ __u8 Guid[SMB2_CLIENT_GUID_SIZE];
+ __le16 SecurityMode;
+ __le16 Dialect; /* Dialect in use for the connection */
} __packed;
#define RSS_CAPABLE 0x00000001
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index b4eea105b08c..93adc64666f3 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -162,5 +162,6 @@ extern int smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
struct smb2_lock_element *buf);
extern int SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
__u8 *lease_key, const __le32 lease_state);
+extern int smb3_validate_negotiate(const unsigned int, struct cifs_tcon *);
#endif /* _SMB2PROTO_H */
diff --git a/fs/cifs/smbfsctl.h b/fs/cifs/smbfsctl.h
index a4b2391fe66e..0e538b5c9622 100644
--- a/fs/cifs/smbfsctl.h
+++ b/fs/cifs/smbfsctl.h
@@ -90,7 +90,7 @@
#define FSCTL_LMR_REQUEST_RESILIENCY 0x001401D4 /* BB add struct */
#define FSCTL_LMR_GET_LINK_TRACK_INF 0x001400E8 /* BB add struct */
#define FSCTL_LMR_SET_LINK_TRACK_INF 0x001400EC /* BB add struct */
-#define FSCTL_VALIDATE_NEGOTIATE_INFO 0x00140204 /* BB add struct */
+#define FSCTL_VALIDATE_NEGOTIATE_INFO 0x00140204
/* Perform server-side data movement */
#define FSCTL_SRV_COPYCHUNK 0x001440F2
#define FSCTL_SRV_COPYCHUNK_WRITE 0x001480F2
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 277bd1be21fd..e081acbac2e7 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -56,29 +56,28 @@ static void configfs_d_iput(struct dentry * dentry,
struct configfs_dirent *sd = dentry->d_fsdata;
if (sd) {
- BUG_ON(sd->s_dentry != dentry);
/* Coordinate with configfs_readdir */
spin_lock(&configfs_dirent_lock);
- sd->s_dentry = NULL;
+ /* Coordinate with configfs_attach_attr(), which will increase
+ * sd->s_count and update sd->s_dentry to the newly allocated one.
+ * Only set sd->s_dentry to NULL when this dentry is the only
+ * sd owner.
+ * Otherwise, configfs_d_iput may run just after
+ * configfs_attach_attr and set sd->s_dentry to NULL
+ * even though it is still in use.
+ */
+ if (atomic_read(&sd->s_count) <= 2)
+ sd->s_dentry = NULL;
+
spin_unlock(&configfs_dirent_lock);
configfs_put(sd);
}
iput(inode);
}
-/*
- * We _must_ delete our dentries on last dput, as the chain-to-parent
- * behavior is required to clear the parents of default_groups.
- */
-static int configfs_d_delete(const struct dentry *dentry)
-{
- return 1;
-}
-
const struct dentry_operations configfs_dentry_ops = {
.d_iput = configfs_d_iput,
- /* simple_delete_dentry() isn't exported */
- .d_delete = configfs_d_delete,
+ .d_delete = always_delete_dentry,
};
#ifdef CONFIG_LOCKDEP
@@ -426,8 +425,11 @@ static int configfs_attach_attr(struct configfs_dirent * sd, struct dentry * den
struct configfs_attribute * attr = sd->s_element;
int error;
+ spin_lock(&configfs_dirent_lock);
dentry->d_fsdata = configfs_get(sd);
sd->s_dentry = dentry;
+ spin_unlock(&configfs_dirent_lock);
+
error = configfs_create(dentry, (attr->ca_mode & S_IALLUGO) | S_IFREG,
configfs_init_file);
if (error) {
diff --git a/fs/coredump.c b/fs/coredump.c
index 62406b6959b6..bc3fbcd32558 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -695,7 +695,7 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
while (nr) {
if (dump_interrupted())
return 0;
- n = vfs_write(file, addr, nr, &pos);
+ n = __kernel_write(file, addr, nr, &pos);
if (n <= 0)
return 0;
file->f_pos = pos;
@@ -733,7 +733,7 @@ int dump_align(struct coredump_params *cprm, int align)
{
unsigned mod = cprm->written & (align - 1);
if (align & (align - 1))
- return -EINVAL;
- return mod ? dump_skip(cprm, align - mod) : 0;
+ return 0;
+ return mod ? dump_skip(cprm, align - mod) : 1;
}
EXPORT_SYMBOL(dump_align);
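
dump_align() now follows the boolean convention of the other dump_* helpers, returning nonzero on success, and keeps the standard single-bit alignment check: a power of two has exactly one bit set, so x & (x - 1) is zero for powers of two (and for zero). In isolation:

    #include <stdio.h>

    /* Power-of-two test as used by dump_align(). */
    static int is_pow2(unsigned x)
    {
        return x && !(x & (x - 1));
    }

    int main(void)
    {
        for (unsigned x = 1; x <= 9; x++)
            printf("%u %s\n", x, is_pow2(x) ? "pow2" : "not");
        return 0;
    }
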
diff --git a/fs/dcache.c b/fs/dcache.c
index 0a38ef8d7f00..6055d61811d3 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -88,35 +88,6 @@ EXPORT_SYMBOL(rename_lock);
static struct kmem_cache *dentry_cache __read_mostly;
-/**
- * read_seqbegin_or_lock - begin a sequence number check or locking block
- * @lock: sequence lock
- * @seq : sequence number to be checked
- *
- * First try it once optimistically without taking the lock. If that fails,
- * take the lock. The sequence number is also used as a marker for deciding
- * whether to be a reader (even) or writer (odd).
- * N.B. seq must be initialized to an even number to begin with.
- */
-static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
-{
- if (!(*seq & 1)) /* Even */
- *seq = read_seqbegin(lock);
- else /* Odd */
- read_seqlock_excl(lock);
-}
-
-static inline int need_seqretry(seqlock_t *lock, int seq)
-{
- return !(seq & 1) && read_seqretry(lock, seq);
-}
-
-static inline void done_seqretry(seqlock_t *lock, int seq)
-{
- if (seq & 1)
- read_sequnlock_excl(lock);
-}
-
/*
* This is the single most critical data structure when it comes
* to the dcache: the hashtable for lookups. Somebody should try
@@ -125,8 +96,6 @@ static inline void done_seqretry(seqlock_t *lock, int seq)
* This hash-function tries to avoid losing too many bits of hash
* information, yet avoid using a prime hash-size or similar.
*/
-#define D_HASHBITS d_hash_shift
-#define D_HASHMASK d_hash_mask
static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;
@@ -137,8 +106,8 @@ static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
unsigned int hash)
{
hash += (unsigned long) parent / L1_CACHE_BYTES;
- hash = hash + (hash >> D_HASHBITS);
- return dentry_hashtable + (hash & D_HASHMASK);
+ hash = hash + (hash >> d_hash_shift);
+ return dentry_hashtable + (hash & d_hash_mask);
}
/* Statistics gathering. */
@@ -223,7 +192,7 @@ static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char
if (!tcount)
return 0;
}
- mask = ~(~0ul << tcount*8);
+ mask = bytemask_from_count(tcount);
return unlikely(!!((a ^ b) & mask));
}
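
bytemask_from_count() names the mask this function was already building by hand: for a count of n bytes it keeps the low n bytes of an unsigned long. Evaluated for each partial-word count (assuming a 64-bit unsigned long; callers never pass a full word, which would shift by the type's width):

    #include <stdio.h>

    /* Same expression the kernel now centralizes as bytemask_from_count():
     * a mask covering the low 'cnt' bytes of an unsigned long. */
    #define bytemask_from_count(cnt) (~(~0ul << (cnt) * 8))

    int main(void)
    {
        for (int cnt = 1; cnt < 8; cnt++)   /* cnt == 8 would shift by 64 */
            printf("%d byte(s) -> %#018lx\n", cnt, bytemask_from_count(cnt));
        return 0;
    }
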
@@ -469,7 +438,7 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
{
list_del(&dentry->d_u.d_child);
/*
- * Inform try_to_ascend() that we are no longer attached to the
+ * Inform d_walk() that we are no longer attached to the
* dentry tree
*/
dentry->d_flags |= DCACHE_DENTRY_KILLED;
@@ -1069,34 +1038,6 @@ void shrink_dcache_sb(struct super_block *sb)
}
EXPORT_SYMBOL(shrink_dcache_sb);
-/*
- * This tries to ascend one level of parenthood, but
- * we can race with renaming, so we need to re-check
- * the parenthood after dropping the lock and check
- * that the sequence number still matches.
- */
-static struct dentry *try_to_ascend(struct dentry *old, unsigned seq)
-{
- struct dentry *new = old->d_parent;
-
- rcu_read_lock();
- spin_unlock(&old->d_lock);
- spin_lock(&new->d_lock);
-
- /*
- * might go back up the wrong parent if we have had a rename
- * or deletion
- */
- if (new != old->d_parent ||
- (old->d_flags & DCACHE_DENTRY_KILLED) ||
- need_seqretry(&rename_lock, seq)) {
- spin_unlock(&new->d_lock);
- new = NULL;
- }
- rcu_read_unlock();
- return new;
-}
-
/**
* enum d_walk_ret - action to take during tree walk
* @D_WALK_CONTINUE: continue walk
@@ -1185,9 +1126,24 @@ resume:
*/
if (this_parent != parent) {
struct dentry *child = this_parent;
- this_parent = try_to_ascend(this_parent, seq);
- if (!this_parent)
+ this_parent = child->d_parent;
+
+ rcu_read_lock();
+ spin_unlock(&child->d_lock);
+ spin_lock(&this_parent->d_lock);
+
+ /*
+ * might go back up the wrong parent if we have had a rename
+ * or deletion
+ */
+ if (this_parent != child->d_parent ||
+ (child->d_flags & DCACHE_DENTRY_KILLED) ||
+ need_seqretry(&rename_lock, seq)) {
+ spin_unlock(&this_parent->d_lock);
+ rcu_read_unlock();
goto rename_retry;
+ }
+ rcu_read_unlock();
next = child->d_u.d_child.next;
goto resume;
}
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 2229a74aeeed..b1eaa7a1f82c 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -313,11 +313,9 @@ static int ecryptfs_fasync(int fd, struct file *file, int flag)
static long
ecryptfs_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct file *lower_file = NULL;
+ struct file *lower_file = ecryptfs_file_to_lower(file);
long rc = -ENOTTY;
- if (ecryptfs_file_to_private(file))
- lower_file = ecryptfs_file_to_lower(file);
if (lower_file->f_op->unlocked_ioctl)
rc = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
return rc;
@@ -327,11 +325,9 @@ ecryptfs_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static long
ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct file *lower_file = NULL;
+ struct file *lower_file = ecryptfs_file_to_lower(file);
long rc = -ENOIOCTLCMD;
- if (ecryptfs_file_to_private(file))
- lower_file = ecryptfs_file_to_lower(file);
if (lower_file->f_op && lower_file->f_op->compat_ioctl)
rc = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
return rc;
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index a8766b880c07..becc725a1953 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -83,19 +83,10 @@ static int efivarfs_d_hash(const struct dentry *dentry, struct qstr *qstr)
return 0;
}
-/*
- * Retaining negative dentries for an in-memory filesystem just wastes
- * memory and lookup time: arrange for them to be deleted immediately.
- */
-static int efivarfs_delete_dentry(const struct dentry *dentry)
-{
- return 1;
-}
-
static struct dentry_operations efivarfs_d_ops = {
.d_compare = efivarfs_d_compare,
.d_hash = efivarfs_d_hash,
- .d_delete = efivarfs_delete_dentry,
+ .d_delete = always_delete_dentry,
};
static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 79b65c3b9e87..8b5e2584c840 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1852,8 +1852,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
goto error_tgt_fput;
/* Check if EPOLLWAKEUP is allowed */
- if ((epds.events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
- epds.events &= ~EPOLLWAKEUP;
+ ep_take_care_of_epollwakeup(&epds);
/*
* We have to check that the file structure underneath the file descriptor
diff --git a/fs/exec.c b/fs/exec.c
index 977319fd77f3..7ea097f6b341 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1380,10 +1380,6 @@ int search_binary_handler(struct linux_binprm *bprm)
if (retval)
return retval;
- retval = audit_bprm(bprm);
- if (retval)
- return retval;
-
retval = -ENOENT;
retry:
read_lock(&binfmt_lock);
@@ -1431,6 +1427,7 @@ static int exec_binprm(struct linux_binprm *bprm)
ret = search_binary_handler(bprm);
if (ret >= 0) {
+ audit_bprm(bprm);
trace_sched_process_exec(current, old_pid, bprm);
ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
current->did_exec = 1;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 288534920fe5..20d6697bd638 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1493,6 +1493,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
sb->s_blocksize - offset : towrite;
tmp_bh.b_state = 0;
+ tmp_bh.b_size = sb->s_blocksize;
err = ext2_get_block(inode, blk, &tmp_bh, 1);
if (err < 0)
goto out;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index e6185031c1cc..ece55565b9cd 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -268,6 +268,16 @@ struct ext4_io_submit {
/* Translate # of blks to # of clusters */
#define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \
(sbi)->s_cluster_bits)
+/* Mask out the low bits to get the starting block of the cluster */
+#define EXT4_PBLK_CMASK(s, pblk) ((pblk) & \
+ ~((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
+#define EXT4_LBLK_CMASK(s, lblk) ((lblk) & \
+ ~((ext4_lblk_t) (s)->s_cluster_ratio - 1))
+/* Get the cluster offset */
+#define EXT4_PBLK_COFF(s, pblk) ((pblk) & \
+ ((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
+#define EXT4_LBLK_COFF(s, lblk) ((lblk) & \
+ ((ext4_lblk_t) (s)->s_cluster_ratio - 1))
/*
* Structure of a blocks group descriptor
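
The four masking macros above exist to fix a subtle promotion bug: s_cluster_ratio is an unsigned int, so an expression like pblk & ~(s_cluster_ratio - 1) inverts a 32-bit value; zero-extension against a 64-bit ext4_fsblk_t then silently clears the top 32 bits of the block number. The macros widen the ratio before inverting. A standalone demonstration:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long pblk = 0x100000010ULL; /* block above 2^32 */
        unsigned int ratio = 16;                  /* cluster ratio */

        /* Buggy: ~(ratio - 1) is a 32-bit unsigned value, so it
         * zero-extends and wipes the high 32 bits of pblk. */
        printf("%#llx\n", pblk & ~(ratio - 1));                   /* 0x10 */

        /* Fixed, as EXT4_PBLK_CMASK() now does: widen first. */
        printf("%#llx\n", pblk & ~((unsigned long long)ratio - 1));
        return 0;
    }
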
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 17ac112ab101..3fe29de832c8 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -259,6 +259,15 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
if (WARN_ON_ONCE(err)) {
ext4_journal_abort_handle(where, line, __func__, bh,
handle, err);
+ ext4_error_inode(inode, where, line,
+ bh->b_blocknr,
+ "journal_dirty_metadata failed: "
+ "handle type %u started at line %u, "
+ "credits %u/%u, errcode %d",
+ handle->h_type,
+ handle->h_line_no,
+ handle->h_requested_credits,
+ handle->h_buffer_credits, err);
}
} else {
if (inode)
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 35f65cf4f318..4410cc3d6ee2 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -360,8 +360,10 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
ext4_fsblk_t block = ext4_ext_pblock(ext);
int len = ext4_ext_get_actual_len(ext);
+ ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
+ ext4_lblk_t last = lblock + len - 1;
- if (len == 0)
+ if (lblock > last)
return 0;
return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}
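
ext4_valid_extent() above strengthens its sanity check: comparing lblock against the computed last block catches both a zero length (last underflows below lblock) and a length that wraps the 32-bit logical block space, which the old len == 0 test missed. Concretely:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t lblock = 0xfffffff0u;
        int len = 0x20;                    /* extent length */
        uint32_t last = lblock + len - 1;  /* wraps around 2^32 */

        /* The old check (len == 0) misses this; the new one catches it. */
        printf("len==0? %d   lblock > last? %d\n", len == 0, lblock > last);
        return 0;
    }
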
@@ -387,11 +389,26 @@ static int ext4_valid_extent_entries(struct inode *inode,
if (depth == 0) {
/* leaf entries */
struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
+ struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
+ ext4_fsblk_t pblock = 0;
+ ext4_lblk_t lblock = 0;
+ ext4_lblk_t prev = 0;
+ int len = 0;
while (entries) {
if (!ext4_valid_extent(inode, ext))
return 0;
+
+ /* Check for overlapping extents */
+ lblock = le32_to_cpu(ext->ee_block);
+ len = ext4_ext_get_actual_len(ext);
+ if ((lblock <= prev) && prev) {
+ pblock = ext4_ext_pblock(ext);
+ es->s_last_error_block = cpu_to_le64(pblock);
+ return 0;
+ }
ext++;
entries--;
+ prev = lblock + len - 1;
}
} else {
struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
@@ -1834,8 +1851,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
depth = ext_depth(inode);
if (!path[depth].p_ext)
goto out;
- b2 = le32_to_cpu(path[depth].p_ext->ee_block);
- b2 &= ~(sbi->s_cluster_ratio - 1);
+ b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
/*
* get the next allocated block if the extent in the path
@@ -1845,7 +1861,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
b2 = ext4_ext_next_allocated_block(path);
if (b2 == EXT_MAX_BLOCKS)
goto out;
- b2 &= ~(sbi->s_cluster_ratio - 1);
+ b2 = EXT4_LBLK_CMASK(sbi, b2);
}
/* check for wrap through zero on extent logical start block*/
@@ -2504,7 +2520,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
* extent, we have to mark the cluster as used (store negative
* cluster number in partial_cluster).
*/
- unaligned = pblk & (sbi->s_cluster_ratio - 1);
+ unaligned = EXT4_PBLK_COFF(sbi, pblk);
if (unaligned && (ee_len == num) &&
(*partial_cluster != -((long long)EXT4_B2C(sbi, pblk))))
*partial_cluster = EXT4_B2C(sbi, pblk);
@@ -2598,7 +2614,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
* accidentally freeing it later on
*/
pblk = ext4_ext_pblock(ex);
- if (pblk & (sbi->s_cluster_ratio - 1))
+ if (EXT4_PBLK_COFF(sbi, pblk))
*partial_cluster =
-((long long)EXT4_B2C(sbi, pblk));
ex--;
@@ -3753,7 +3769,7 @@ int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_lblk_t lblk_start, lblk_end;
- lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
+ lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
@@ -3812,9 +3828,9 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
/* Check towards left side */
- c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
+ c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
if (c_offset) {
- lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
+ lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
lblk_to = lblk_from + c_offset - 1;
if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
@@ -3822,7 +3838,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
}
/* Now check towards right. */
- c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
+ c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
if (allocated_clusters && c_offset) {
lblk_from = lblk_start + num_blks;
lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
@@ -4030,7 +4046,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
struct ext4_ext_path *path)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
+ ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
ext4_lblk_t ex_cluster_start, ex_cluster_end;
ext4_lblk_t rr_cluster_start;
ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
@@ -4048,8 +4064,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
(rr_cluster_start == ex_cluster_start)) {
if (rr_cluster_start == ex_cluster_end)
ee_start += ee_len - 1;
- map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
- c_offset;
+ map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
map->m_len = min(map->m_len,
(unsigned) sbi->s_cluster_ratio - c_offset);
/*
@@ -4203,7 +4218,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
*/
map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
newex.ee_block = cpu_to_le32(map->m_lblk);
- cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
+ cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
/*
* If we are doing bigalloc, check to see if the extent returned
@@ -4271,7 +4286,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
* needed so that future calls to get_implied_cluster_alloc()
* work correctly.
*/
- offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
+ offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
ar.goal -= offset;
ar.logical -= offset;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 075763474118..61d49ff22c81 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1206,7 +1206,6 @@ static int ext4_journalled_write_end(struct file *file,
*/
static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
{
- int retries = 0;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
unsigned int md_needed;
@@ -1218,7 +1217,6 @@ static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
* in order to allocate nrblocks
* worse case is one extent per block
*/
-repeat:
spin_lock(&ei->i_block_reservation_lock);
/*
* ext4_calc_metadata_amount() has side effects, which we have
@@ -1238,10 +1236,6 @@ repeat:
ei->i_da_metadata_calc_len = save_len;
ei->i_da_metadata_calc_last_lblock = save_last_lblock;
spin_unlock(&ei->i_block_reservation_lock);
- if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
- cond_resched();
- goto repeat;
- }
return -ENOSPC;
}
ei->i_reserved_meta_blocks += md_needed;
@@ -1255,7 +1249,6 @@ repeat:
*/
static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
{
- int retries = 0;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
unsigned int md_needed;
@@ -1277,7 +1270,6 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
* in order to allocate nrblocks
* worse case is one extent per block
*/
-repeat:
spin_lock(&ei->i_block_reservation_lock);
/*
* ext4_calc_metadata_amount() has side effects, which we have
@@ -1297,10 +1289,6 @@ repeat:
ei->i_da_metadata_calc_len = save_len;
ei->i_da_metadata_calc_last_lblock = save_last_lblock;
spin_unlock(&ei->i_block_reservation_lock);
- if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
- cond_resched();
- goto repeat;
- }
dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
return -ENOSPC;
}
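With the in-function retry loops gone, a caller that can actually wait for space to be freed (for example, by a journal commit) is the right place to loop; a hedged sketch of such a caller-side retry, reusing ext4_should_retry_alloc() from the removed code (reserve_with_retry itself is hypothetical):

static int reserve_with_retry(struct inode *inode, ext4_lblk_t lblk)
{
	int retries = 0;
	int ret;

	do {
		ret = ext4_da_reserve_space(inode, lblk);
		/* ext4_should_retry_alloc() forces a journal commit,
		 * which may free blocks, and caps the attempt count */
	} while (ret == -ENOSPC &&
		 ext4_should_retry_alloc(inode->i_sb, &retries));
	return ret;
}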
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 4d113efa024c..04a5c7504be9 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3442,6 +3442,9 @@ static void ext4_mb_pa_callback(struct rcu_head *head)
{
struct ext4_prealloc_space *pa;
pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
+
+ BUG_ON(atomic_read(&pa->pa_count));
+ BUG_ON(pa->pa_deleted == 0);
kmem_cache_free(ext4_pspace_cachep, pa);
}
@@ -3455,11 +3458,13 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
ext4_group_t grp;
ext4_fsblk_t grp_blk;
- if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
- return;
-
/* in this short window concurrent discard can set pa_deleted */
spin_lock(&pa->pa_lock);
+ if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
+ spin_unlock(&pa->pa_lock);
+ return;
+ }
+
if (pa->pa_deleted == 1) {
spin_unlock(&pa->pa_lock);
return;
@@ -4121,7 +4126,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
ext4_get_group_no_and_offset(sb, goal, &group, &block);
/* set up allocation goals */
- ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1);
+ ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
ac->ac_status = AC_STATUS_CONTINUE;
ac->ac_sb = sb;
ac->ac_inode = ar->inode;
@@ -4663,7 +4668,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
* blocks at the beginning or the end unless we are explicitly
* requested to avoid doing so.
*/
- overflow = block & (sbi->s_cluster_ratio - 1);
+ overflow = EXT4_PBLK_COFF(sbi, block);
if (overflow) {
if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
overflow = sbi->s_cluster_ratio - overflow;
@@ -4677,7 +4682,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
count += overflow;
}
}
- overflow = count & (sbi->s_cluster_ratio - 1);
+ overflow = EXT4_LBLK_COFF(sbi, count);
if (overflow) {
if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
if (count > overflow)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index c977f4e4e63b..1f7784de05b6 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -792,7 +792,7 @@ static void ext4_put_super(struct super_block *sb)
}
ext4_es_unregister_shrinker(sbi);
- del_timer(&sbi->s_err_report);
+ del_timer_sync(&sbi->s_err_report);
ext4_release_system_zone(sb);
ext4_mb_release(sb);
ext4_ext_release(sb);
@@ -3316,11 +3316,19 @@ int ext4_calculate_overhead(struct super_block *sb)
}
-static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi)
+static ext4_fsblk_t ext4_calculate_resv_clusters(struct super_block *sb)
{
ext4_fsblk_t resv_clusters;
/*
+ * There's no need to reserve anything when we aren't using extents.
+ * The space estimates are exact, there are no unwritten extents,
+ * hole punching doesn't need new metadata... This is needed especially
+ * to keep ext2/3 backward compatibility.
+ */
+ if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
+ return 0;
+ /*
* By default we reserve 2% or 4096 clusters, whichever is smaller.
* This should cover the situations where we can not afford to run
* out of space like for example punch hole, or converting
@@ -3328,7 +3336,8 @@ static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi)
* allocation would require 1, or 2 blocks, higher numbers are
* very rare.
*/
- resv_clusters = ext4_blocks_count(sbi->s_es) >> sbi->s_cluster_bits;
+ resv_clusters = ext4_blocks_count(EXT4_SB(sb)->s_es) >>
+ EXT4_SB(sb)->s_cluster_bits;
do_div(resv_clusters, 50);
resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
@@ -4071,10 +4080,10 @@ no_journal:
"available");
}
- err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sbi));
+ err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sb));
if (err) {
ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for "
- "reserved pool", ext4_calculate_resv_clusters(sbi));
+ "reserved pool", ext4_calculate_resv_clusters(sb));
goto failed_mount4a;
}
@@ -4184,7 +4193,7 @@ failed_mount_wq:
}
failed_mount3:
ext4_es_unregister_shrinker(sbi);
- del_timer(&sbi->s_err_report);
+ del_timer_sync(&sbi->s_err_report);
if (sbi->s_flex_groups)
ext4_kvfree(sbi->s_flex_groups);
percpu_counter_destroy(&sbi->s_freeclusters_counter);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index e66a8009aff1..c8420f7e4db6 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1899,7 +1899,8 @@ static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
gi->nhash = 0;
}
/* Skip entries for other sb and dead entries */
- } while (gi->sdp != gi->gl->gl_sbd || __lockref_is_dead(&gl->gl_lockref));
+ } while (gi->sdp != gi->gl->gl_sbd ||
+ __lockref_is_dead(&gi->gl->gl_lockref));
return 0;
}
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 1615df16cf4e..7119504159f1 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -1171,8 +1171,11 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
if (d != NULL)
dentry = d;
if (dentry->d_inode) {
- if (!(*opened & FILE_OPENED))
+ if (!(*opened & FILE_OPENED)) {
+ if (d == NULL)
+ dget(dentry);
return finish_no_open(file, dentry);
+ }
dput(d);
return 0;
}
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index c8423d6de6c3..2a6ba06bee6f 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -466,19 +466,19 @@ static void gdlm_cancel(struct gfs2_glock *gl)
static void control_lvb_read(struct lm_lockstruct *ls, uint32_t *lvb_gen,
char *lvb_bits)
{
- uint32_t gen;
+ __le32 gen;
memcpy(lvb_bits, ls->ls_control_lvb, GDLM_LVB_SIZE);
- memcpy(&gen, lvb_bits, sizeof(uint32_t));
+ memcpy(&gen, lvb_bits, sizeof(__le32));
*lvb_gen = le32_to_cpu(gen);
}
static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen,
char *lvb_bits)
{
- uint32_t gen;
+ __le32 gen;
memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE);
gen = cpu_to_le32(lvb_gen);
- memcpy(ls->ls_control_lvb, &gen, sizeof(uint32_t));
+ memcpy(ls->ls_control_lvb, &gen, sizeof(__le32));
}
static int all_jid_bits_clear(char *lvb)
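The switch from uint32_t to __le32 is about sparse's endianness checking: the lvb bytes are little-endian on the wire, and the annotation makes any use without a conversion a warning under `make C=1`. A minimal sketch of the idiom:

/* A __le32 holds raw little-endian bytes; convert at the boundary. */
static u32 read_lvb_gen(const char *lvb_bits)
{
	__le32 gen;

	memcpy(&gen, lvb_bits, sizeof(__le32));	/* raw LE bytes */
	return le32_to_cpu(gen);		/* CPU order before use */
}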
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 453b50eaddec..98236d0df3ca 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -667,7 +667,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
struct buffer_head *bh;
struct page *page;
void *kaddr, *ptr;
- struct gfs2_quota q, *qp;
+ struct gfs2_quota q;
int err, nbytes;
u64 size;
@@ -683,28 +683,25 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
return err;
err = -EIO;
- qp = &q;
- qp->qu_value = be64_to_cpu(qp->qu_value);
- qp->qu_value += change;
- qp->qu_value = cpu_to_be64(qp->qu_value);
- qd->qd_qb.qb_value = qp->qu_value;
+ be64_add_cpu(&q.qu_value, change);
+ qd->qd_qb.qb_value = q.qu_value;
if (fdq) {
if (fdq->d_fieldmask & FS_DQ_BSOFT) {
- qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
- qd->qd_qb.qb_warn = qp->qu_warn;
+ q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
+ qd->qd_qb.qb_warn = q.qu_warn;
}
if (fdq->d_fieldmask & FS_DQ_BHARD) {
- qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
- qd->qd_qb.qb_limit = qp->qu_limit;
+ q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
+ qd->qd_qb.qb_limit = q.qu_limit;
}
if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
- qp->qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
- qd->qd_qb.qb_value = qp->qu_value;
+ q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
+ qd->qd_qb.qb_value = q.qu_value;
}
}
/* Write the quota into the quota file on disk */
- ptr = qp;
+ ptr = &q;
nbytes = sizeof(struct gfs2_quota);
get_a_page:
page = find_or_create_page(mapping, index, GFP_NOFS);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 4d83abdd5635..c8d6161bd682 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1127,7 +1127,7 @@ int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
rgd->rd_free_clone = rgd->rd_free;
}
- if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
+ if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
rgd->rd_bits[0].bi_bh->b_data);
@@ -1161,7 +1161,7 @@ int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
if (rgd->rd_flags & GFS2_RDF_UPTODATE)
return 0;
- if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
+ if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
return gfs2_rgrp_bh_get(rgd);
rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index b51a6079108d..e9a97a0d4314 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -24,13 +24,6 @@ struct hfsplus_wd {
u16 embed_count;
};
-static void hfsplus_end_io_sync(struct bio *bio, int err)
-{
- if (err)
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
- complete(bio->bi_private);
-}
-
/*
* hfsplus_submit_bio - Perform block I/O
* @sb: super block of volume for I/O
@@ -53,7 +46,6 @@ static void hfsplus_end_io_sync(struct bio *bio, int err)
int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
void *buf, void **data, int rw)
{
- DECLARE_COMPLETION_ONSTACK(wait);
struct bio *bio;
int ret = 0;
u64 io_size;
@@ -73,8 +65,6 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
bio = bio_alloc(GFP_NOIO, 1);
bio->bi_sector = sector;
bio->bi_bdev = sb->s_bdev;
- bio->bi_end_io = hfsplus_end_io_sync;
- bio->bi_private = &wait;
if (!(rw & WRITE) && data)
*data = (u8 *)buf + offset;
@@ -93,12 +83,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
buf = (u8 *)buf + len;
}
- submit_bio(rw, bio);
- wait_for_completion(&wait);
-
- if (!bio_flagged(bio, BIO_UPTODATE))
- ret = -EIO;
-
+ ret = submit_bio_wait(rw, bio);
out:
bio_put(bio);
return ret < 0 ? ret : 0;
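This change, and the logfs one below, collapse the same open-coded completion boilerplate into submit_bio_wait(); a sketch of what such a helper plausibly looks like (the names here are illustrative, not copied from fs/bio.c):

struct sync_bio_ret {
	struct completion event;
	int error;
};

static void sync_bio_end_io(struct bio *bio, int error)
{
	struct sync_bio_ret *ret = bio->bi_private;

	ret->error = error;
	complete(&ret->event);
}

static int sketch_submit_bio_wait(int rw, struct bio *bio)
{
	struct sync_bio_ret ret;

	init_completion(&ret.event);
	bio->bi_private = &ret;
	bio->bi_end_io = sync_bio_end_io;
	submit_bio(rw | REQ_SYNC, bio);	/* REQ_SYNC: don't let it idle */
	wait_for_completion(&ret.event);
	return ret.error;
}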
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 25437280a207..db23ce1bd903 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -33,15 +33,6 @@ static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode)
#define FILE_HOSTFS_I(file) HOSTFS_I(file_inode(file))
-static int hostfs_d_delete(const struct dentry *dentry)
-{
- return 1;
-}
-
-static const struct dentry_operations hostfs_dentry_ops = {
- .d_delete = hostfs_d_delete,
-};
-
/* Changed in hostfs_args before the kernel starts running */
static char *root_ino = "";
static int append = 0;
@@ -925,7 +916,7 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
sb->s_blocksize_bits = 10;
sb->s_magic = HOSTFS_SUPER_MAGIC;
sb->s_op = &hostfs_sbops;
- sb->s_d_op = &hostfs_dentry_ops;
+ sb->s_d_op = &simple_dentry_operations;
sb->s_maxbytes = MAX_LFS_FILESIZE;
/* NULL is printed as <NULL> by sprintf: avoid that. */
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 52032647dd4a..5fa344afb49a 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -702,7 +702,7 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
read_lock(&journal->j_state_lock);
#ifdef CONFIG_JBD2_DEBUG
if (!tid_geq(journal->j_commit_request, tid)) {
- printk(KERN_EMERG
+ printk(KERN_ERR
"%s: error: j_commit_request=%d, tid=%d\n",
__func__, journal->j_commit_request, tid);
}
@@ -718,10 +718,8 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
}
read_unlock(&journal->j_state_lock);
- if (unlikely(is_journal_aborted(journal))) {
- printk(KERN_EMERG "journal commit I/O error\n");
+ if (unlikely(is_journal_aborted(journal)))
err = -EIO;
- }
return err;
}
@@ -1527,13 +1525,13 @@ static int journal_get_superblock(journal_t *journal)
if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) &&
JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
/* Can't have checksum v1 and v2 on at the same time! */
- printk(KERN_ERR "JBD: Can't enable checksumming v1 and v2 "
+ printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2 "
"at the same time!\n");
goto out;
}
if (!jbd2_verify_csum_type(journal, sb)) {
- printk(KERN_ERR "JBD: Unknown checksum type\n");
+ printk(KERN_ERR "JBD2: Unknown checksum type\n");
goto out;
}
@@ -1541,7 +1539,7 @@ static int journal_get_superblock(journal_t *journal)
if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
if (IS_ERR(journal->j_chksum_driver)) {
- printk(KERN_ERR "JBD: Cannot load crc32c driver.\n");
+ printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n");
err = PTR_ERR(journal->j_chksum_driver);
journal->j_chksum_driver = NULL;
goto out;
@@ -1550,7 +1548,7 @@ static int journal_get_superblock(journal_t *journal)
/* Check superblock checksum */
if (!jbd2_superblock_csum_verify(journal, sb)) {
- printk(KERN_ERR "JBD: journal checksum error\n");
+ printk(KERN_ERR "JBD2: journal checksum error\n");
goto out;
}
@@ -1836,7 +1834,7 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
journal->j_chksum_driver = crypto_alloc_shash("crc32c",
0, 0);
if (IS_ERR(journal->j_chksum_driver)) {
- printk(KERN_ERR "JBD: Cannot load crc32c "
+ printk(KERN_ERR "JBD2: Cannot load crc32c "
"driver.\n");
journal->j_chksum_driver = NULL;
return 0;
@@ -2645,7 +2643,7 @@ static void __exit journal_exit(void)
#ifdef CONFIG_JBD2_DEBUG
int n = atomic_read(&nr_journal_heads);
if (n)
- printk(KERN_EMERG "JBD2: leaked %d journal_heads!\n", n);
+ printk(KERN_ERR "JBD2: leaked %d journal_heads!\n", n);
#endif
jbd2_remove_jbd_stats_proc_entry();
jbd2_journal_destroy_caches();
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 3929c50428b1..3b6bb19d60b1 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -594,7 +594,7 @@ static int do_one_pass(journal_t *journal,
be32_to_cpu(tmp->h_sequence))) {
brelse(obh);
success = -EIO;
- printk(KERN_ERR "JBD: Invalid "
+ printk(KERN_ERR "JBD2: Invalid "
"checksum recovering "
"block %llu in log\n",
blocknr);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 7aa9a32573bb..8360674c85bc 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -932,7 +932,7 @@ repeat:
jbd2_alloc(jh2bh(jh)->b_size,
GFP_NOFS);
if (!frozen_buffer) {
- printk(KERN_EMERG
+ printk(KERN_ERR
"%s: OOM for frozen_buffer\n",
__func__);
JBUFFER_TRACE(jh, "oom!");
@@ -1166,7 +1166,7 @@ repeat:
if (!jh->b_committed_data) {
committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
if (!committed_data) {
- printk(KERN_EMERG "%s: No memory for committed data\n",
+ printk(KERN_ERR "%s: No memory for committed data\n",
__func__);
err = -ENOMEM;
goto out;
@@ -1290,7 +1290,10 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
* once a transaction -bzzz
*/
jh->b_modified = 1;
- J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
+ if (handle->h_buffer_credits <= 0) {
+ ret = -ENOSPC;
+ goto out_unlock_bh;
+ }
handle->h_buffer_credits--;
}
@@ -1305,7 +1308,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
JBUFFER_TRACE(jh, "fastpath");
if (unlikely(jh->b_transaction !=
journal->j_running_transaction)) {
- printk(KERN_EMERG "JBD: %s: "
+ printk(KERN_ERR "JBD2: %s: "
"jh->b_transaction (%llu, %p, %u) != "
"journal->j_running_transaction (%p, %u)",
journal->j_devname,
@@ -1332,7 +1335,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
JBUFFER_TRACE(jh, "already on other transaction");
if (unlikely(jh->b_transaction !=
journal->j_committing_transaction)) {
- printk(KERN_EMERG "JBD: %s: "
+ printk(KERN_ERR "JBD2: %s: "
"jh->b_transaction (%llu, %p, %u) != "
"journal->j_committing_transaction (%p, %u)",
journal->j_devname,
@@ -1345,7 +1348,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
ret = -EINVAL;
}
if (unlikely(jh->b_next_transaction != transaction)) {
- printk(KERN_EMERG "JBD: %s: "
+ printk(KERN_ERR "JBD2: %s: "
"jh->b_next_transaction (%llu, %p, %u) != "
"transaction (%p, %u)",
journal->j_devname,
@@ -1373,7 +1376,6 @@ out_unlock_bh:
jbd2_journal_put_journal_head(jh);
out:
JBUFFER_TRACE(jh, "exit");
- WARN_ON(ret); /* All errors are bugs, so dump the stack */
return ret;
}
diff --git a/fs/libfs.c b/fs/libfs.c
index 5de06947ba5e..a1844244246f 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -47,10 +47,16 @@ EXPORT_SYMBOL(simple_statfs);
* Retaining negative dentries for an in-memory filesystem just wastes
* memory and lookup time: arrange for them to be deleted immediately.
*/
-static int simple_delete_dentry(const struct dentry *dentry)
+int always_delete_dentry(const struct dentry *dentry)
{
return 1;
}
+EXPORT_SYMBOL(always_delete_dentry);
+
+const struct dentry_operations simple_dentry_operations = {
+ .d_delete = always_delete_dentry,
+};
+EXPORT_SYMBOL(simple_dentry_operations);
/*
* Lookup the data. This is trivial - if the dentry didn't already
@@ -58,10 +64,6 @@ static int simple_delete_dentry(const struct dentry *dentry)
*/
struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
- static const struct dentry_operations simple_dentry_operations = {
- .d_delete = simple_delete_dentry,
- };
-
if (dentry->d_name.len > NAME_MAX)
return ERR_PTR(-ENAMETOOLONG);
if (!dentry->d_sb->s_d_op)
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index 550475ca6a0e..0f95f0d0b313 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -14,16 +14,10 @@
#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
-static void request_complete(struct bio *bio, int err)
-{
- complete((struct completion *)bio->bi_private);
-}
-
static int sync_request(struct page *page, struct block_device *bdev, int rw)
{
struct bio bio;
struct bio_vec bio_vec;
- struct completion complete;
bio_init(&bio);
bio.bi_max_vecs = 1;
@@ -35,13 +29,8 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
bio.bi_size = PAGE_SIZE;
bio.bi_bdev = bdev;
bio.bi_sector = page->index * (PAGE_SIZE >> 9);
- init_completion(&complete);
- bio.bi_private = &complete;
- bio.bi_end_io = request_complete;
- submit_bio(rw, &bio);
- wait_for_completion(&complete);
- return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
+ return submit_bio_wait(rw, &bio);
}
static int bdev_readpage(void *_sb, struct page *page)
diff --git a/fs/namei.c b/fs/namei.c
index e029a4cbff7d..3531deebad30 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -513,8 +513,7 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
if (!lockref_get_not_dead(&parent->d_lockref)) {
nd->path.dentry = NULL;
- rcu_read_unlock();
- return -ECHILD;
+ goto out;
}
/*
@@ -1599,11 +1598,6 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
* do a "get_unaligned()" if this helps and is sufficiently
* fast.
*
- * - Little-endian machines (so that we can generate the mask
- * of low bytes efficiently). Again, we *could* do a byte
- * swapping load on big-endian architectures if that is not
- * expensive enough to make the optimization worthless.
- *
* - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
* do not trap on the (extremely unlikely) case of a page
* crossing operation).
@@ -1647,7 +1641,7 @@ unsigned int full_name_hash(const unsigned char *name, unsigned int len)
if (!len)
goto done;
}
- mask = ~(~0ul << len*8);
+ mask = bytemask_from_count(len);
hash += mask & a;
done:
return fold_hash(hash);
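bytemask_from_count() presumably names the exact expression it replaces; the definition below matches the removed open-coded mask and is assumed to live in a shared header:

/* Mask covering the low `cnt` bytes of an unsigned long, e.g. on
 * 64-bit, bytemask_from_count(3) == 0x0000000000ffffff. */
#define bytemask_from_count(cnt)	(~(~0ul << (cnt)*8))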
@@ -2435,6 +2429,7 @@ static int may_delete(struct inode *dir, struct dentry *victim, bool isdir)
*/
static inline int may_create(struct inode *dir, struct dentry *child)
{
+ audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE);
if (child->d_inode)
return -EEXIST;
if (IS_DEADDIR(dir))
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
index 8485978993e8..9838fb020473 100644
--- a/fs/nfs/blocklayout/blocklayout.h
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -36,6 +36,7 @@
#include <linux/nfs_fs.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
+#include "../nfs4_fs.h"
#include "../pnfs.h"
#include "../netns.h"
diff --git a/fs/nfs/blocklayout/extents.c b/fs/nfs/blocklayout/extents.c
index 9c3e117c3ed1..4d0161442565 100644
--- a/fs/nfs/blocklayout/extents.c
+++ b/fs/nfs/blocklayout/extents.c
@@ -44,7 +44,7 @@
static inline sector_t normalize(sector_t s, int base)
{
sector_t tmp = s; /* Since do_div modifies its argument */
- return s - do_div(tmp, base);
+ return s - sector_div(tmp, base);
}
static inline sector_t normalize_up(sector_t s, int base)
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index fc0f95ec7358..d25f10fb4926 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -46,7 +46,9 @@ ssize_t nfs_dns_resolve_name(struct net *net, char *name, size_t namelen,
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
+#include <linux/nfs_fs.h>
+#include "nfs4_fs.h"
#include "dns_resolve.h"
#include "cache_lib.h"
#include "netns.h"
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 18ab2da4eeb6..00ad1c2b217d 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -312,7 +312,7 @@ struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags)
}
EXPORT_SYMBOL_GPL(nfs4_label_alloc);
#else
-void inline nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
+void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
struct nfs4_label *label)
{
}
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index bca6a3e3c49c..8b5cc04a8611 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -269,6 +269,21 @@ extern const u32 nfs41_maxgetdevinfo_overhead;
extern struct rpc_procinfo nfs4_procedures[];
#endif
+#ifdef CONFIG_NFS_V4_SECURITY_LABEL
+extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags);
+static inline void nfs4_label_free(struct nfs4_label *label)
+{
+ if (label) {
+ kfree(label->label);
+ kfree(label);
+ }
+ return;
+}
+#else
+static inline struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags) { return NULL; }
+static inline void nfs4_label_free(void *label) {}
+#endif /* CONFIG_NFS_V4_SECURITY_LABEL */
+
/* proc.c */
void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 3ce79b04522e..5609edc742a0 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -9,6 +9,14 @@
#ifndef __LINUX_FS_NFS_NFS4_FS_H
#define __LINUX_FS_NFS_NFS4_FS_H
+#if defined(CONFIG_NFS_V4_2)
+#define NFS4_MAX_MINOR_VERSION 2
+#elif defined(CONFIG_NFS_V4_1)
+#define NFS4_MAX_MINOR_VERSION 1
+#else
+#define NFS4_MAX_MINOR_VERSION 0
+#endif
+
#if IS_ENABLED(CONFIG_NFS_V4)
#define NFS4_MAX_LOOP_ON_RECOVER (10)
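A plausible consumer of the new constant caps the minor version negotiated at mount time; the function below is a hypothetical sketch, not part of this patch:

static int check_minorversion(unsigned int minorversion)
{
	if (minorversion > NFS4_MAX_MINOR_VERSION)
		return -EPROTONOSUPPORT;	/* built without support */
	return 0;
}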
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 659990c0109e..15052b81df42 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2518,9 +2518,8 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
calldata->roc_barrier);
nfs_set_open_stateid(state, &calldata->res.stateid, 0);
renew_lease(server, calldata->timestamp);
- nfs4_close_clear_stateid_flags(state,
- calldata->arg.fmode);
break;
+ case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_BAD_STATEID:
@@ -2528,9 +2527,13 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
if (calldata->arg.fmode == 0)
break;
default:
- if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
+ if (nfs4_async_handle_error(task, server, state) == -EAGAIN) {
rpc_restart_call_prepare(task);
+ goto out_release;
+ }
}
+ nfs4_close_clear_stateid_flags(state, calldata->arg.fmode);
+out_release:
nfs_release_seqid(calldata->arg.seqid);
nfs_refresh_inode(calldata->inode, calldata->res.fattr);
dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
@@ -4802,7 +4805,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
dprintk("%s ERROR %d, Reset session\n", __func__,
task->tk_status);
nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
- goto restart_call;
+ goto wait_on_recovery;
#endif /* CONFIG_NFS_V4_1 */
case -NFS4ERR_DELAY:
nfs_inc_server_stats(server, NFSIOS_DELAY);
@@ -4987,11 +4990,17 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
switch (task->tk_status) {
- case -NFS4ERR_STALE_STATEID:
- case -NFS4ERR_EXPIRED:
case 0:
renew_lease(data->res.server, data->timestamp);
break;
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_DELEG_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_OLD_STATEID:
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_EXPIRED:
+ task->tk_status = 0;
+ break;
default:
if (nfs4_async_handle_error(task, data->res.server, NULL) ==
-EAGAIN) {
@@ -7589,7 +7598,14 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
return;
server = NFS_SERVER(lrp->args.inode);
- if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
+ switch (task->tk_status) {
+ default:
+ task->tk_status = 0;
+ case 0:
+ break;
+ case -NFS4ERR_DELAY:
+ if (nfs4_async_handle_error(task, server, NULL) != -EAGAIN)
+ break;
rpc_restart_call_prepare(task);
return;
}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 088de1355e93..ee7237f99f54 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -141,8 +141,8 @@ xdr_error: \
static void next_decode_page(struct nfsd4_compoundargs *argp)
{
- argp->pagelist++;
argp->p = page_address(argp->pagelist[0]);
+ argp->pagelist++;
if (argp->pagelen < PAGE_SIZE) {
argp->end = argp->p + (argp->pagelen>>2);
argp->pagelen = 0;
@@ -1229,6 +1229,7 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
len -= pages * PAGE_SIZE;
argp->p = (__be32 *)page_address(argp->pagelist[0]);
+ argp->pagelist++;
argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE);
}
argp->p += XDR_QUADLEN(len);
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 9186c7ce0b14..b6af150c96b8 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -132,6 +132,13 @@ nfsd_reply_cache_alloc(void)
}
static void
+nfsd_reply_cache_unhash(struct svc_cacherep *rp)
+{
+ hlist_del_init(&rp->c_hash);
+ list_del_init(&rp->c_lru);
+}
+
+static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
@@ -417,7 +424,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
if (nfsd_cache_entry_expired(rp) ||
num_drc_entries >= max_drc_entries) {
- lru_put_end(rp);
+ nfsd_reply_cache_unhash(rp);
prune_cache_entries();
goto search_cache;
}
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 94b5f5d2bfed..7eea63cada1d 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -298,41 +298,12 @@ commit_metadata(struct svc_fh *fhp)
}
/*
- * Set various file attributes.
- * N.B. After this call fhp needs an fh_put
+ * Go over the attributes and take care of the small differences between
+ * NFS semantics and what Linux expects.
*/
-__be32
-nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
- int check_guard, time_t guardtime)
+static void
+nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
{
- struct dentry *dentry;
- struct inode *inode;
- int accmode = NFSD_MAY_SATTR;
- umode_t ftype = 0;
- __be32 err;
- int host_err;
- int size_change = 0;
-
- if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
- accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
- if (iap->ia_valid & ATTR_SIZE)
- ftype = S_IFREG;
-
- /* Get inode */
- err = fh_verify(rqstp, fhp, ftype, accmode);
- if (err)
- goto out;
-
- dentry = fhp->fh_dentry;
- inode = dentry->d_inode;
-
- /* Ignore any mode updates on symlinks */
- if (S_ISLNK(inode->i_mode))
- iap->ia_valid &= ~ATTR_MODE;
-
- if (!iap->ia_valid)
- goto out;
-
/*
* NFSv2 does not differentiate between "set-[ac]time-to-now"
* which only requires access, and "set-[ac]time-to-X" which
@@ -342,8 +313,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
* convert to "set to now" instead of "set to explicit time"
*
* We only call inode_change_ok as the last test as technically
- * it is not an interface that we should be using. It is only
- * valid if the filesystem does not define it's own i_op->setattr.
+ * it is not an interface that we should be using.
*/
#define BOTH_TIME_SET (ATTR_ATIME_SET | ATTR_MTIME_SET)
#define MAX_TOUCH_TIME_ERROR (30*60)
@@ -369,30 +339,6 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
iap->ia_valid &= ~BOTH_TIME_SET;
}
}
-
- /*
- * The size case is special.
- * It changes the file as well as the attributes.
- */
- if (iap->ia_valid & ATTR_SIZE) {
- if (iap->ia_size < inode->i_size) {
- err = nfsd_permission(rqstp, fhp->fh_export, dentry,
- NFSD_MAY_TRUNC|NFSD_MAY_OWNER_OVERRIDE);
- if (err)
- goto out;
- }
-
- host_err = get_write_access(inode);
- if (host_err)
- goto out_nfserr;
-
- size_change = 1;
- host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
- if (host_err) {
- put_write_access(inode);
- goto out_nfserr;
- }
- }
/* sanitize the mode change */
if (iap->ia_valid & ATTR_MODE) {
@@ -415,32 +361,111 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
iap->ia_valid |= (ATTR_KILL_SUID | ATTR_KILL_SGID);
}
}
+}
- /* Change the attributes. */
+static __be32
+nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ struct iattr *iap)
+{
+ struct inode *inode = fhp->fh_dentry->d_inode;
+ int host_err;
- iap->ia_valid |= ATTR_CTIME;
+ if (iap->ia_size < inode->i_size) {
+ __be32 err;
- err = nfserr_notsync;
- if (!check_guard || guardtime == inode->i_ctime.tv_sec) {
- host_err = nfsd_break_lease(inode);
- if (host_err)
- goto out_nfserr;
- fh_lock(fhp);
+ err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
+ NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE);
+ if (err)
+ return err;
+ }
- host_err = notify_change(dentry, iap, NULL);
- err = nfserrno(host_err);
- fh_unlock(fhp);
+ host_err = get_write_access(inode);
+ if (host_err)
+ goto out_nfserrno;
+
+ host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
+ if (host_err)
+ goto out_put_write_access;
+ return 0;
+
+out_put_write_access:
+ put_write_access(inode);
+out_nfserrno:
+ return nfserrno(host_err);
+}
+
+/*
+ * Set various file attributes. After this call fhp needs an fh_put.
+ */
+__be32
+nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+ int check_guard, time_t guardtime)
+{
+ struct dentry *dentry;
+ struct inode *inode;
+ int accmode = NFSD_MAY_SATTR;
+ umode_t ftype = 0;
+ __be32 err;
+ int host_err;
+ int size_change = 0;
+
+ if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
+ accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
+ if (iap->ia_valid & ATTR_SIZE)
+ ftype = S_IFREG;
+
+ /* Get inode */
+ err = fh_verify(rqstp, fhp, ftype, accmode);
+ if (err)
+ goto out;
+
+ dentry = fhp->fh_dentry;
+ inode = dentry->d_inode;
+
+ /* Ignore any mode updates on symlinks */
+ if (S_ISLNK(inode->i_mode))
+ iap->ia_valid &= ~ATTR_MODE;
+
+ if (!iap->ia_valid)
+ goto out;
+
+ nfsd_sanitize_attrs(inode, iap);
+
+ /*
+ * The size case is special, it changes the file in addition to the
+ * attributes.
+ */
+ if (iap->ia_valid & ATTR_SIZE) {
+ err = nfsd_get_write_access(rqstp, fhp, iap);
+ if (err)
+ goto out;
+ size_change = 1;
}
+
+ iap->ia_valid |= ATTR_CTIME;
+
+ if (check_guard && guardtime != inode->i_ctime.tv_sec) {
+ err = nfserr_notsync;
+ goto out_put_write_access;
+ }
+
+ host_err = nfsd_break_lease(inode);
+ if (host_err)
+ goto out_put_write_access_nfserror;
+
+ fh_lock(fhp);
+ host_err = notify_change(dentry, iap, NULL);
+ fh_unlock(fhp);
+
+out_put_write_access_nfserror:
+ err = nfserrno(host_err);
+out_put_write_access:
if (size_change)
put_write_access(inode);
if (!err)
commit_metadata(fhp);
out:
return err;
-
-out_nfserr:
- err = nfserrno(host_err);
- goto out;
}
#if defined(CONFIG_NFSD_V2_ACL) || \
diff --git a/fs/pipe.c b/fs/pipe.c
index d2c45e14e6d8..0e0752ef2715 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -726,11 +726,25 @@ pipe_poll(struct file *filp, poll_table *wait)
return mask;
}
+static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
+{
+ int kill = 0;
+
+ spin_lock(&inode->i_lock);
+ if (!--pipe->files) {
+ inode->i_pipe = NULL;
+ kill = 1;
+ }
+ spin_unlock(&inode->i_lock);
+
+ if (kill)
+ free_pipe_info(pipe);
+}
+
static int
pipe_release(struct inode *inode, struct file *file)
{
- struct pipe_inode_info *pipe = inode->i_pipe;
- int kill = 0;
+ struct pipe_inode_info *pipe = file->private_data;
__pipe_lock(pipe);
if (file->f_mode & FMODE_READ)
@@ -743,17 +757,9 @@ pipe_release(struct inode *inode, struct file *file)
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
- spin_lock(&inode->i_lock);
- if (!--pipe->files) {
- inode->i_pipe = NULL;
- kill = 1;
- }
- spin_unlock(&inode->i_lock);
__pipe_unlock(pipe);
- if (kill)
- free_pipe_info(pipe);
-
+ put_pipe_info(inode, pipe);
return 0;
}
@@ -1014,7 +1020,6 @@ static int fifo_open(struct inode *inode, struct file *filp)
{
struct pipe_inode_info *pipe;
bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
- int kill = 0;
int ret;
filp->f_version = 0;
@@ -1130,15 +1135,9 @@ err_wr:
goto err;
err:
- spin_lock(&inode->i_lock);
- if (!--pipe->files) {
- inode->i_pipe = NULL;
- kill = 1;
- }
- spin_unlock(&inode->i_lock);
__pipe_unlock(pipe);
- if (kill)
- free_pipe_info(pipe);
+
+ put_pipe_info(inode, pipe);
return ret;
}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 1485e38daaa3..03c8d747be48 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1151,10 +1151,16 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
goto out_free_page;
}
- kloginuid = make_kuid(file->f_cred->user_ns, loginuid);
- if (!uid_valid(kloginuid)) {
- length = -EINVAL;
- goto out_free_page;
+
+ /* is userspace trying to explicitly UNSET the loginuid? */
+ if (loginuid == AUDIT_UID_UNSET) {
+ kloginuid = INVALID_UID;
+ } else {
+ kloginuid = make_kuid(file->f_cred->user_ns, loginuid);
+ if (!uid_valid(kloginuid)) {
+ length = -EINVAL;
+ goto out_free_page;
+ }
}
length = audit_set_loginuid(kloginuid);
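AUDIT_UID_UNSET is assumed here to be the all-ones sentinel from the audit UAPI header, so userspace clears the loginuid by writing 4294967295 to /proc/<pid>/loginuid instead of getting -EINVAL:

/* Assumed definition (include/uapi/linux/audit.h): */
#define AUDIT_UID_UNSET (unsigned int)-1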
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 737e15615b04..cca93b6fb9a9 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -175,22 +175,6 @@ static const struct inode_operations proc_link_inode_operations = {
};
/*
- * As some entries in /proc are volatile, we want to
- * get rid of unused dentries. This could be made
- * smarter: we could keep a "volatile" flag in the
- * inode to indicate which ones to keep.
- */
-static int proc_delete_dentry(const struct dentry * dentry)
-{
- return 1;
-}
-
-static const struct dentry_operations proc_dentry_operations =
-{
- .d_delete = proc_delete_dentry,
-};
-
-/*
* Don't create negative dentries here, return -ENOENT by hand
* instead.
*/
@@ -209,7 +193,7 @@ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
inode = proc_get_inode(dir->i_sb, de);
if (!inode)
return ERR_PTR(-ENOMEM);
- d_set_d_op(dentry, &proc_dentry_operations);
+ d_set_d_op(dentry, &simple_dentry_operations);
d_add(dentry, inode);
return NULL;
}
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 28955d4b7218..124fc43c7090 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -292,16 +292,20 @@ proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr,
{
struct proc_dir_entry *pde = PDE(file_inode(file));
unsigned long rv = -EIO;
- unsigned long (*get_area)(struct file *, unsigned long, unsigned long,
- unsigned long, unsigned long) = NULL;
+
if (use_pde(pde)) {
+ typeof(proc_reg_get_unmapped_area) *get_area;
+
+ get_area = pde->proc_fops->get_unmapped_area;
#ifdef CONFIG_MMU
- get_area = current->mm->get_unmapped_area;
+ if (!get_area)
+ get_area = current->mm->get_unmapped_area;
#endif
- if (pde->proc_fops->get_unmapped_area)
- get_area = pde->proc_fops->get_unmapped_area;
+
if (get_area)
rv = get_area(file, orig_addr, len, pgoff, flags);
+ else
+ rv = orig_addr;
unuse_pde(pde);
}
return rv;
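Declaring the pointer with typeof() on the function itself keeps the local variable tied to the real prototype; a minimal sketch of the idiom, where demo_area is a stand-in for proc_reg_get_unmapped_area:

static unsigned long demo_area(struct file *file, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags)
{
	return addr;
}

static void demo(void)
{
	/* same five-argument prototype, derived automatically */
	typeof(demo_area) *get_area = demo_area;

	(void)get_area;
}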
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
index 49a7fff2e83a..9ae46b87470d 100644
--- a/fs/proc/namespaces.c
+++ b/fs/proc/namespaces.c
@@ -42,12 +42,6 @@ static const struct inode_operations ns_inode_operations = {
.setattr = proc_setattr,
};
-static int ns_delete_dentry(const struct dentry *dentry)
-{
- /* Don't cache namespace inodes when not in use */
- return 1;
-}
-
static char *ns_dname(struct dentry *dentry, char *buffer, int buflen)
{
struct inode *inode = dentry->d_inode;
@@ -59,7 +53,7 @@ static char *ns_dname(struct dentry *dentry, char *buffer, int buflen)
const struct dentry_operations ns_dentry_operations =
{
- .d_delete = ns_delete_dentry,
+ .d_delete = always_delete_dentry,
.d_dname = ns_dname,
};
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index b8e93a40a5d3..78c3c2097787 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -443,8 +443,11 @@ int pstore_register(struct pstore_info *psi)
pstore_get_records(0);
kmsg_dump_register(&pstore_dumper);
- pstore_register_console();
- pstore_register_ftrace();
+
+ if ((psi->flags & PSTORE_FLAGS_FRAGILE) == 0) {
+ pstore_register_console();
+ pstore_register_ftrace();
+ }
if (pstore_update_ms >= 0) {
pstore_timer.expires = jiffies +
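A backend that cannot tolerate being re-entered from the console or ftrace paths can now mark itself fragile; a hypothetical registration (only .flags is specific to this change, the other fields follow the usual pstore_info shape):

static struct pstore_info my_psinfo = {
	.owner	= THIS_MODULE,
	.name	= "frail-backend",
	.flags	= PSTORE_FLAGS_FRAGILE,	/* skip console/ftrace front ends */
	/* .open/.read/.write/.erase callbacks omitted from this sketch */
};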
diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig
index c70111ebefd4..b6fa8657dcbc 100644
--- a/fs/squashfs/Kconfig
+++ b/fs/squashfs/Kconfig
@@ -25,6 +25,78 @@ config SQUASHFS
If unsure, say N.
+choice
+ prompt "File decompression options"
+ depends on SQUASHFS
+ help
+ Squashfs supports two options for decompressing file
+ data. Traditionally Squashfs has decompressed into an
+ intermediate buffer and then memcopied it into the page cache.
+ It can now also decompress directly into the page cache,
+ avoiding that copy.
+
+ If unsure, select "Decompress file data into an intermediate buffer"
+
+config SQUASHFS_FILE_CACHE
+ bool "Decompress file data into an intermediate buffer"
+ help
+ Decompress file data into an intermediate buffer and then
+ memcopy it into the page cache.
+
+config SQUASHFS_FILE_DIRECT
+ bool "Decompress files directly into the page cache"
+ help
+ Directly decompress file data into the page cache.
+ Doing so can significantly improve performance because
+ it eliminates a memcpy and it also removes the lock contention
+ on the single buffer.
+
+endchoice
+
+choice
+ prompt "Decompressor parallelisation options"
+ depends on SQUASHFS
+ help
+ Squashfs now supports three parallelisation options for
+ decompression. Each one exhibits various trade-offs between
+ decompression performance and CPU and memory usage.
+
+ If in doubt, select "Single threaded decompression"
+
+config SQUASHFS_DECOMP_SINGLE
+ bool "Single threaded compression"
+ help
+ Traditionally Squashfs has used single-threaded decompression.
+ Only one block (data or metadata) can be decompressed at any
+ one time. This limits CPU and memory usage to a minimum.
+
+config SQUASHFS_DECOMP_MULTI
+ bool "Use multiple decompressors for parallel I/O"
+ help
+ By default Squashfs uses a single decompressor, which gives
+ poor performance on parallel I/O workloads on multi-CPU
+ machines because readers must wait for it to become available.
+
+ If you have a parallel I/O workload and your system has enough memory,
+ using this option may improve overall I/O performance.
+
+ This decompressor implementation uses up to two parallel
+ decompressors per core, allocating them dynamically on demand.
+
+config SQUASHFS_DECOMP_MULTI_PERCPU
+ bool "Use percpu multiple decompressors for parallel I/O"
+ help
+ By default Squashfs uses a single decompressor, which gives
+ poor performance on parallel I/O workloads on multi-CPU
+ machines because readers must wait for it to become available.
+
+ This decompressor implementation uses a maximum of one
+ decompressor per core. It uses percpu variables to ensure
+ decompression is load-balanced across the cores.
+
+endchoice
+
config SQUASHFS_XATTR
bool "Squashfs XATTR support"
depends on SQUASHFS
diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile
index 110b0476f3b4..4132520b4ff2 100644
--- a/fs/squashfs/Makefile
+++ b/fs/squashfs/Makefile
@@ -5,6 +5,11 @@
obj-$(CONFIG_SQUASHFS) += squashfs.o
squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o
squashfs-y += namei.o super.o symlink.o decompressor.o
+squashfs-$(CONFIG_SQUASHFS_FILE_CACHE) += file_cache.o
+squashfs-$(CONFIG_SQUASHFS_FILE_DIRECT) += file_direct.o page_actor.o
+squashfs-$(CONFIG_SQUASHFS_DECOMP_SINGLE) += decompressor_single.o
+squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI) += decompressor_multi.o
+squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU) += decompressor_multi_percpu.o
squashfs-$(CONFIG_SQUASHFS_XATTR) += xattr.o xattr_id.o
squashfs-$(CONFIG_SQUASHFS_LZO) += lzo_wrapper.o
squashfs-$(CONFIG_SQUASHFS_XZ) += xz_wrapper.o
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 41d108ecc9be..0cea9b9236d0 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -36,6 +36,7 @@
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
+#include "page_actor.h"
/*
* Read the metadata block length, this is stored in the first two
@@ -86,16 +87,16 @@ static struct buffer_head *get_block_length(struct super_block *sb,
* generated a larger block - this does occasionally happen with compression
* algorithms).
*/
-int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
- int length, u64 *next_index, int srclength, int pages)
+int squashfs_read_data(struct super_block *sb, u64 index, int length,
+ u64 *next_index, struct squashfs_page_actor *output)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
struct buffer_head **bh;
int offset = index & ((1 << msblk->devblksize_log2) - 1);
u64 cur_index = index >> msblk->devblksize_log2;
- int bytes, compressed, b = 0, k = 0, page = 0, avail;
+ int bytes, compressed, b = 0, k = 0, avail, i;
- bh = kcalloc(((srclength + msblk->devblksize - 1)
+ bh = kcalloc(((output->length + msblk->devblksize - 1)
>> msblk->devblksize_log2) + 1, sizeof(*bh), GFP_KERNEL);
if (bh == NULL)
return -ENOMEM;
@@ -111,9 +112,9 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
*next_index = index + length;
TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n",
- index, compressed ? "" : "un", length, srclength);
+ index, compressed ? "" : "un", length, output->length);
- if (length < 0 || length > srclength ||
+ if (length < 0 || length > output->length ||
(index + length) > msblk->bytes_used)
goto read_failure;
@@ -145,7 +146,7 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
TRACE("Block @ 0x%llx, %scompressed size %d\n", index,
compressed ? "" : "un", length);
- if (length < 0 || length > srclength ||
+ if (length < 0 || length > output->length ||
(index + length) > msblk->bytes_used)
goto block_release;
@@ -158,9 +159,15 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
ll_rw_block(READ, b - 1, bh + 1);
}
+ for (i = 0; i < b; i++) {
+ wait_on_buffer(bh[i]);
+ if (!buffer_uptodate(bh[i]))
+ goto block_release;
+ }
+
if (compressed) {
- length = squashfs_decompress(msblk, buffer, bh, b, offset,
- length, srclength, pages);
+ length = squashfs_decompress(msblk, bh, b, offset, length,
+ output);
if (length < 0)
goto read_failure;
} else {
@@ -168,22 +175,20 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
* Block is uncompressed.
*/
int in, pg_offset = 0;
+ void *data = squashfs_first_page(output);
for (bytes = length; k < b; k++) {
in = min(bytes, msblk->devblksize - offset);
bytes -= in;
- wait_on_buffer(bh[k]);
- if (!buffer_uptodate(bh[k]))
- goto block_release;
while (in) {
if (pg_offset == PAGE_CACHE_SIZE) {
- page++;
+ data = squashfs_next_page(output);
pg_offset = 0;
}
avail = min_t(int, in, PAGE_CACHE_SIZE -
pg_offset);
- memcpy(buffer[page] + pg_offset,
- bh[k]->b_data + offset, avail);
+ memcpy(data + pg_offset, bh[k]->b_data + offset,
+ avail);
in -= avail;
pg_offset += avail;
offset += avail;
@@ -191,6 +196,7 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
offset = 0;
put_bh(bh[k]);
}
+ squashfs_finish_page(output);
}
kfree(bh);
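squashfs_read_data() now iterates its output through a squashfs_page_actor instead of a raw page array; a sketch of the buffer-backed actor the first/next/finish calls above imply (the real page_actor.h may differ in detail):

struct squashfs_page_actor {
	void	**page;		/* output buffers, one per page */
	int	pages;
	int	length;
	int	next_page;
};

static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
{
	actor->next_page = 1;
	return actor->page[0];
}

static inline void *squashfs_next_page(struct squashfs_page_actor *actor)
{
	return actor->next_page == actor->pages ? NULL :
				actor->page[actor->next_page++];
}

static inline void squashfs_finish_page(struct squashfs_page_actor *actor)
{
	/* no-op for the buffer-backed case; a direct-to-page-cache
	 * actor would kunmap the last page here */
}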
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index af0b73802592..1cb70a0b2168 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -56,6 +56,7 @@
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
+#include "page_actor.h"
/*
* Look-up block in cache, and increment usage count. If not in cache, read
@@ -119,9 +120,8 @@ struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb,
entry->error = 0;
spin_unlock(&cache->lock);
- entry->length = squashfs_read_data(sb, entry->data,
- block, length, &entry->next_index,
- cache->block_size, cache->pages);
+ entry->length = squashfs_read_data(sb, block, length,
+ &entry->next_index, entry->actor);
spin_lock(&cache->lock);
@@ -220,6 +220,7 @@ void squashfs_cache_delete(struct squashfs_cache *cache)
kfree(cache->entry[i].data[j]);
kfree(cache->entry[i].data);
}
+ kfree(cache->entry[i].actor);
}
kfree(cache->entry);
@@ -280,6 +281,13 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries,
goto cleanup;
}
}
+
+ entry->actor = squashfs_page_actor_init(entry->data,
+ cache->pages, 0);
+ if (entry->actor == NULL) {
+ ERROR("Failed to allocate %s cache entry\n", name);
+ goto cleanup;
+ }
}
return cache;
@@ -410,6 +418,7 @@ void *squashfs_read_table(struct super_block *sb, u64 block, int length)
int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
int i, res;
void *table, *buffer, **data;
+ struct squashfs_page_actor *actor;
table = buffer = kmalloc(length, GFP_KERNEL);
if (table == NULL)
@@ -421,19 +430,28 @@ void *squashfs_read_table(struct super_block *sb, u64 block, int length)
goto failed;
}
+ actor = squashfs_page_actor_init(data, pages, length);
+ if (actor == NULL) {
+ res = -ENOMEM;
+ goto failed2;
+ }
+
for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE)
data[i] = buffer;
- res = squashfs_read_data(sb, data, block, length |
- SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length, pages);
+ res = squashfs_read_data(sb, block, length |
+ SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, actor);
kfree(data);
+ kfree(actor);
if (res < 0)
goto failed;
return table;
+failed2:
+ kfree(data);
failed:
kfree(table);
return ERR_PTR(res);
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c
index 3f6271d86abc..ac22fe73b0ad 100644
--- a/fs/squashfs/decompressor.c
+++ b/fs/squashfs/decompressor.c
@@ -30,6 +30,7 @@
#include "squashfs_fs_sb.h"
#include "decompressor.h"
#include "squashfs.h"
+#include "page_actor.h"
/*
* This file (and decompressor.h) implements a decompressor framework for
@@ -37,29 +38,29 @@
*/
static const struct squashfs_decompressor squashfs_lzma_unsupported_comp_ops = {
- NULL, NULL, NULL, LZMA_COMPRESSION, "lzma", 0
+ NULL, NULL, NULL, NULL, LZMA_COMPRESSION, "lzma", 0
};
#ifndef CONFIG_SQUASHFS_LZO
static const struct squashfs_decompressor squashfs_lzo_comp_ops = {
- NULL, NULL, NULL, LZO_COMPRESSION, "lzo", 0
+ NULL, NULL, NULL, NULL, LZO_COMPRESSION, "lzo", 0
};
#endif
#ifndef CONFIG_SQUASHFS_XZ
static const struct squashfs_decompressor squashfs_xz_comp_ops = {
- NULL, NULL, NULL, XZ_COMPRESSION, "xz", 0
+ NULL, NULL, NULL, NULL, XZ_COMPRESSION, "xz", 0
};
#endif
#ifndef CONFIG_SQUASHFS_ZLIB
static const struct squashfs_decompressor squashfs_zlib_comp_ops = {
- NULL, NULL, NULL, ZLIB_COMPRESSION, "zlib", 0
+ NULL, NULL, NULL, NULL, ZLIB_COMPRESSION, "zlib", 0
};
#endif
static const struct squashfs_decompressor squashfs_unknown_comp_ops = {
- NULL, NULL, NULL, 0, "unknown", 0
+ NULL, NULL, NULL, NULL, 0, "unknown", 0
};
static const struct squashfs_decompressor *decompressor[] = {
@@ -83,10 +84,11 @@ const struct squashfs_decompressor *squashfs_lookup_decompressor(int id)
}
-void *squashfs_decompressor_init(struct super_block *sb, unsigned short flags)
+static void *get_comp_opts(struct super_block *sb, unsigned short flags)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
- void *strm, *buffer = NULL;
+ void *buffer = NULL, *comp_opts;
+ struct squashfs_page_actor *actor = NULL;
int length = 0;
/*
@@ -94,23 +96,46 @@ void *squashfs_decompressor_init(struct super_block *sb, unsigned short flags)
*/
if (SQUASHFS_COMP_OPTS(flags)) {
buffer = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
- if (buffer == NULL)
- return ERR_PTR(-ENOMEM);
+ if (buffer == NULL) {
+ comp_opts = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ actor = squashfs_page_actor_init(&buffer, 1, 0);
+ if (actor == NULL) {
+ comp_opts = ERR_PTR(-ENOMEM);
+ goto out;
+ }
- length = squashfs_read_data(sb, &buffer,
- sizeof(struct squashfs_super_block), 0, NULL,
- PAGE_CACHE_SIZE, 1);
+ length = squashfs_read_data(sb,
+ sizeof(struct squashfs_super_block), 0, NULL, actor);
if (length < 0) {
- strm = ERR_PTR(length);
- goto finished;
+ comp_opts = ERR_PTR(length);
+ goto out;
}
}
- strm = msblk->decompressor->init(msblk, buffer, length);
+ comp_opts = squashfs_comp_opts(msblk, buffer, length);
-finished:
+out:
+ kfree(actor);
kfree(buffer);
+ return comp_opts;
+}
+
+
+void *squashfs_decompressor_setup(struct super_block *sb, unsigned short flags)
+{
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ void *stream, *comp_opts = get_comp_opts(sb, flags);
+
+ if (IS_ERR(comp_opts))
+ return comp_opts;
+
+ stream = squashfs_decompressor_create(msblk, comp_opts);
+ if (IS_ERR(stream))
+ kfree(comp_opts);
- return strm;
+ return stream;
}
diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h
index 330073e29029..af0985321808 100644
--- a/fs/squashfs/decompressor.h
+++ b/fs/squashfs/decompressor.h
@@ -24,28 +24,22 @@
*/
struct squashfs_decompressor {
- void *(*init)(struct squashfs_sb_info *, void *, int);
+ void *(*init)(struct squashfs_sb_info *, void *);
+ void *(*comp_opts)(struct squashfs_sb_info *, void *, int);
void (*free)(void *);
- int (*decompress)(struct squashfs_sb_info *, void **,
- struct buffer_head **, int, int, int, int, int);
+ int (*decompress)(struct squashfs_sb_info *, void *,
+ struct buffer_head **, int, int, int,
+ struct squashfs_page_actor *);
int id;
char *name;
int supported;
};
-static inline void squashfs_decompressor_free(struct squashfs_sb_info *msblk,
- void *s)
+static inline void *squashfs_comp_opts(struct squashfs_sb_info *msblk,
+ void *buff, int length)
{
- if (msblk->decompressor)
- msblk->decompressor->free(s);
-}
-
-static inline int squashfs_decompress(struct squashfs_sb_info *msblk,
- void **buffer, struct buffer_head **bh, int b, int offset, int length,
- int srclength, int pages)
-{
- return msblk->decompressor->decompress(msblk, buffer, bh, b, offset,
- length, srclength, pages);
+ return msblk->decompressor->comp_opts ?
+ msblk->decompressor->comp_opts(msblk, buff, length) : NULL;
}
#ifdef CONFIG_SQUASHFS_XZ
diff --git a/fs/squashfs/decompressor_multi.c b/fs/squashfs/decompressor_multi.c
new file mode 100644
index 000000000000..d6008a636479
--- /dev/null
+++ b/fs/squashfs/decompressor_multi.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2013
+ * Minchan Kim <minchan@kernel.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/cpumask.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "decompressor.h"
+#include "squashfs.h"
+
+/*
+ * This file implements multi-threaded decompression in the
+ * decompressor framework
+ */
+
+
+/*
+ * The factor of two allows a CPU to issue a new I/O request
+ * while it is still waiting on a previous one.
+ */
+#define MAX_DECOMPRESSOR (num_online_cpus() * 2)
+
+
+int squashfs_max_decompressors(void)
+{
+ return MAX_DECOMPRESSOR;
+}
+
+
+struct squashfs_stream {
+ void *comp_opts;
+ struct list_head strm_list;
+ struct mutex mutex;
+ int avail_decomp;
+ wait_queue_head_t wait;
+};
+
+
+struct decomp_stream {
+ void *stream;
+ struct list_head list;
+};
+
+
+static void put_decomp_stream(struct decomp_stream *decomp_strm,
+ struct squashfs_stream *stream)
+{
+ mutex_lock(&stream->mutex);
+ list_add(&decomp_strm->list, &stream->strm_list);
+ mutex_unlock(&stream->mutex);
+ wake_up(&stream->wait);
+}
+
+void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
+ void *comp_opts)
+{
+ struct squashfs_stream *stream;
+ struct decomp_stream *decomp_strm = NULL;
+ int err = -ENOMEM;
+
+ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (!stream)
+ goto out;
+
+ stream->comp_opts = comp_opts;
+ mutex_init(&stream->mutex);
+ INIT_LIST_HEAD(&stream->strm_list);
+ init_waitqueue_head(&stream->wait);
+
+ /*
+ * Allocate one decompressor up front as the default, so that
+ * if we later fail to allocate a decompressor dynamically we
+ * can always fall back to this one and the filesystem keeps
+ * working.
+ */
+ decomp_strm = kmalloc(sizeof(*decomp_strm), GFP_KERNEL);
+ if (!decomp_strm)
+ goto out;
+
+ decomp_strm->stream = msblk->decompressor->init(msblk,
+ stream->comp_opts);
+ if (IS_ERR(decomp_strm->stream)) {
+ err = PTR_ERR(decomp_strm->stream);
+ goto out;
+ }
+
+ list_add(&decomp_strm->list, &stream->strm_list);
+ stream->avail_decomp = 1;
+ return stream;
+
+out:
+ kfree(decomp_strm);
+ kfree(stream);
+ return ERR_PTR(err);
+}
+
+
+void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk)
+{
+ struct squashfs_stream *stream = msblk->stream;
+ if (stream) {
+ struct decomp_stream *decomp_strm;
+
+ while (!list_empty(&stream->strm_list)) {
+ decomp_strm = list_entry(stream->strm_list.prev,
+ struct decomp_stream, list);
+ list_del(&decomp_strm->list);
+ msblk->decompressor->free(decomp_strm->stream);
+ kfree(decomp_strm);
+ stream->avail_decomp--;
+ }
+ WARN_ON(stream->avail_decomp);
+ kfree(stream->comp_opts);
+ kfree(stream);
+ }
+}
+
+
+static struct decomp_stream *get_decomp_stream(struct squashfs_sb_info *msblk,
+ struct squashfs_stream *stream)
+{
+ struct decomp_stream *decomp_strm;
+
+ while (1) {
+ mutex_lock(&stream->mutex);
+
+ /* Use an available decomp_stream if one exists */
+ if (!list_empty(&stream->strm_list)) {
+ decomp_strm = list_entry(stream->strm_list.prev,
+ struct decomp_stream, list);
+ list_del(&decomp_strm->list);
+ mutex_unlock(&stream->mutex);
+ break;
+ }
+
+ /*
+ * If no decompressor is available and we are already at the
+ * limit, wait for another user to release one.
+ */
+ if (stream->avail_decomp >= MAX_DECOMPRESSOR)
+ goto wait;
+
+ /* Otherwise allocate a new decompressor */
+ decomp_strm = kmalloc(sizeof(*decomp_strm), GFP_KERNEL);
+ if (!decomp_strm)
+ goto wait;
+
+ decomp_strm->stream = msblk->decompressor->init(msblk,
+ stream->comp_opts);
+ if (IS_ERR(decomp_strm->stream)) {
+ kfree(decomp_strm);
+ goto wait;
+ }
+
+ stream->avail_decomp++;
+ WARN_ON(stream->avail_decomp > MAX_DECOMPRESSOR);
+
+ mutex_unlock(&stream->mutex);
+ break;
+wait:
+ /*
+ * If system memory is tight, wait for another user to release
+ * a decompressor instead of pressuring the VM, which could
+ * cause page cache thrashing.
+ */
+ mutex_unlock(&stream->mutex);
+ wait_event(stream->wait,
+ !list_empty(&stream->strm_list));
+ }
+
+ return decomp_strm;
+}
+
+
+int squashfs_decompress(struct squashfs_sb_info *msblk, struct buffer_head **bh,
+ int b, int offset, int length, struct squashfs_page_actor *output)
+{
+ int res;
+ struct squashfs_stream *stream = msblk->stream;
+ struct decomp_stream *decomp_stream = get_decomp_stream(msblk, stream);
+ res = msblk->decompressor->decompress(msblk, decomp_stream->stream,
+ bh, b, offset, length, output);
+ put_decomp_stream(decomp_stream, stream);
+ if (res < 0)
+ ERROR("%s decompression failed, data probably corrupt\n",
+ msblk->decompressor->name);
+ return res;
+}
diff --git a/fs/squashfs/decompressor_multi_percpu.c b/fs/squashfs/decompressor_multi_percpu.c
new file mode 100644
index 000000000000..23a9c28ad8ea
--- /dev/null
+++ b/fs/squashfs/decompressor_multi_percpu.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2013
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/percpu.h>
+#include <linux/buffer_head.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "decompressor.h"
+#include "squashfs.h"
+
+/*
+ * This file implements multi-threaded decompression using percpu
+ * variables, one thread per cpu core.
+ */
+
+struct squashfs_stream {
+ void *stream;
+};
+
+void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
+ void *comp_opts)
+{
+ struct squashfs_stream *stream;
+ struct squashfs_stream __percpu *percpu;
+ int err, cpu;
+
+ percpu = alloc_percpu(struct squashfs_stream);
+ if (percpu == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ for_each_possible_cpu(cpu) {
+ stream = per_cpu_ptr(percpu, cpu);
+ stream->stream = msblk->decompressor->init(msblk, comp_opts);
+ if (IS_ERR(stream->stream)) {
+ err = PTR_ERR(stream->stream);
+ goto out;
+ }
+ }
+
+ kfree(comp_opts);
+ return (__force void *) percpu;
+
+out:
+ for_each_possible_cpu(cpu) {
+ stream = per_cpu_ptr(percpu, cpu);
+ if (!IS_ERR_OR_NULL(stream->stream))
+ msblk->decompressor->free(stream->stream);
+ }
+ free_percpu(percpu);
+ return ERR_PTR(err);
+}
+
+void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk)
+{
+ struct squashfs_stream __percpu *percpu =
+ (struct squashfs_stream __percpu *) msblk->stream;
+ struct squashfs_stream *stream;
+ int cpu;
+
+ if (msblk->stream) {
+ for_each_possible_cpu(cpu) {
+ stream = per_cpu_ptr(percpu, cpu);
+ msblk->decompressor->free(stream->stream);
+ }
+ free_percpu(percpu);
+ }
+}
+
+int squashfs_decompress(struct squashfs_sb_info *msblk, struct buffer_head **bh,
+ int b, int offset, int length, struct squashfs_page_actor *output)
+{
+ struct squashfs_stream __percpu *percpu =
+ (struct squashfs_stream __percpu *) msblk->stream;
+ struct squashfs_stream *stream = get_cpu_ptr(percpu);
+ int res = msblk->decompressor->decompress(msblk, stream->stream, bh, b,
+ offset, length, output);
+ put_cpu_ptr(stream);
+
+ if (res < 0)
+ ERROR("%s decompression failed, data probably corrupt\n",
+ msblk->decompressor->name);
+
+ return res;
+}
+
+int squashfs_max_decompressors(void)
+{
+ return num_possible_cpus();
+}
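A note on the locking model above: get_cpu_ptr() disables preemption until put_cpu_ptr(), which is what makes each per-CPU stream single-owner without any mutex, but it also means the decompressor must not sleep while it holds the pointer. A minimal sketch of the pattern; do_work() is a hypothetical stand-in:

	#include <linux/percpu.h>

	struct pcpu_thing {
		void *state;
	};

	static struct pcpu_thing __percpu *things;

	static int use_local_thing(void)
	{
		/* Disables preemption: nothing else can use this CPU's copy */
		struct pcpu_thing *t = get_cpu_ptr(things);
		int ret = do_work(t->state);	/* must not sleep here */

		put_cpu_ptr(things);		/* re-enables preemption */
		return ret;
	}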
diff --git a/fs/squashfs/decompressor_single.c b/fs/squashfs/decompressor_single.c
new file mode 100644
index 000000000000..a6c75929a00e
--- /dev/null
+++ b/fs/squashfs/decompressor_single.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2013
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "decompressor.h"
+#include "squashfs.h"
+
+/*
+ * This file implements single-threaded decompression in the
+ * decompressor framework.
+ */
+
+struct squashfs_stream {
+ void *stream;
+ struct mutex mutex;
+};
+
+void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
+ void *comp_opts)
+{
+ struct squashfs_stream *stream;
+ int err = -ENOMEM;
+
+ stream = kmalloc(sizeof(*stream), GFP_KERNEL);
+ if (stream == NULL)
+ goto out;
+
+ stream->stream = msblk->decompressor->init(msblk, comp_opts);
+ if (IS_ERR(stream->stream)) {
+ err = PTR_ERR(stream->stream);
+ goto out;
+ }
+
+ kfree(comp_opts);
+ mutex_init(&stream->mutex);
+ return stream;
+
+out:
+ kfree(stream);
+ return ERR_PTR(err);
+}
+
+void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk)
+{
+ struct squashfs_stream *stream = msblk->stream;
+
+ if (stream) {
+ msblk->decompressor->free(stream->stream);
+ kfree(stream);
+ }
+}
+
+int squashfs_decompress(struct squashfs_sb_info *msblk, struct buffer_head **bh,
+ int b, int offset, int length, struct squashfs_page_actor *output)
+{
+ int res;
+ struct squashfs_stream *stream = msblk->stream;
+
+ mutex_lock(&stream->mutex);
+ res = msblk->decompressor->decompress(msblk, stream->stream, bh, b,
+ offset, length, output);
+ mutex_unlock(&stream->mutex);
+
+ if (res < 0)
+ ERROR("%s decompression failed, data probably corrupt\n",
+ msblk->decompressor->name);
+
+ return res;
+}
+
+int squashfs_max_decompressors(void)
+{
+ return 1;
+}
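All three backends (decompressor_single.c, decompressor_multi.c, decompressor_multi_percpu.c) are alternative build-time implementations of the same four-function interface, declared in the squashfs.h hunk further down in this diff:

	void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
					   void *comp_opts);
	void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk);
	int squashfs_decompress(struct squashfs_sb_info *msblk,
				struct buffer_head **bh, int b, int offset,
				int length, struct squashfs_page_actor *output);
	int squashfs_max_decompressors(void);

The rest of squashfs is therefore unaware of the threading model; only one of the three files is compiled into the image.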
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index 8ca62c28fe12..e5c9689062ba 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -370,77 +370,15 @@ static int read_blocklist(struct inode *inode, int index, u64 *block)
return le32_to_cpu(size);
}
-
-static int squashfs_readpage(struct file *file, struct page *page)
+/* Copy data into page cache */
+void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
+ int bytes, int offset)
{
struct inode *inode = page->mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
- int bytes, i, offset = 0, sparse = 0;
- struct squashfs_cache_entry *buffer = NULL;
void *pageaddr;
-
- int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1;
- int index = page->index >> (msblk->block_log - PAGE_CACHE_SHIFT);
- int start_index = page->index & ~mask;
- int end_index = start_index | mask;
- int file_end = i_size_read(inode) >> msblk->block_log;
-
- TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
- page->index, squashfs_i(inode)->start);
-
- if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT))
- goto out;
-
- if (index < file_end || squashfs_i(inode)->fragment_block ==
- SQUASHFS_INVALID_BLK) {
- /*
- * Reading a datablock from disk. Need to read block list
- * to get location and block size.
- */
- u64 block = 0;
- int bsize = read_blocklist(inode, index, &block);
- if (bsize < 0)
- goto error_out;
-
- if (bsize == 0) { /* hole */
- bytes = index == file_end ?
- (i_size_read(inode) & (msblk->block_size - 1)) :
- msblk->block_size;
- sparse = 1;
- } else {
- /*
- * Read and decompress datablock.
- */
- buffer = squashfs_get_datablock(inode->i_sb,
- block, bsize);
- if (buffer->error) {
- ERROR("Unable to read page, block %llx, size %x"
- "\n", block, bsize);
- squashfs_cache_put(buffer);
- goto error_out;
- }
- bytes = buffer->length;
- }
- } else {
- /*
- * Datablock is stored inside a fragment (tail-end packed
- * block).
- */
- buffer = squashfs_get_fragment(inode->i_sb,
- squashfs_i(inode)->fragment_block,
- squashfs_i(inode)->fragment_size);
-
- if (buffer->error) {
- ERROR("Unable to read page, block %llx, size %x\n",
- squashfs_i(inode)->fragment_block,
- squashfs_i(inode)->fragment_size);
- squashfs_cache_put(buffer);
- goto error_out;
- }
- bytes = i_size_read(inode) & (msblk->block_size - 1);
- offset = squashfs_i(inode)->fragment_offset;
- }
+ int i, mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1;
+ int start_index = page->index & ~mask, end_index = start_index | mask;
/*
* Loop copying datablock into pages. As the datablock likely covers
@@ -451,7 +389,7 @@ static int squashfs_readpage(struct file *file, struct page *page)
for (i = start_index; i <= end_index && bytes > 0; i++,
bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) {
struct page *push_page;
- int avail = sparse ? 0 : min_t(int, bytes, PAGE_CACHE_SIZE);
+ int avail = buffer ? min_t(int, bytes, PAGE_CACHE_SIZE) : 0;
TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail);
@@ -475,11 +413,75 @@ skip_page:
if (i != page->index)
page_cache_release(push_page);
}
+}
+
+/* Read datablock stored packed inside a fragment (tail-end packed block) */
+static int squashfs_readpage_fragment(struct page *page)
+{
+ struct inode *inode = page->mapping->host;
+ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+ struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
+ squashfs_i(inode)->fragment_block,
+ squashfs_i(inode)->fragment_size);
+ int res = buffer->error;
+
+ if (res)
+ ERROR("Unable to read page, block %llx, size %x\n",
+ squashfs_i(inode)->fragment_block,
+ squashfs_i(inode)->fragment_size);
+ else
+ squashfs_copy_cache(page, buffer, i_size_read(inode) &
+ (msblk->block_size - 1),
+ squashfs_i(inode)->fragment_offset);
+
+ squashfs_cache_put(buffer);
+ return res;
+}
- if (!sparse)
- squashfs_cache_put(buffer);
+static int squashfs_readpage_sparse(struct page *page, int index, int file_end)
+{
+ struct inode *inode = page->mapping->host;
+ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+ int bytes = index == file_end ?
+ (i_size_read(inode) & (msblk->block_size - 1)) :
+ msblk->block_size;
+ squashfs_copy_cache(page, NULL, bytes, 0);
return 0;
+}
+
+static int squashfs_readpage(struct file *file, struct page *page)
+{
+ struct inode *inode = page->mapping->host;
+ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+ int index = page->index >> (msblk->block_log - PAGE_CACHE_SHIFT);
+ int file_end = i_size_read(inode) >> msblk->block_log;
+ int res;
+ void *pageaddr;
+
+ TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
+ page->index, squashfs_i(inode)->start);
+
+ if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT))
+ goto out;
+
+ if (index < file_end || squashfs_i(inode)->fragment_block ==
+ SQUASHFS_INVALID_BLK) {
+ u64 block = 0;
+ int bsize = read_blocklist(inode, index, &block);
+ if (bsize < 0)
+ goto error_out;
+
+ if (bsize == 0)
+ res = squashfs_readpage_sparse(page, index, file_end);
+ else
+ res = squashfs_readpage_block(page, block, bsize);
+ } else
+ res = squashfs_readpage_fragment(page);
+
+ if (!res)
+ return 0;
error_out:
SetPageError(page);
diff --git a/fs/squashfs/file_cache.c b/fs/squashfs/file_cache.c
new file mode 100644
index 000000000000..f2310d2a2019
--- /dev/null
+++ b/fs/squashfs/file_cache.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2013
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/mutex.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+/* Read separately compressed datablock and memcopy into page cache */
+int squashfs_readpage_block(struct page *page, u64 block, int bsize)
+{
+ struct inode *i = page->mapping->host;
+ struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
+ block, bsize);
+ int res = buffer->error;
+
+ if (res)
+ ERROR("Unable to read page, block %llx, size %x\n", block,
+ bsize);
+ else
+ squashfs_copy_cache(page, buffer, buffer->length, 0);
+
+ squashfs_cache_put(buffer);
+ return res;
+}
diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
new file mode 100644
index 000000000000..62a0de6632e1
--- /dev/null
+++ b/fs/squashfs/file_direct.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2013
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/mutex.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+#include "page_actor.h"
+
+static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
+ int pages, struct page **page);
+
+/* Read separately compressed datablock directly into page cache */
+int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
+
+{
+ struct inode *inode = target_page->mapping->host;
+ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+
+ int file_end = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
+ int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1;
+ int start_index = target_page->index & ~mask;
+ int end_index = start_index | mask;
+ int i, n, pages, missing_pages, bytes, res = -ENOMEM;
+ struct page **page;
+ struct squashfs_page_actor *actor;
+ void *pageaddr;
+
+ if (end_index > file_end)
+ end_index = file_end;
+
+ pages = end_index - start_index + 1;
+
+ page = kmalloc(sizeof(void *) * pages, GFP_KERNEL);
+ if (page == NULL)
+ return res;
+
+ /*
+ * Create a "page actor" which will kmap and kunmap the
+ * page cache pages appropriately within the decompressor
+ */
+ actor = squashfs_page_actor_init_special(page, pages, 0);
+ if (actor == NULL)
+ goto out;
+
+ /* Try to grab all the pages covered by the Squashfs block */
+ for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) {
+ page[i] = (n == target_page->index) ? target_page :
+ grab_cache_page_nowait(target_page->mapping, n);
+
+ if (page[i] == NULL) {
+ missing_pages++;
+ continue;
+ }
+
+ if (PageUptodate(page[i])) {
+ unlock_page(page[i]);
+ page_cache_release(page[i]);
+ page[i] = NULL;
+ missing_pages++;
+ }
+ }
+
+ if (missing_pages) {
+ /*
+ * Couldn't get one or more pages: they have either been
+ * reclaimed by the VM while others are still in the page
+ * cache and uptodate, or we're racing with another thread
+ * in squashfs_readpage also trying to grab them. Fall back
+ * to using an intermediate buffer.
+ */
+ res = squashfs_read_cache(target_page, block, bsize, pages,
+ page);
+ if (res < 0)
+ goto mark_errored;
+
+ goto out;
+ }
+
+ /* Decompress directly into the page cache buffers */
+ res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
+ if (res < 0)
+ goto mark_errored;
+
+ /* Last page may have trailing bytes not filled */
+ bytes = res % PAGE_CACHE_SIZE;
+ if (bytes) {
+ pageaddr = kmap_atomic(page[pages - 1]);
+ memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
+ kunmap_atomic(pageaddr);
+ }
+
+ /* Mark pages as uptodate, unlock and release */
+ for (i = 0; i < pages; i++) {
+ flush_dcache_page(page[i]);
+ SetPageUptodate(page[i]);
+ unlock_page(page[i]);
+ if (page[i] != target_page)
+ page_cache_release(page[i]);
+ }
+
+ kfree(actor);
+ kfree(page);
+
+ return 0;
+
+mark_errored:
+ /*
+ * Decompression failed; mark the pages as errored. The
+ * target_page is dealt with by the caller.
+ */
+ for (i = 0; i < pages; i++) {
+ if (page[i] == NULL || page[i] == target_page)
+ continue;
+ flush_dcache_page(page[i]);
+ SetPageError(page[i]);
+ unlock_page(page[i]);
+ page_cache_release(page[i]);
+ }
+
+out:
+ kfree(actor);
+ kfree(page);
+ return res;
+}
+
+
+static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
+ int pages, struct page **page)
+{
+ struct inode *i = target_page->mapping->host;
+ struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
+ block, bsize);
+ int bytes = buffer->length, res = buffer->error, n, offset = 0;
+ void *pageaddr;
+
+ if (res) {
+ ERROR("Unable to read page, block %llx, size %x\n", block,
+ bsize);
+ goto out;
+ }
+
+ for (n = 0; n < pages && bytes > 0; n++,
+ bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) {
+ int avail = min_t(int, bytes, PAGE_CACHE_SIZE);
+
+ if (page[n] == NULL)
+ continue;
+
+ pageaddr = kmap_atomic(page[n]);
+ squashfs_copy_data(pageaddr, buffer, offset, avail);
+ memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
+ kunmap_atomic(pageaddr);
+ flush_dcache_page(page[n]);
+ SetPageUptodate(page[n]);
+ unlock_page(page[n]);
+ if (page[n] != target_page)
+ page_cache_release(page[n]);
+ }
+
+out:
+ squashfs_cache_put(buffer);
+ return res;
+}
diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c
index 00f4dfc5f088..244b9fbfff7b 100644
--- a/fs/squashfs/lzo_wrapper.c
+++ b/fs/squashfs/lzo_wrapper.c
@@ -31,13 +31,14 @@
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
+#include "page_actor.h"
struct squashfs_lzo {
void *input;
void *output;
};
-static void *lzo_init(struct squashfs_sb_info *msblk, void *buff, int len)
+static void *lzo_init(struct squashfs_sb_info *msblk, void *buff)
{
int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE);
@@ -74,22 +75,16 @@ static void lzo_free(void *strm)
}
-static int lzo_uncompress(struct squashfs_sb_info *msblk, void **buffer,
- struct buffer_head **bh, int b, int offset, int length, int srclength,
- int pages)
+static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm,
+ struct buffer_head **bh, int b, int offset, int length,
+ struct squashfs_page_actor *output)
{
- struct squashfs_lzo *stream = msblk->stream;
- void *buff = stream->input;
+ struct squashfs_lzo *stream = strm;
+ void *buff = stream->input, *data;
int avail, i, bytes = length, res;
- size_t out_len = srclength;
-
- mutex_lock(&msblk->read_data_mutex);
+ size_t out_len = output->length;
for (i = 0; i < b; i++) {
- wait_on_buffer(bh[i]);
- if (!buffer_uptodate(bh[i]))
- goto block_release;
-
avail = min(bytes, msblk->devblksize - offset);
memcpy(buff, bh[i]->b_data + offset, avail);
buff += avail;
@@ -104,24 +99,24 @@ static int lzo_uncompress(struct squashfs_sb_info *msblk, void **buffer,
goto failed;
res = bytes = (int)out_len;
- for (i = 0, buff = stream->output; bytes && i < pages; i++) {
- avail = min_t(int, bytes, PAGE_CACHE_SIZE);
- memcpy(buffer[i], buff, avail);
- buff += avail;
- bytes -= avail;
+ data = squashfs_first_page(output);
+ buff = stream->output;
+ while (data) {
+ if (bytes <= PAGE_CACHE_SIZE) {
+ memcpy(data, buff, bytes);
+ break;
+ } else {
+ memcpy(data, buff, PAGE_CACHE_SIZE);
+ buff += PAGE_CACHE_SIZE;
+ bytes -= PAGE_CACHE_SIZE;
+ data = squashfs_next_page(output);
+ }
}
+ squashfs_finish_page(output);
- mutex_unlock(&msblk->read_data_mutex);
return res;
-block_release:
- for (; i < b; i++)
- put_bh(bh[i]);
-
failed:
- mutex_unlock(&msblk->read_data_mutex);
-
- ERROR("lzo decompression failed, data probably corrupt\n");
return -EIO;
}
diff --git a/fs/squashfs/page_actor.c b/fs/squashfs/page_actor.c
new file mode 100644
index 000000000000..5a1c11f56441
--- /dev/null
+++ b/fs/squashfs/page_actor.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2013
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include "page_actor.h"
+
+/*
+ * This file contains implementations of page_actor for decompressing into
+ * an intermediate buffer, and for decompressing directly into the
+ * page cache.
+ *
+ * Calling code should avoid sleeping between calls to squashfs_first_page()
+ * and squashfs_finish_page().
+ */
+
+/* Implementation of page_actor for decompressing into intermediate buffer */
+static void *cache_first_page(struct squashfs_page_actor *actor)
+{
+ actor->next_page = 1;
+ return actor->buffer[0];
+}
+
+static void *cache_next_page(struct squashfs_page_actor *actor)
+{
+ if (actor->next_page == actor->pages)
+ return NULL;
+
+ return actor->buffer[actor->next_page++];
+}
+
+static void cache_finish_page(struct squashfs_page_actor *actor)
+{
+ /* empty */
+}
+
+struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
+ int pages, int length)
+{
+ struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
+
+ if (actor == NULL)
+ return NULL;
+
+ actor->length = length ? : pages * PAGE_CACHE_SIZE;
+ actor->buffer = buffer;
+ actor->pages = pages;
+ actor->next_page = 0;
+ actor->squashfs_first_page = cache_first_page;
+ actor->squashfs_next_page = cache_next_page;
+ actor->squashfs_finish_page = cache_finish_page;
+ return actor;
+}
+
+/* Implementation of page_actor for decompressing directly into page cache. */
+static void *direct_first_page(struct squashfs_page_actor *actor)
+{
+ actor->next_page = 1;
+ return actor->pageaddr = kmap_atomic(actor->page[0]);
+}
+
+static void *direct_next_page(struct squashfs_page_actor *actor)
+{
+ if (actor->pageaddr)
+ kunmap_atomic(actor->pageaddr);
+
+ return actor->pageaddr = actor->next_page == actor->pages ? NULL :
+ kmap_atomic(actor->page[actor->next_page++]);
+}
+
+static void direct_finish_page(struct squashfs_page_actor *actor)
+{
+ if (actor->pageaddr)
+ kunmap_atomic(actor->pageaddr);
+}
+
+struct squashfs_page_actor *squashfs_page_actor_init_special(struct page **page,
+ int pages, int length)
+{
+ struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
+
+ if (actor == NULL)
+ return NULL;
+
+ actor->length = length ? : pages * PAGE_CACHE_SIZE;
+ actor->page = page;
+ actor->pages = pages;
+ actor->next_page = 0;
+ actor->pageaddr = NULL;
+ actor->squashfs_first_page = direct_first_page;
+ actor->squashfs_next_page = direct_next_page;
+ actor->squashfs_finish_page = direct_finish_page;
+ return actor;
+}
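For illustration, a consumer of this API walks the output pages the way the lzo/xz/zlib wrappers elsewhere in this diff do; the direct variant kmap_atomic()s each page, which is why no sleeping is allowed between squashfs_first_page() and squashfs_finish_page(). A sketch of the loop, assuming the squashfs headers are in scope (fill_output() itself is hypothetical):

	static void fill_output(struct squashfs_page_actor *actor,
				const char *src, int bytes)
	{
		void *dst = squashfs_first_page(actor);	/* may kmap_atomic() */

		while (dst != NULL && bytes > 0) {
			int avail = min_t(int, bytes, PAGE_CACHE_SIZE);

			memcpy(dst, src, avail);
			src += avail;
			bytes -= avail;
			dst = squashfs_next_page(actor); /* unmaps the previous page */
		}
		squashfs_finish_page(actor);	/* unmap whatever is still mapped */
	}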
diff --git a/fs/squashfs/page_actor.h b/fs/squashfs/page_actor.h
new file mode 100644
index 000000000000..26dd82008b82
--- /dev/null
+++ b/fs/squashfs/page_actor.h
@@ -0,0 +1,81 @@
+#ifndef PAGE_ACTOR_H
+#define PAGE_ACTOR_H
+/*
+ * Copyright (c) 2013
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef CONFIG_SQUASHFS_FILE_DIRECT
+struct squashfs_page_actor {
+ void **page;
+ int pages;
+ int length;
+ int next_page;
+};
+
+static inline struct squashfs_page_actor *squashfs_page_actor_init(void **page,
+ int pages, int length)
+{
+ struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
+
+ if (actor == NULL)
+ return NULL;
+
+ actor->length = length ? : pages * PAGE_CACHE_SIZE;
+ actor->page = page;
+ actor->pages = pages;
+ actor->next_page = 0;
+ return actor;
+}
+
+static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
+{
+ actor->next_page = 1;
+ return actor->page[0];
+}
+
+static inline void *squashfs_next_page(struct squashfs_page_actor *actor)
+{
+ return actor->next_page == actor->pages ? NULL :
+ actor->page[actor->next_page++];
+}
+
+static inline void squashfs_finish_page(struct squashfs_page_actor *actor)
+{
+ /* empty */
+}
+#else
+struct squashfs_page_actor {
+ union {
+ void **buffer;
+ struct page **page;
+ };
+ void *pageaddr;
+ void *(*squashfs_first_page)(struct squashfs_page_actor *);
+ void *(*squashfs_next_page)(struct squashfs_page_actor *);
+ void (*squashfs_finish_page)(struct squashfs_page_actor *);
+ int pages;
+ int length;
+ int next_page;
+};
+
+extern struct squashfs_page_actor *squashfs_page_actor_init(void **, int, int);
+extern struct squashfs_page_actor *squashfs_page_actor_init_special(struct page
+ **, int, int);
+static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
+{
+ return actor->squashfs_first_page(actor);
+}
+static inline void *squashfs_next_page(struct squashfs_page_actor *actor)
+{
+ return actor->squashfs_next_page(actor);
+}
+static inline void squashfs_finish_page(struct squashfs_page_actor *actor)
+{
+ actor->squashfs_finish_page(actor);
+}
+#endif
+#endif
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index d1266516ed08..9e1bb79f7e6f 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -28,8 +28,8 @@
#define WARNING(s, args...) pr_warning("SQUASHFS: "s, ## args)
/* block.c */
-extern int squashfs_read_data(struct super_block *, void **, u64, int, u64 *,
- int, int);
+extern int squashfs_read_data(struct super_block *, u64, int, u64 *,
+ struct squashfs_page_actor *);
/* cache.c */
extern struct squashfs_cache *squashfs_cache_init(char *, int, int);
@@ -48,7 +48,14 @@ extern void *squashfs_read_table(struct super_block *, u64, int);
/* decompressor.c */
extern const struct squashfs_decompressor *squashfs_lookup_decompressor(int);
-extern void *squashfs_decompressor_init(struct super_block *, unsigned short);
+extern void *squashfs_decompressor_setup(struct super_block *, unsigned short);
+
+/* decompressor_xxx.c */
+extern void *squashfs_decompressor_create(struct squashfs_sb_info *, void *);
+extern void squashfs_decompressor_destroy(struct squashfs_sb_info *);
+extern int squashfs_decompress(struct squashfs_sb_info *, struct buffer_head **,
+ int, int, int, struct squashfs_page_actor *);
+extern int squashfs_max_decompressors(void);
/* export.c */
extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64, u64,
@@ -59,6 +66,13 @@ extern int squashfs_frag_lookup(struct super_block *, unsigned int, u64 *);
extern __le64 *squashfs_read_fragment_index_table(struct super_block *,
u64, u64, unsigned int);
+/* file.c */
+void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int,
+ int);
+
+/* file_xxx.c */
+extern int squashfs_readpage_block(struct page *, u64, int);
+
/* id.c */
extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *);
extern __le64 *squashfs_read_id_index_table(struct super_block *, u64, u64,
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
index 52934a22f296..1da565cb50c3 100644
--- a/fs/squashfs/squashfs_fs_sb.h
+++ b/fs/squashfs/squashfs_fs_sb.h
@@ -50,6 +50,7 @@ struct squashfs_cache_entry {
wait_queue_head_t wait_queue;
struct squashfs_cache *cache;
void **data;
+ struct squashfs_page_actor *actor;
};
struct squashfs_sb_info {
@@ -63,10 +64,9 @@ struct squashfs_sb_info {
__le64 *id_table;
__le64 *fragment_index;
__le64 *xattr_id_table;
- struct mutex read_data_mutex;
struct mutex meta_index_mutex;
struct meta_index *meta_index;
- void *stream;
+ struct squashfs_stream *stream;
__le64 *inode_lookup_table;
u64 inode_table;
u64 directory_table;
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 60553a9053ca..202df6312d4e 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -98,7 +98,6 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
msblk->devblksize_log2 = ffz(~msblk->devblksize);
- mutex_init(&msblk->read_data_mutex);
mutex_init(&msblk->meta_index_mutex);
/*
@@ -206,13 +205,14 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount;
/* Allocate read_page block */
- msblk->read_page = squashfs_cache_init("data", 1, msblk->block_size);
+ msblk->read_page = squashfs_cache_init("data",
+ squashfs_max_decompressors(), msblk->block_size);
if (msblk->read_page == NULL) {
ERROR("Failed to allocate read_page block\n");
goto failed_mount;
}
- msblk->stream = squashfs_decompressor_init(sb, flags);
+ msblk->stream = squashfs_decompressor_setup(sb, flags);
if (IS_ERR(msblk->stream)) {
err = PTR_ERR(msblk->stream);
msblk->stream = NULL;
@@ -336,7 +336,7 @@ failed_mount:
squashfs_cache_delete(msblk->block_cache);
squashfs_cache_delete(msblk->fragment_cache);
squashfs_cache_delete(msblk->read_page);
- squashfs_decompressor_free(msblk, msblk->stream);
+ squashfs_decompressor_destroy(msblk);
kfree(msblk->inode_lookup_table);
kfree(msblk->fragment_index);
kfree(msblk->id_table);
@@ -383,7 +383,7 @@ static void squashfs_put_super(struct super_block *sb)
squashfs_cache_delete(sbi->block_cache);
squashfs_cache_delete(sbi->fragment_cache);
squashfs_cache_delete(sbi->read_page);
- squashfs_decompressor_free(sbi, sbi->stream);
+ squashfs_decompressor_destroy(sbi);
kfree(sbi->id_table);
kfree(sbi->fragment_index);
kfree(sbi->meta_index);
diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c
index 1760b7d108f6..c609624e4b8a 100644
--- a/fs/squashfs/xz_wrapper.c
+++ b/fs/squashfs/xz_wrapper.c
@@ -32,44 +32,70 @@
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
+#include "page_actor.h"
struct squashfs_xz {
struct xz_dec *state;
struct xz_buf buf;
};
-struct comp_opts {
+struct disk_comp_opts {
__le32 dictionary_size;
__le32 flags;
};
-static void *squashfs_xz_init(struct squashfs_sb_info *msblk, void *buff,
- int len)
+struct comp_opts {
+ int dict_size;
+};
+
+static void *squashfs_xz_comp_opts(struct squashfs_sb_info *msblk,
+ void *buff, int len)
{
- struct comp_opts *comp_opts = buff;
- struct squashfs_xz *stream;
- int dict_size = msblk->block_size;
- int err, n;
+ struct disk_comp_opts *comp_opts = buff;
+ struct comp_opts *opts;
+ int err = 0, n;
+
+ opts = kmalloc(sizeof(*opts), GFP_KERNEL);
+ if (opts == NULL) {
+ err = -ENOMEM;
+ goto out2;
+ }
if (comp_opts) {
/* check compressor options are the expected length */
if (len < sizeof(*comp_opts)) {
err = -EIO;
- goto failed;
+ goto out;
}
- dict_size = le32_to_cpu(comp_opts->dictionary_size);
+ opts->dict_size = le32_to_cpu(comp_opts->dictionary_size);
/* the dictionary size should be 2^n or 2^n+2^(n+1) */
- n = ffs(dict_size) - 1;
- if (dict_size != (1 << n) && dict_size != (1 << n) +
+ n = ffs(opts->dict_size) - 1;
+ if (opts->dict_size != (1 << n) && opts->dict_size != (1 << n) +
(1 << (n + 1))) {
err = -EIO;
- goto failed;
+ goto out;
}
- }
+ } else
+ /* use defaults */
+ opts->dict_size = max_t(int, msblk->block_size,
+ SQUASHFS_METADATA_SIZE);
+
+ return opts;
+
+out:
+ kfree(opts);
+out2:
+ return ERR_PTR(err);
+}
+
- dict_size = max_t(int, dict_size, SQUASHFS_METADATA_SIZE);
+static void *squashfs_xz_init(struct squashfs_sb_info *msblk, void *buff)
+{
+ struct comp_opts *comp_opts = buff;
+ struct squashfs_xz *stream;
+ int err;
stream = kmalloc(sizeof(*stream), GFP_KERNEL);
if (stream == NULL) {
@@ -77,7 +103,7 @@ static void *squashfs_xz_init(struct squashfs_sb_info *msblk, void *buff,
goto failed;
}
- stream->state = xz_dec_init(XZ_PREALLOC, dict_size);
+ stream->state = xz_dec_init(XZ_PREALLOC, comp_opts->dict_size);
if (stream->state == NULL) {
kfree(stream);
err = -ENOMEM;
@@ -103,42 +129,37 @@ static void squashfs_xz_free(void *strm)
}
-static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void **buffer,
- struct buffer_head **bh, int b, int offset, int length, int srclength,
- int pages)
+static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
+ struct buffer_head **bh, int b, int offset, int length,
+ struct squashfs_page_actor *output)
{
enum xz_ret xz_err;
- int avail, total = 0, k = 0, page = 0;
- struct squashfs_xz *stream = msblk->stream;
-
- mutex_lock(&msblk->read_data_mutex);
+ int avail, total = 0, k = 0;
+ struct squashfs_xz *stream = strm;
xz_dec_reset(stream->state);
stream->buf.in_pos = 0;
stream->buf.in_size = 0;
stream->buf.out_pos = 0;
stream->buf.out_size = PAGE_CACHE_SIZE;
- stream->buf.out = buffer[page++];
+ stream->buf.out = squashfs_first_page(output);
do {
if (stream->buf.in_pos == stream->buf.in_size && k < b) {
avail = min(length, msblk->devblksize - offset);
length -= avail;
- wait_on_buffer(bh[k]);
- if (!buffer_uptodate(bh[k]))
- goto release_mutex;
-
stream->buf.in = bh[k]->b_data + offset;
stream->buf.in_size = avail;
stream->buf.in_pos = 0;
offset = 0;
}
- if (stream->buf.out_pos == stream->buf.out_size
- && page < pages) {
- stream->buf.out = buffer[page++];
- stream->buf.out_pos = 0;
- total += PAGE_CACHE_SIZE;
+ if (stream->buf.out_pos == stream->buf.out_size) {
+ stream->buf.out = squashfs_next_page(output);
+ if (stream->buf.out != NULL) {
+ stream->buf.out_pos = 0;
+ total += PAGE_CACHE_SIZE;
+ }
}
xz_err = xz_dec_run(stream->state, &stream->buf);
@@ -147,23 +168,14 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void **buffer,
put_bh(bh[k++]);
} while (xz_err == XZ_OK);
- if (xz_err != XZ_STREAM_END) {
- ERROR("xz_dec_run error, data probably corrupt\n");
- goto release_mutex;
- }
-
- if (k < b) {
- ERROR("xz_uncompress error, input remaining\n");
- goto release_mutex;
- }
+ squashfs_finish_page(output);
- total += stream->buf.out_pos;
- mutex_unlock(&msblk->read_data_mutex);
- return total;
+ if (xz_err != XZ_STREAM_END || k < b)
+ goto out;
-release_mutex:
- mutex_unlock(&msblk->read_data_mutex);
+ return total + stream->buf.out_pos;
+out:
for (; k < b; k++)
put_bh(bh[k]);
@@ -172,6 +184,7 @@ release_mutex:
const struct squashfs_decompressor squashfs_xz_comp_ops = {
.init = squashfs_xz_init,
+ .comp_opts = squashfs_xz_comp_opts,
.free = squashfs_xz_free,
.decompress = squashfs_xz_uncompress,
.id = XZ_COMPRESSION,
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
index 55d918fd2d86..8727caba6882 100644
--- a/fs/squashfs/zlib_wrapper.c
+++ b/fs/squashfs/zlib_wrapper.c
@@ -32,8 +32,9 @@
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
+#include "page_actor.h"
-static void *zlib_init(struct squashfs_sb_info *dummy, void *buff, int len)
+static void *zlib_init(struct squashfs_sb_info *dummy, void *buff)
{
z_stream *stream = kmalloc(sizeof(z_stream), GFP_KERNEL);
if (stream == NULL)
@@ -61,44 +62,37 @@ static void zlib_free(void *strm)
}
-static int zlib_uncompress(struct squashfs_sb_info *msblk, void **buffer,
- struct buffer_head **bh, int b, int offset, int length, int srclength,
- int pages)
+static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
+ struct buffer_head **bh, int b, int offset, int length,
+ struct squashfs_page_actor *output)
{
- int zlib_err, zlib_init = 0;
- int k = 0, page = 0;
- z_stream *stream = msblk->stream;
-
- mutex_lock(&msblk->read_data_mutex);
+ int zlib_err, zlib_init = 0, k = 0;
+ z_stream *stream = strm;
- stream->avail_out = 0;
+ stream->avail_out = PAGE_CACHE_SIZE;
+ stream->next_out = squashfs_first_page(output);
stream->avail_in = 0;
do {
if (stream->avail_in == 0 && k < b) {
int avail = min(length, msblk->devblksize - offset);
length -= avail;
- wait_on_buffer(bh[k]);
- if (!buffer_uptodate(bh[k]))
- goto release_mutex;
-
stream->next_in = bh[k]->b_data + offset;
stream->avail_in = avail;
offset = 0;
}
- if (stream->avail_out == 0 && page < pages) {
- stream->next_out = buffer[page++];
- stream->avail_out = PAGE_CACHE_SIZE;
+ if (stream->avail_out == 0) {
+ stream->next_out = squashfs_next_page(output);
+ if (stream->next_out != NULL)
+ stream->avail_out = PAGE_CACHE_SIZE;
}
if (!zlib_init) {
zlib_err = zlib_inflateInit(stream);
if (zlib_err != Z_OK) {
- ERROR("zlib_inflateInit returned unexpected "
- "result 0x%x, srclength %d\n",
- zlib_err, srclength);
- goto release_mutex;
+ squashfs_finish_page(output);
+ goto out;
}
zlib_init = 1;
}
@@ -109,29 +103,21 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void **buffer,
put_bh(bh[k++]);
} while (zlib_err == Z_OK);
- if (zlib_err != Z_STREAM_END) {
- ERROR("zlib_inflate error, data probably corrupt\n");
- goto release_mutex;
- }
+ squashfs_finish_page(output);
- zlib_err = zlib_inflateEnd(stream);
- if (zlib_err != Z_OK) {
- ERROR("zlib_inflate error, data probably corrupt\n");
- goto release_mutex;
- }
+ if (zlib_err != Z_STREAM_END)
+ goto out;
- if (k < b) {
- ERROR("zlib_uncompress error, data remaining\n");
- goto release_mutex;
- }
+ zlib_err = zlib_inflateEnd(stream);
+ if (zlib_err != Z_OK)
+ goto out;
- length = stream->total_out;
- mutex_unlock(&msblk->read_data_mutex);
- return length;
+ if (k < b)
+ goto out;
-release_mutex:
- mutex_unlock(&msblk->read_data_mutex);
+ return stream->total_out;
+out:
for (; k < b; k++)
put_bh(bh[k]);
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 79b5da2acbe1..35e7d08fe629 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -649,7 +649,23 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
if (!of)
goto err_out;
- mutex_init(&of->mutex);
+ /*
+ * The following is done to give a different lockdep key to
+ * @of->mutex for files which implement mmap. This is a rather
+ * crude way to avoid a false positive lockdep warning around
+ * mm->mmap_sem - mmap nests @of->mutex under mm->mmap_sem and
+ * reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
+ * which mm->mmap_sem nests, while holding @of->mutex. As each
+ * open file has a separate mutex, it's okay as long as those don't
+ * happen on the same file. At this point, we can't easily give
+ * each file a separate locking class. Let's differentiate on
+ * whether the file is bin or not for now.
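+ *
+ * Note: mutex_init() is a macro that supplies a distinct static
+ * lockdep class key at each call site, which is why the two
+ * textually identical branches below still yield two different
+ * lock classes.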
+ */
+ if (sysfs_is_bin(attr_sd))
+ mutex_init(&of->mutex);
+ else
+ mutex_init(&of->mutex);
+
of->sd = attr_sd;
of->file = file;
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 1c02da8bb7df..3b2c14b6f0fb 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -1137,6 +1137,7 @@ xfs_bmap_add_attrfork(
int committed; /* xaction was committed */
int logflags; /* logging flags */
int error; /* error return value */
+ int cancel_flags = 0;
ASSERT(XFS_IFORK_Q(ip) == 0);
@@ -1147,19 +1148,20 @@ xfs_bmap_add_attrfork(
if (rsvd)
tp->t_flags |= XFS_TRANS_RESERVE;
error = xfs_trans_reserve(tp, &M_RES(mp)->tr_addafork, blks, 0);
- if (error)
- goto error0;
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ return error;
+ }
+ cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
xfs_ilock(ip, XFS_ILOCK_EXCL);
error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
XFS_QMOPT_RES_REGBLKS);
- if (error) {
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES);
- return error;
- }
+ if (error)
+ goto trans_cancel;
+ cancel_flags |= XFS_TRANS_ABORT;
if (XFS_IFORK_Q(ip))
- goto error1;
+ goto trans_cancel;
if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
/*
* For inodes coming from pre-6.2 filesystems.
@@ -1169,7 +1171,7 @@ xfs_bmap_add_attrfork(
}
ASSERT(ip->i_d.di_anextents == 0);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, 0);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
switch (ip->i_d.di_format) {
@@ -1191,7 +1193,7 @@ xfs_bmap_add_attrfork(
default:
ASSERT(0);
error = XFS_ERROR(EINVAL);
- goto error1;
+ goto trans_cancel;
}
ASSERT(ip->i_afp == NULL);
@@ -1219,7 +1221,7 @@ xfs_bmap_add_attrfork(
if (logflags)
xfs_trans_log_inode(tp, ip, logflags);
if (error)
- goto error2;
+ goto bmap_cancel;
if (!xfs_sb_version_hasattr(&mp->m_sb) ||
(!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
__int64_t sbfields = 0;
@@ -1242,14 +1244,16 @@ xfs_bmap_add_attrfork(
error = xfs_bmap_finish(&tp, &flist, &committed);
if (error)
- goto error2;
- return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-error2:
+ goto bmap_cancel;
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return error;
+
+bmap_cancel:
xfs_bmap_cancel(&flist);
-error1:
+trans_cancel:
+ xfs_trans_cancel(tp, cancel_flags);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
-error0:
- xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
return error;
}
@@ -1631,7 +1635,7 @@ xfs_bmap_last_extent(
* blocks at the end of the file which do not start at the previous data block,
* we will try to align the new blocks at stripe unit boundaries.
*
- * Returns 0 in bma->aeof if the file (fork) is empty as any new write will be
+ * Returns 1 in bma->aeof if the file (fork) is empty, as any new write will be
* at, or past the EOF.
*/
STATIC int
@@ -1646,9 +1650,14 @@ xfs_bmap_isaeof(
bma->aeof = 0;
error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
&is_empty);
- if (error || is_empty)
+ if (error)
return error;
+ if (is_empty) {
+ bma->aeof = 1;
+ return 0;
+ }
+
/*
* Check if we are allocating at or past the last extent, or at least into
* the last delayed allocated extent.
@@ -3639,10 +3648,19 @@ xfs_bmap_btalloc(
int isaligned;
int tryagain;
int error;
+ int stripe_align;
ASSERT(ap->length);
mp = ap->ip->i_mount;
+
+ /* stripe alignment for allocation is determined by mount parameters */
+ stripe_align = 0;
+ if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
+ stripe_align = mp->m_swidth;
+ else if (mp->m_dalign)
+ stripe_align = mp->m_dalign;
+
align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
if (unlikely(align)) {
error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
@@ -3651,6 +3669,8 @@ xfs_bmap_btalloc(
ASSERT(!error);
ASSERT(ap->length);
}
+
+
nullfb = *ap->firstblock == NULLFSBLOCK;
fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
if (nullfb) {
@@ -3726,7 +3746,7 @@ xfs_bmap_btalloc(
*/
if (!ap->flist->xbf_low && ap->aeof) {
if (!ap->offset) {
- args.alignment = mp->m_dalign;
+ args.alignment = stripe_align;
atype = args.type;
isaligned = 1;
/*
@@ -3751,13 +3771,13 @@ xfs_bmap_btalloc(
* of minlen+alignment+slop doesn't go up
* between the calls.
*/
- if (blen > mp->m_dalign && blen <= args.maxlen)
- nextminlen = blen - mp->m_dalign;
+ if (blen > stripe_align && blen <= args.maxlen)
+ nextminlen = blen - stripe_align;
else
nextminlen = args.minlen;
- if (nextminlen + mp->m_dalign > args.minlen + 1)
+ if (nextminlen + stripe_align > args.minlen + 1)
args.minalignslop =
- nextminlen + mp->m_dalign -
+ nextminlen + stripe_align -
args.minlen - 1;
else
args.minalignslop = 0;
@@ -3779,7 +3799,7 @@ xfs_bmap_btalloc(
*/
args.type = atype;
args.fsbno = ap->blkno;
- args.alignment = mp->m_dalign;
+ args.alignment = stripe_align;
args.minlen = nextminlen;
args.minalignslop = 0;
isaligned = 1;
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 5887e41c0323..1394106ed22d 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1187,7 +1187,12 @@ xfs_zero_remaining_bytes(
XFS_BUF_UNWRITE(bp);
XFS_BUF_READ(bp);
XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
- xfsbdstrat(mp, bp);
+
+ if (XFS_FORCED_SHUTDOWN(mp)) {
+ error = XFS_ERROR(EIO);
+ break;
+ }
+ xfs_buf_iorequest(bp);
error = xfs_buf_iowait(bp);
if (error) {
xfs_buf_ioerror_alert(bp,
@@ -1200,7 +1205,12 @@ xfs_zero_remaining_bytes(
XFS_BUF_UNDONE(bp);
XFS_BUF_UNREAD(bp);
XFS_BUF_WRITE(bp);
- xfsbdstrat(mp, bp);
+
+ if (XFS_FORCED_SHUTDOWN(mp)) {
+ error = XFS_ERROR(EIO);
+ break;
+ }
+ xfs_buf_iorequest(bp);
error = xfs_buf_iowait(bp);
if (error) {
xfs_buf_ioerror_alert(bp,
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index c7f0b77dcb00..afe7645e4b2b 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -698,7 +698,11 @@ xfs_buf_read_uncached(
bp->b_flags |= XBF_READ;
bp->b_ops = ops;
- xfsbdstrat(target->bt_mount, bp);
+ if (XFS_FORCED_SHUTDOWN(target->bt_mount)) {
+ xfs_buf_relse(bp);
+ return NULL;
+ }
+ xfs_buf_iorequest(bp);
xfs_buf_iowait(bp);
return bp;
}
@@ -1089,7 +1093,7 @@ xfs_bioerror(
* This is meant for userdata errors; metadata bufs come with
* iodone functions attached, so that we can track down errors.
*/
-STATIC int
+int
xfs_bioerror_relse(
struct xfs_buf *bp)
{
@@ -1152,7 +1156,7 @@ xfs_bwrite(
ASSERT(xfs_buf_islocked(bp));
bp->b_flags |= XBF_WRITE;
- bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
+ bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | XBF_WRITE_FAIL);
xfs_bdstrat_cb(bp);
@@ -1164,25 +1168,6 @@ xfs_bwrite(
return error;
}
-/*
- * Wrapper around bdstrat so that we can stop data from going to disk in case
- * we are shutting down the filesystem. Typically user data goes thru this
- * path; one of the exceptions is the superblock.
- */
-void
-xfsbdstrat(
- struct xfs_mount *mp,
- struct xfs_buf *bp)
-{
- if (XFS_FORCED_SHUTDOWN(mp)) {
- trace_xfs_bdstrat_shut(bp, _RET_IP_);
- xfs_bioerror_relse(bp);
- return;
- }
-
- xfs_buf_iorequest(bp);
-}
-
STATIC void
_xfs_buf_ioend(
xfs_buf_t *bp,
@@ -1516,6 +1501,12 @@ xfs_wait_buftarg(
struct xfs_buf *bp;
bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
list_del_init(&bp->b_lru);
+ if (bp->b_flags & XBF_WRITE_FAIL) {
+ xfs_alert(btp->bt_mount,
+"Corruption Alert: Buffer at block 0x%llx had permanent write failures!\n"
+"Please run xfs_repair to determine the extent of the problem.",
+ (long long)bp->b_bn);
+ }
xfs_buf_rele(bp);
}
if (loop++ != 0)
@@ -1799,7 +1790,7 @@ __xfs_buf_delwri_submit(
blk_start_plug(&plug);
list_for_each_entry_safe(bp, n, io_list, b_list) {
- bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC);
+ bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
bp->b_flags |= XBF_WRITE;
if (!wait) {
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index e65683361017..1cf21a4a9f22 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -45,6 +45,7 @@ typedef enum {
#define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */
#define XBF_STALE (1 << 6) /* buffer has been staled, do not find it */
+#define XBF_WRITE_FAIL (1 << 24)/* async writes have failed on this buffer */
/* I/O hints for the BIO layer */
#define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */
@@ -70,6 +71,7 @@ typedef unsigned int xfs_buf_flags_t;
{ XBF_ASYNC, "ASYNC" }, \
{ XBF_DONE, "DONE" }, \
{ XBF_STALE, "STALE" }, \
+ { XBF_WRITE_FAIL, "WRITE_FAIL" }, \
{ XBF_SYNCIO, "SYNCIO" }, \
{ XBF_FUA, "FUA" }, \
{ XBF_FLUSH, "FLUSH" }, \
@@ -80,6 +82,7 @@ typedef unsigned int xfs_buf_flags_t;
{ _XBF_DELWRI_Q, "DELWRI_Q" }, \
{ _XBF_COMPOUND, "COMPOUND" }
+
/*
* Internal state flags.
*/
@@ -269,9 +272,6 @@ extern void xfs_buf_unlock(xfs_buf_t *);
/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);
-
-extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
-
extern void xfs_buf_ioend(xfs_buf_t *, int);
extern void xfs_buf_ioerror(xfs_buf_t *, int);
extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
@@ -282,6 +282,8 @@ extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
#define xfs_buf_zero(bp, off, len) \
xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
+extern int xfs_bioerror_relse(struct xfs_buf *);
+
static inline int xfs_buf_geterror(xfs_buf_t *bp)
{
return bp ? bp->b_error : ENOMEM;
@@ -301,7 +303,8 @@ extern void xfs_buf_terminate(void);
#define XFS_BUF_ZEROFLAGS(bp) \
((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \
- XBF_SYNCIO|XBF_FUA|XBF_FLUSH))
+ XBF_SYNCIO|XBF_FUA|XBF_FLUSH| \
+ XBF_WRITE_FAIL))
void xfs_buf_stale(struct xfs_buf *bp);
#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE)
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index a64f67ba25d3..2227b9b050bb 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -496,6 +496,14 @@ xfs_buf_item_unpin(
}
}
+/*
+ * Buffer IO error rate limiting. Limit it to no more than 10 messages per 30
+ * seconds so as not to spam the logs on repeated detection of the same
+ * buffer being bad.
+ */
+
+DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);
+
STATIC uint
xfs_buf_item_push(
struct xfs_log_item *lip,
@@ -524,6 +532,14 @@ xfs_buf_item_push(
trace_xfs_buf_item_push(bip);
+ /* has a previous flush failed due to IO errors? */
+ if ((bp->b_flags & XBF_WRITE_FAIL) &&
+ ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) {
+ xfs_warn(bp->b_target->bt_mount,
+"Detected failing async write on buffer block 0x%llx. Retrying async write.\n",
+ (long long)bp->b_bn);
+ }
+
if (!xfs_buf_delwri_queue(bp, buffer_list))
rval = XFS_ITEM_FLUSHING;
xfs_buf_unlock(bp);
@@ -1096,8 +1112,9 @@ xfs_buf_iodone_callbacks(
xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */
- if (!XFS_BUF_ISSTALE(bp)) {
- bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE;
+ if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL))) {
+ bp->b_flags |= XBF_WRITE | XBF_ASYNC |
+ XBF_DONE | XBF_WRITE_FAIL;
xfs_buf_iorequest(bp);
} else {
xfs_buf_relse(bp);
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index 56369d4509d5..48c7d18f68c3 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -2067,12 +2067,12 @@ xfs_dir2_node_lookup(
*/
int /* error */
xfs_dir2_node_removename(
- xfs_da_args_t *args) /* operation arguments */
+ struct xfs_da_args *args) /* operation arguments */
{
- xfs_da_state_blk_t *blk; /* leaf block */
+ struct xfs_da_state_blk *blk; /* leaf block */
int error; /* error return value */
int rval; /* operation return value */
- xfs_da_state_t *state; /* btree cursor */
+ struct xfs_da_state *state; /* btree cursor */
trace_xfs_dir2_node_removename(args);
@@ -2084,19 +2084,18 @@ xfs_dir2_node_removename(
state->mp = args->dp->i_mount;
state->blocksize = state->mp->m_dirblksize;
state->node_ents = state->mp->m_dir_node_ents;
- /*
- * Look up the entry we're deleting, set up the cursor.
- */
+
+ /* Look up the entry we're deleting, set up the cursor. */
error = xfs_da3_node_lookup_int(state, &rval);
if (error)
- rval = error;
- /*
- * Didn't find it, upper layer screwed up.
- */
+ goto out_free;
+
+ /* Didn't find it, upper layer screwed up. */
if (rval != EEXIST) {
- xfs_da_state_free(state);
- return rval;
+ error = rval;
+ goto out_free;
}
+
blk = &state->path.blk[state->path.active - 1];
ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC);
ASSERT(state->extravalid);
@@ -2107,7 +2106,7 @@ xfs_dir2_node_removename(
error = xfs_dir2_leafn_remove(args, blk->bp, blk->index,
&state->extrablk, &rval);
if (error)
- return error;
+ goto out_free;
/*
* Fix the hash values up the btree.
*/
@@ -2122,6 +2121,7 @@ xfs_dir2_node_removename(
*/
if (!error)
error = xfs_dir2_node_to_leaf(state);
+out_free:
xfs_da_state_free(state);
return error;
}
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 8367d6dc18c9..4f11ef011139 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -157,7 +157,7 @@ xfs_ioc_trim(
struct xfs_mount *mp,
struct fstrim_range __user *urange)
{
- struct request_queue *q = mp->m_ddev_targp->bt_bdev->bd_disk->queue;
+ struct request_queue *q = bdev_get_queue(mp->m_ddev_targp->bt_bdev);
unsigned int granularity = q->limits.discard_granularity;
struct fstrim_range range;
xfs_daddr_t start, end, minlen;
@@ -180,7 +180,8 @@ xfs_ioc_trim(
* matter as trimming blocks is an advisory interface.
*/
if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) ||
- range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)))
+ range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)) ||
+ range.len < mp->m_sb.sb_blocksize)
return -XFS_ERROR(EINVAL);
start = BTOBB(range.start);
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index a6e54b3319bd..02fb943cbf22 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -220,6 +220,8 @@ xfs_growfs_data_private(
*/
nfree = 0;
for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {
+ __be32 *agfl_bno;
+
/*
* AG freespace header block
*/
@@ -279,8 +281,10 @@ xfs_growfs_data_private(
agfl->agfl_seqno = cpu_to_be32(agno);
uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_uuid);
}
+
+ agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++)
- agfl->agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
+ agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
error = xfs_bwrite(bp);
xfs_buf_relse(bp);
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 4d613401a5e0..33ad9a77791f 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -442,7 +442,8 @@ xfs_attrlist_by_handle(
return -XFS_ERROR(EPERM);
if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
return -XFS_ERROR(EFAULT);
- if (al_hreq.buflen > XATTR_LIST_MAX)
+ if (al_hreq.buflen < sizeof(struct attrlist) ||
+ al_hreq.buflen > XATTR_LIST_MAX)
return -XFS_ERROR(EINVAL);
/*
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index e8fb1231db81..a7992f8de9d3 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -356,7 +356,8 @@ xfs_compat_attrlist_by_handle(
if (copy_from_user(&al_hreq, arg,
sizeof(compat_xfs_fsop_attrlist_handlereq_t)))
return -XFS_ERROR(EFAULT);
- if (al_hreq.buflen > XATTR_LIST_MAX)
+ if (al_hreq.buflen < sizeof(struct attrlist) ||
+ al_hreq.buflen > XATTR_LIST_MAX)
return -XFS_ERROR(EINVAL);
/*
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 27e0e544e963..104455b8046c 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -618,7 +618,8 @@ xfs_setattr_nonsize(
}
if (!gid_eq(igid, gid)) {
if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) {
- ASSERT(!XFS_IS_PQUOTA_ON(mp));
+ ASSERT(xfs_sb_version_has_pquotino(&mp->m_sb) ||
+ !XFS_IS_PQUOTA_ON(mp));
ASSERT(mask & ATTR_GID);
ASSERT(gdqp);
olddquot2 = xfs_qm_vop_chown(tp, ip,
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index b6b669df40f3..eae16920655b 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -193,7 +193,10 @@ xlog_bread_noalign(
bp->b_io_length = nbblks;
bp->b_error = 0;
- xfsbdstrat(log->l_mp, bp);
+ if (XFS_FORCED_SHUTDOWN(log->l_mp))
+ return XFS_ERROR(EIO);
+
+ xfs_buf_iorequest(bp);
error = xfs_buf_iowait(bp);
if (error)
xfs_buf_ioerror_alert(bp, __func__);
@@ -4397,7 +4400,13 @@ xlog_do_recover(
XFS_BUF_READ(bp);
XFS_BUF_UNASYNC(bp);
bp->b_ops = &xfs_sb_buf_ops;
- xfsbdstrat(log->l_mp, bp);
+
+ if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
+ xfs_buf_relse(bp);
+ return XFS_ERROR(EIO);
+ }
+
+ xfs_buf_iorequest(bp);
error = xfs_buf_iowait(bp);
if (error) {
xfs_buf_ioerror_alert(bp, __func__);
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index da88f167af78..02df7b408a26 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -41,6 +41,7 @@
#include "xfs_fsops.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
+#include "xfs_dinode.h"
#ifdef HAVE_PERCPU_SB
@@ -718,8 +719,22 @@ xfs_mountfs(
* Set the inode cluster size.
* This may still be overridden by the file system
* block size if it is larger than the chosen cluster size.
+ *
+ * For v5 filesystems, scale the cluster size with the inode size to
+ * keep a constant ratio of inodes per cluster buffer, but only if mkfs
+ * has set the inode alignment value appropriately for larger cluster
+ * sizes.
*/
mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ int new_size = mp->m_inode_cluster_size;
+
+ new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
+ if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
+ mp->m_inode_cluster_size = new_size;
+ xfs_info(mp, "Using inode cluster size of %d bytes",
+ mp->m_inode_cluster_size);
+ }
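Worked example, assuming the usual constants (XFS_INODE_BIG_CLUSTER_SIZE = 8192, XFS_DINODE_MIN_SIZE = 256): a v5 filesystem with 512-byte inodes scales new_size to 8192 * (512 / 256) = 16384 bytes, and on a 4k-block filesystem that 16k cluster is only adopted if sb_inoalignmt is at least 4 filesystem blocks; otherwise the default 8k cluster size is kept.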
/*
* Set inode alignment fields
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 1d8101a10d8e..a466c5e5826e 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -112,7 +112,7 @@ typedef struct xfs_mount {
__uint8_t m_blkbb_log; /* blocklog - BBSHIFT */
__uint8_t m_agno_log; /* log #ag's */
__uint8_t m_agino_log; /* #bits for agino in inum */
- __uint16_t m_inode_cluster_size;/* min inode buf size */
+ uint m_inode_cluster_size;/* min inode buf size */
uint m_blockmask; /* sb_blocksize-1 */
uint m_blockwsize; /* sb_blocksize in words */
uint m_blockwmask; /* blockwsize-1 */
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 14a4996cfec6..dd88f0e27bd8 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -134,8 +134,6 @@ xfs_qm_dqpurge(
{
struct xfs_mount *mp = dqp->q_mount;
struct xfs_quotainfo *qi = mp->m_quotainfo;
- struct xfs_dquot *gdqp = NULL;
- struct xfs_dquot *pdqp = NULL;
xfs_dqlock(dqp);
if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
@@ -143,21 +141,6 @@ xfs_qm_dqpurge(
return EAGAIN;
}
- /*
- * If this quota has a hint attached, prepare for releasing it now.
- */
- gdqp = dqp->q_gdquot;
- if (gdqp) {
- xfs_dqlock(gdqp);
- dqp->q_gdquot = NULL;
- }
-
- pdqp = dqp->q_pdquot;
- if (pdqp) {
- xfs_dqlock(pdqp);
- dqp->q_pdquot = NULL;
- }
-
dqp->dq_flags |= XFS_DQ_FREEING;
xfs_dqflock(dqp);
@@ -206,11 +189,47 @@ xfs_qm_dqpurge(
XFS_STATS_DEC(xs_qm_dquot_unused);
xfs_qm_dqdestroy(dqp);
+ return 0;
+}
+
+/*
+ * Release the group or project dquot pointers the user dquots may be carrying
+ * around as a hint, and proceed to purge the user dquot cache if requested.
+ */
+STATIC int
+xfs_qm_dqpurge_hints(
+ struct xfs_dquot *dqp,
+ void *data)
+{
+ struct xfs_dquot *gdqp = NULL;
+ struct xfs_dquot *pdqp = NULL;
+ uint flags = *((uint *)data);
+
+ xfs_dqlock(dqp);
+ if (dqp->dq_flags & XFS_DQ_FREEING) {
+ xfs_dqunlock(dqp);
+ return EAGAIN;
+ }
+
+ /* If this quota has a hint attached, prepare for releasing it now */
+ gdqp = dqp->q_gdquot;
+ if (gdqp)
+ dqp->q_gdquot = NULL;
+
+ pdqp = dqp->q_pdquot;
+ if (pdqp)
+ dqp->q_pdquot = NULL;
+
+ xfs_dqunlock(dqp);
if (gdqp)
- xfs_qm_dqput(gdqp);
+ xfs_qm_dqrele(gdqp);
if (pdqp)
- xfs_qm_dqput(pdqp);
+ xfs_qm_dqrele(pdqp);
+
+ if (flags & XFS_QMOPT_UQUOTA)
+ return xfs_qm_dqpurge(dqp, NULL);
+
return 0;
}
@@ -222,8 +241,18 @@ xfs_qm_dqpurge_all(
struct xfs_mount *mp,
uint flags)
{
- if (flags & XFS_QMOPT_UQUOTA)
- xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
+ /*
+ * We have to release the group/project dquot hints from the user dquots
+ * first, if they are there; otherwise we would run into an infinite loop
+ * while walking the radix tree to purge the other types of dquots, since
+ * their refcount is not zero while a user dquot still refers to them as
+ * a hint.
+ *
+ * Calling the special xfs_qm_dqpurge_hints() will end up going through
+ * the general xfs_qm_dqpurge() against the user dquot cache if requested.
+ */
+ xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge_hints, &flags);
+
if (flags & XFS_QMOPT_GQUOTA)
xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
if (flags & XFS_QMOPT_PQUOTA)
@@ -2082,24 +2111,21 @@ xfs_qm_vop_create_dqattach(
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT(XFS_IS_QUOTA_RUNNING(mp));
- if (udqp) {
+ if (udqp && XFS_IS_UQUOTA_ON(mp)) {
ASSERT(ip->i_udquot == NULL);
- ASSERT(XFS_IS_UQUOTA_ON(mp));
ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
ip->i_udquot = xfs_qm_dqhold(udqp);
xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
}
- if (gdqp) {
+ if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
ASSERT(ip->i_gdquot == NULL);
- ASSERT(XFS_IS_GQUOTA_ON(mp));
ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
ip->i_gdquot = xfs_qm_dqhold(gdqp);
xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
}
- if (pdqp) {
+ if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
ASSERT(ip->i_pdquot == NULL);
- ASSERT(XFS_IS_PQUOTA_ON(mp));
ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));
ip->i_pdquot = xfs_qm_dqhold(pdqp);
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index c035d11b7734..647b6f1d8923 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -314,7 +314,18 @@ xfs_trans_read_buf_map(
ASSERT(bp->b_iodone == NULL);
XFS_BUF_READ(bp);
bp->b_ops = ops;
- xfsbdstrat(tp->t_mountp, bp);
+
+ /*
+ * XXX(hch): clean up the error handling here to be less
+ * of a mess..
+ */
+ if (XFS_FORCED_SHUTDOWN(mp)) {
+ trace_xfs_bdstrat_shut(bp, _RET_IP_);
+ xfs_bioerror_relse(bp);
+ } else {
+ xfs_buf_iorequest(bp);
+ }
+
error = xfs_buf_iowait(bp);
if (error) {
xfs_buf_ioerror_alert(bp, __func__);
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index 1bba7f60d94c..50c3f5614288 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -111,12 +111,14 @@ xfs_trans_log_inode(
/*
* First time we log the inode in a transaction, bump the inode change
- * counter if it is configured for this to occur.
+ * counter if it is configured for this to occur. We don't use
+ * inode_inc_version() because there is no need for extra locking around
+ * i_version as we already hold the inode locked exclusively for
+ * metadata modification.
*/
if (!(ip->i_itemp->ili_item.li_desc->lid_flags & XFS_LID_DIRTY) &&
IS_I_VERSION(VFS_I(ip))) {
- inode_inc_iversion(VFS_I(ip));
- ip->i_d.di_changecount = VFS_I(ip)->i_version;
+ ip->i_d.di_changecount = ++VFS_I(ip)->i_version;
flags |= XFS_ILOG_CORE;
}
diff --git a/fs/xfs/xfs_trans_resv.c b/fs/xfs/xfs_trans_resv.c
index d53d9f0627a7..2fd59c0dae66 100644
--- a/fs/xfs/xfs_trans_resv.c
+++ b/fs/xfs/xfs_trans_resv.c
@@ -385,8 +385,7 @@ xfs_calc_ifree_reservation(
xfs_calc_inode_res(mp, 1) +
xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
xfs_calc_buf_res(1, XFS_FSB_TO_B(mp, 1)) +
- MAX((__uint16_t)XFS_FSB_TO_B(mp, 1),
- XFS_INODE_CLUSTER_SIZE(mp)) +
+ max_t(uint, XFS_FSB_TO_B(mp, 1), XFS_INODE_CLUSTER_SIZE(mp)) +
xfs_calc_buf_res(1, 0) +
xfs_calc_buf_res(2 + XFS_IALLOC_BLOCKS(mp) +
mp->m_in_maxlevels, 0) +
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index d98c67001840..3ea214cff349 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -83,7 +83,9 @@
* Should the subsystem abort the loading of an ACPI table if the
* table checksum is incorrect?
*/
+#ifndef ACPI_CHECKSUM_ABORT
#define ACPI_CHECKSUM_ABORT FALSE
+#endif
/*
* Generate a version of ACPICA that only supports "reduced hardware"
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 89c60b0f6408..c602c7718421 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -100,6 +100,7 @@ enum acpi_hotplug_mode {
struct acpi_hotplug_profile {
struct kobject kobj;
bool enabled:1;
+ bool ignore:1;
enum acpi_hotplug_mode mode;
};
@@ -431,9 +432,9 @@ static inline acpi_handle acpi_get_child(acpi_handle handle, u64 addr)
{
return acpi_find_child(handle, addr, false);
}
+void acpi_preset_companion(struct device *dev, acpi_handle parent, u64 addr);
int acpi_is_root_bridge(acpi_handle);
struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle);
-#define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)ACPI_HANDLE(dev))
int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state);
int acpi_disable_wakeup_device_power(struct acpi_device *dev);
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index d8f9457755b4..4278aba96503 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20130927
+#define ACPI_CA_VERSION 0x20131115
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
diff --git a/include/asm-generic/hash.h b/include/asm-generic/hash.h
new file mode 100644
index 000000000000..b6312843dbd9
--- /dev/null
+++ b/include/asm-generic/hash.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_GENERIC_HASH_H
+#define __ASM_GENERIC_HASH_H
+
+struct fast_hash_ops;
+static inline void setup_arch_fast_hash(struct fast_hash_ops *ops)
+{
+}
+
+#endif /* __ASM_GENERIC_HASH_H */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f330d28e4d0e..db0923458940 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -217,7 +217,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
#endif
#ifndef pte_accessible
-# define pte_accessible(pte) ((void)(pte),1)
+# define pte_accessible(mm, pte) ((void)(pte), 1)
#endif
#ifndef flush_tlb_fix_spurious_fault
@@ -599,11 +599,10 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
barrier();
#endif
- if (pmd_none(pmdval))
+ if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
return 1;
if (unlikely(pmd_bad(pmdval))) {
- if (!pmd_trans_huge(pmdval))
- pmd_clear_bad(pmd);
+ pmd_clear_bad(pmd);
return 1;
}
return 0;
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index ddf2b420ac8f..1cd3f5d767a8 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -3,13 +3,11 @@
#include <linux/thread_info.h>
-/*
- * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
- * that think a non-zero value indicates we cannot preempt.
- */
+#define PREEMPT_ENABLED (0)
+
static __always_inline int preempt_count(void)
{
- return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED;
+ return current_thread_info()->preempt_count;
}
static __always_inline int *preempt_count_ptr(void)
@@ -17,11 +15,6 @@ static __always_inline int *preempt_count_ptr(void)
return &current_thread_info()->preempt_count;
}
-/*
- * We now loose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the
- * alternative is loosing a reschedule. Better schedule too often -- also this
- * should be a very rare operation.
- */
static __always_inline void preempt_count_set(int pc)
{
*preempt_count_ptr() = pc;
@@ -41,28 +34,17 @@ static __always_inline void preempt_count_set(int pc)
task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
} while (0)
-/*
- * We fold the NEED_RESCHED bit into the preempt count such that
- * preempt_enable() can decrement and test for needing to reschedule with a
- * single instruction.
- *
- * We invert the actual bit, so that when the decrement hits 0 we know we both
- * need to resched (the bit is cleared) and can resched (no preempt count).
- */
-
static __always_inline void set_preempt_need_resched(void)
{
- *preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
}
static __always_inline void clear_preempt_need_resched(void)
{
- *preempt_count_ptr() |= PREEMPT_NEED_RESCHED;
}
static __always_inline bool test_preempt_need_resched(void)
{
- return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
+ return false;
}
/*
@@ -81,7 +63,12 @@ static __always_inline void __preempt_count_sub(int val)
static __always_inline bool __preempt_count_dec_and_test(void)
{
- return !--*preempt_count_ptr();
+ /*
+ * Because load-store architectures cannot do per-cpu atomic
+ * operations, we cannot use PREEMPT_NEED_RESCHED because it might get
+ * lost.
+ */
+ return !--*preempt_count_ptr() && tif_need_resched();
}
/*
@@ -89,7 +76,7 @@ static __always_inline bool __preempt_count_dec_and_test(void)
*/
static __always_inline bool should_resched(void)
{
- return unlikely(!*preempt_count_ptr());
+ return unlikely(!preempt_count() && tif_need_resched());
}
#ifdef CONFIG_PREEMPT
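The hunks above are worth a second look: without architecture support for folding TIF_NEED_RESCHED into the preempt count, preempt_enable() has to test the flag separately, so __preempt_count_dec_and_test() only fires when the count drops to zero and a reschedule is pending. A minimal userspace model of that combined check, with hypothetical stand-ins for the thread_info fields:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for thread_info state: one preempt_disable() level held,
 * and a pending reschedule request. */
static int model_preempt_count = 1;
static bool model_tif_need_resched = true;

/* Mirrors the generic __preempt_count_dec_and_test() above. */
static bool dec_and_test(void)
{
        return !--model_preempt_count && model_tif_need_resched;
}

int main(void)
{
        if (dec_and_test())
                printf("would call preempt_schedule()\n");
        return 0;
}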
diff --git a/include/asm-generic/simd.h b/include/asm-generic/simd.h
new file mode 100644
index 000000000000..f57eb7b5c23b
--- /dev/null
+++ b/include/asm-generic/simd.h
@@ -0,0 +1,14 @@
+
+#include <linux/hardirq.h>
+
+/*
+ * may_use_simd - whether it is allowable at this time to issue SIMD
+ * instructions or access the SIMD register file
+ *
+ * As architectures typically don't preserve the SIMD register file when
+ * taking an interrupt, !in_interrupt() should be a reasonable default.
+ */
+static __must_check inline bool may_use_simd(void)
+{
+ return !in_interrupt();
+}
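A hedged sketch of how a caller might use this predicate; do_crc_scalar() and do_crc_simd() are hypothetical helpers, and kernel_fpu_begin()/kernel_fpu_end() is the x86 spelling of the arch-specific SIMD begin/end pair:

static u32 do_crc(const u8 *data, size_t len)
{
        u32 crc;

        if (!may_use_simd())                    /* e.g. in IRQ context */
                return do_crc_scalar(data, len);

        kernel_fpu_begin();                     /* arch-specific SIMD begin */
        crc = do_crc_simd(data, len);
        kernel_fpu_end();
        return crc;
}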
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
index 3f21f1b72e45..d3909effd725 100644
--- a/include/asm-generic/word-at-a-time.h
+++ b/include/asm-generic/word-at-a-time.h
@@ -49,4 +49,12 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
return (val + c->high_bits) & ~rhs;
}
+#ifndef zero_bytemask
+#ifdef CONFIG_64BIT
+#define zero_bytemask(mask) (~0ul << fls64(mask))
+#else
+#define zero_bytemask(mask) (~0ul << fls(mask))
+#endif /* CONFIG_64BIT */
+#endif /* zero_bytemask */
+
#endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/arch/x86/include/asm/crypto/ablk_helper.h b/include/crypto/ablk_helper.h
index 4f93df50c23e..4f93df50c23e 100644
--- a/arch/x86/include/asm/crypto/ablk_helper.h
+++ b/include/crypto/ablk_helper.h
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 418d270e1806..e73c19e90e38 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -386,5 +386,21 @@ static inline int crypto_requires_sync(u32 type, u32 mask)
return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC;
}
-#endif /* _CRYPTO_ALGAPI_H */
+noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
+
+/**
+ * crypto_memneq - Compare two areas of memory without leaking
+ * timing information.
+ *
+ * @a: One area of memory
+ * @b: Another area of memory
+ * @size: The size of the area.
+ *
+ * Returns 0 when data is equal, 1 otherwise.
+ */
+static inline int crypto_memneq(const void *a, const void *b, size_t size)
+{
+ return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
+}
+#endif /* _CRYPTO_ALGAPI_H */
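The point of crypto_memneq() is that, unlike memcmp(), its run time does not depend on where the first differing byte sits; memcmp() typically returns at the first mismatch, so its latency reveals the length of the matching prefix, which is exactly what a MAC forgery attack probes for. A standalone sketch of the idea (the real __crypto_memneq() is word-optimized):

#include <stddef.h>

static unsigned long memneq_sketch(const void *a, const void *b, size_t size)
{
        const unsigned char *pa = a, *pb = b;
        unsigned long neq = 0;

        /* Accumulate differences instead of returning at the first one. */
        while (size--)
                neq |= *pa++ ^ *pb++;
        return neq;                     /* 0 iff the buffers are equal */
}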
diff --git a/include/crypto/authenc.h b/include/crypto/authenc.h
index e47b044929a8..6775059539b5 100644
--- a/include/crypto/authenc.h
+++ b/include/crypto/authenc.h
@@ -23,5 +23,15 @@ struct crypto_authenc_key_param {
__be32 enckeylen;
};
-#endif /* _CRYPTO_AUTHENC_H */
+struct crypto_authenc_keys {
+ const u8 *authkey;
+ const u8 *enckey;
+
+ unsigned int authkeylen;
+ unsigned int enckeylen;
+};
+int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
+ unsigned int keylen);
+
+#endif /* _CRYPTO_AUTHENC_H */
diff --git a/include/crypto/hash_info.h b/include/crypto/hash_info.h
new file mode 100644
index 000000000000..e1e5a3e5dd1b
--- /dev/null
+++ b/include/crypto/hash_info.h
@@ -0,0 +1,40 @@
+/*
+ * Hash Info: Hash algorithms information
+ *
+ * Copyright (c) 2013 Dmitry Kasatkin <d.kasatkin@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_HASH_INFO_H
+#define _CRYPTO_HASH_INFO_H
+
+#include <crypto/sha.h>
+#include <crypto/md5.h>
+
+#include <uapi/linux/hash_info.h>
+
+/* not defined in include/crypto/ */
+#define RMD128_DIGEST_SIZE 16
+#define RMD160_DIGEST_SIZE 20
+#define RMD256_DIGEST_SIZE 32
+#define RMD320_DIGEST_SIZE 40
+
+/* not defined in include/crypto/ */
+#define WP512_DIGEST_SIZE 64
+#define WP384_DIGEST_SIZE 48
+#define WP256_DIGEST_SIZE 32
+
+/* not defined in include/crypto/ */
+#define TGR128_DIGEST_SIZE 16
+#define TGR160_DIGEST_SIZE 20
+#define TGR192_DIGEST_SIZE 24
+
+extern const char *const hash_algo_name[HASH_ALGO__LAST];
+extern const int hash_digest_size[HASH_ALGO__LAST];
+
+#endif /* _CRYPTO_HASH_INFO_H */
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index f5b0224c9967..fc09732613ad 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -15,6 +15,7 @@
#define _LINUX_PUBLIC_KEY_H
#include <linux/mpi.h>
+#include <crypto/hash_info.h>
enum pkey_algo {
PKEY_ALGO_DSA,
@@ -22,21 +23,11 @@ enum pkey_algo {
PKEY_ALGO__LAST
};
-extern const char *const pkey_algo[PKEY_ALGO__LAST];
+extern const char *const pkey_algo_name[PKEY_ALGO__LAST];
+extern const struct public_key_algorithm *pkey_algo[PKEY_ALGO__LAST];
-enum pkey_hash_algo {
- PKEY_HASH_MD4,
- PKEY_HASH_MD5,
- PKEY_HASH_SHA1,
- PKEY_HASH_RIPE_MD_160,
- PKEY_HASH_SHA256,
- PKEY_HASH_SHA384,
- PKEY_HASH_SHA512,
- PKEY_HASH_SHA224,
- PKEY_HASH__LAST
-};
-
-extern const char *const pkey_hash_algo[PKEY_HASH__LAST];
+/* asymmetric key implementation supports only up to SHA224 */
+#define PKEY_HASH__LAST (HASH_ALGO_SHA224 + 1)
enum pkey_id_type {
PKEY_ID_PGP, /* OpenPGP generated key ID */
@@ -44,7 +35,7 @@ enum pkey_id_type {
PKEY_ID_TYPE__LAST
};
-extern const char *const pkey_id_type[PKEY_ID_TYPE__LAST];
+extern const char *const pkey_id_type_name[PKEY_ID_TYPE__LAST];
/*
* Cryptographic data for the public-key subtype of the asymmetric key type.
@@ -59,6 +50,7 @@ struct public_key {
#define PKEY_CAN_DECRYPT 0x02
#define PKEY_CAN_SIGN 0x04
#define PKEY_CAN_VERIFY 0x08
+ enum pkey_algo pkey_algo : 8;
enum pkey_id_type id_type : 8;
union {
MPI mpi[5];
@@ -88,7 +80,8 @@ struct public_key_signature {
u8 *digest;
u8 digest_size; /* Number of bytes in digest */
u8 nr_mpi; /* Occupancy of mpi[] */
- enum pkey_hash_algo pkey_hash_algo : 8;
+ enum pkey_algo pkey_algo : 8;
+ enum hash_algo pkey_hash_algo : 8;
union {
MPI mpi[2];
struct {
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 13621cc8cf4c..6a626a507b8c 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -36,6 +36,7 @@ static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
{
sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
sg1[num - 1].page_link &= ~0x02;
+ sg1[num - 1].page_link |= 0x01;
}
static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
@@ -43,7 +44,7 @@ static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
if (sg_is_last(sg))
return NULL;
- return (++sg)->length ? sg : (void *)sg_page(sg);
+ return (++sg)->length ? sg : sg_chain_ptr(sg);
}
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 751eaffbf0d5..ee127ec33c60 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -169,6 +169,7 @@ struct ttm_tt;
* @offset: The current GPU offset, which can have different meanings
* depending on the memory type. For SYSTEM type memory, it should be 0.
* @cur_placement: Hint of current placement.
+ * @wu_mutex: Wait unreserved mutex.
*
* Base class for TTM buffer object, that deals with data placement and CPU
* mappings. GPU mappings are really up to the driver, but for simpler GPUs
@@ -250,6 +251,7 @@ struct ttm_buffer_object {
struct reservation_object *resv;
struct reservation_object ttm_resv;
+ struct mutex wu_mutex;
};
/**
@@ -702,5 +704,5 @@ extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
size_t count, loff_t *f_pos, bool write);
extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
-
+extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo);
#endif
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index ec8a1d306510..16db7d01a336 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -70,7 +70,8 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
/**
* function ttm_eu_reserve_buffers
*
- * @ticket: [out] ww_acquire_ctx returned by call.
+ * @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only
+ * non-blocking reserves should be tried.
* @list: thread private list of ttm_validate_buffer structs.
*
* Tries to reserve bos pointed to by the list entries for validation.
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
index fc0cf0649901..58b029894eb3 100644
--- a/include/drm/ttm/ttm_object.h
+++ b/include/drm/ttm/ttm_object.h
@@ -41,6 +41,7 @@
#include <drm/drm_hashtab.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
+#include <linux/dma-buf.h>
#include <ttm/ttm_memory.h>
/**
@@ -77,6 +78,7 @@ enum ttm_object_type {
ttm_fence_type,
ttm_buffer_type,
ttm_lock_type,
+ ttm_prime_type,
ttm_driver_type0 = 256,
ttm_driver_type1,
ttm_driver_type2,
@@ -132,6 +134,30 @@ struct ttm_base_object {
enum ttm_ref_type ref_type);
};
+
+/**
+ * struct ttm_prime_object - Modified base object that is prime-aware
+ *
+ * @base: struct ttm_base_object that we derive from
+ * @mutex: Mutex protecting the @dma_buf member.
+ * @size: Size of the dma_buf associated with this object
+ * @real_type: Type of the underlying object. Needed since we're setting
+ * the value of @base::object_type to ttm_prime_type
+ * @dma_buf: Non ref-counted pointer to a struct dma_buf created from this
+ * object.
+ * @refcount_release: The underlying object's release method. Needed since
+ * we set @base::refcount_release to our own release method.
+ */
+
+struct ttm_prime_object {
+ struct ttm_base_object base;
+ struct mutex mutex;
+ size_t size;
+ enum ttm_object_type real_type;
+ struct dma_buf *dma_buf;
+ void (*refcount_release) (struct ttm_base_object **);
+};
+
/**
* ttm_base_object_init
*
@@ -248,14 +274,18 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
/**
* ttm_object device init - initialize a struct ttm_object_device
*
+ * @mem_glob: struct ttm_mem_global for memory accounting.
* @hash_order: Order of hash table used to hash the base objects.
+ * @ops: DMA buf ops for prime objects of this device.
*
* This function is typically called on device initialization to prepare
* data structures needed for ttm base and ref objects.
*/
-extern struct ttm_object_device *ttm_object_device_init
- (struct ttm_mem_global *mem_glob, unsigned int hash_order);
+extern struct ttm_object_device *
+ttm_object_device_init(struct ttm_mem_global *mem_glob,
+ unsigned int hash_order,
+ const struct dma_buf_ops *ops);
/**
* ttm_object_device_release - release data held by a ttm_object_device
@@ -272,4 +302,31 @@ extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
#define ttm_base_object_kfree(__object, __base)\
kfree_rcu(__object, __base.rhead)
+
+extern int ttm_prime_object_init(struct ttm_object_file *tfile,
+ size_t size,
+ struct ttm_prime_object *prime,
+ bool shareable,
+ enum ttm_object_type type,
+ void (*refcount_release)
+ (struct ttm_base_object **),
+ void (*ref_obj_release)
+ (struct ttm_base_object *,
+ enum ttm_ref_type ref_type));
+
+static inline enum ttm_object_type
+ttm_base_object_type(struct ttm_base_object *base)
+{
+ return (base->object_type == ttm_prime_type) ?
+ container_of(base, struct ttm_prime_object, base)->real_type :
+ base->object_type;
+}
+extern int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
+ int fd, u32 *handle);
+extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
+ uint32_t handle, uint32_t flags,
+ int *prime_fd);
+
+#define ttm_prime_object_kfree(__obj, __prime) \
+ kfree_rcu(__obj, __prime.base.rhead)
#endif
diff --git a/include/keys/big_key-type.h b/include/keys/big_key-type.h
new file mode 100644
index 000000000000..d69bc8af3292
--- /dev/null
+++ b/include/keys/big_key-type.h
@@ -0,0 +1,25 @@
+/* Big capacity key type.
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _KEYS_BIG_KEY_TYPE_H
+#define _KEYS_BIG_KEY_TYPE_H
+
+#include <linux/key-type.h>
+
+extern struct key_type key_type_big_key;
+
+extern int big_key_instantiate(struct key *key, struct key_preparsed_payload *prep);
+extern void big_key_revoke(struct key *key);
+extern void big_key_destroy(struct key *key);
+extern void big_key_describe(const struct key *big_key, struct seq_file *m);
+extern long big_key_read(const struct key *key, char __user *buffer, size_t buflen);
+
+#endif /* _KEYS_BIG_KEY_TYPE_H */
diff --git a/include/keys/keyring-type.h b/include/keys/keyring-type.h
index cf49159b0e3a..fca5c62340a4 100644
--- a/include/keys/keyring-type.h
+++ b/include/keys/keyring-type.h
@@ -1,6 +1,6 @@
/* Keyring key type
*
- * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2008, 2013 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -13,19 +13,6 @@
#define _KEYS_KEYRING_TYPE_H
#include <linux/key.h>
-#include <linux/rcupdate.h>
-
-/*
- * the keyring payload contains a list of the keys to which the keyring is
- * subscribed
- */
-struct keyring_list {
- struct rcu_head rcu; /* RCU deletion hook */
- unsigned short maxkeys; /* max keys this list can hold */
- unsigned short nkeys; /* number of keys currently held */
- unsigned short delkey; /* key to be unlinked by RCU */
- struct key __rcu *keys[0];
-};
-
+#include <linux/assoc_array.h>
#endif /* _KEYS_KEYRING_TYPE_H */
diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h
new file mode 100644
index 000000000000..8dabc399bd1d
--- /dev/null
+++ b/include/keys/system_keyring.h
@@ -0,0 +1,23 @@
+/* System keyring containing trusted public keys.
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _KEYS_SYSTEM_KEYRING_H
+#define _KEYS_SYSTEM_KEYRING_H
+
+#ifdef CONFIG_SYSTEM_TRUSTED_KEYRING
+
+#include <linux/key.h>
+
+extern struct key *system_trusted_keyring;
+
+#endif
+
+#endif /* _KEYS_SYSTEM_KEYRING_H */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index b0972c4ce81c..d9099b15b472 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -44,6 +44,20 @@
#include <acpi/acpi_numa.h>
#include <asm/acpi.h>
+static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
+{
+ return adev ? adev->handle : NULL;
+}
+
+#define ACPI_COMPANION(dev) ((dev)->acpi_node.companion)
+#define ACPI_COMPANION_SET(dev, adev) ACPI_COMPANION(dev) = (adev)
+#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev))
+
+static inline const char *acpi_dev_name(struct acpi_device *adev)
+{
+ return dev_name(&adev->dev);
+}
+
enum acpi_irq_model_id {
ACPI_IRQ_MODEL_PIC = 0,
ACPI_IRQ_MODEL_IOAPIC,
@@ -401,6 +415,15 @@ static inline bool acpi_driver_match_device(struct device *dev,
#define acpi_disabled 1
+#define ACPI_COMPANION(dev) (NULL)
+#define ACPI_COMPANION_SET(dev, adev) do { } while (0)
+#define ACPI_HANDLE(dev) (NULL)
+
+static inline const char *acpi_dev_name(struct acpi_device *adev)
+{
+ return NULL;
+}
+
static inline void acpi_early_init(void) { }
static inline int early_acpi_boot_init(void)
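A hedged sketch of the driver-side view of the new accessors; the probe function and its use of a platform device are illustrative only:

static int example_probe(struct platform_device *pdev)
{
        struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);

        if (!adev)
                return -ENODEV;

        dev_info(&pdev->dev, "ACPI companion: %s\n", acpi_dev_name(adev));
        /* Code that still needs a raw handle keeps using ACPI_HANDLE(),
         * which now derives it from the companion object. */
        return 0;
}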
diff --git a/include/linux/assoc_array.h b/include/linux/assoc_array.h
new file mode 100644
index 000000000000..a89df3be1686
--- /dev/null
+++ b/include/linux/assoc_array.h
@@ -0,0 +1,92 @@
+/* Generic associative array implementation.
+ *
+ * See Documentation/assoc_array.txt for information.
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_ASSOC_ARRAY_H
+#define _LINUX_ASSOC_ARRAY_H
+
+#ifdef CONFIG_ASSOCIATIVE_ARRAY
+
+#include <linux/types.h>
+
+#define ASSOC_ARRAY_KEY_CHUNK_SIZE BITS_PER_LONG /* Key data retrieved in chunks of this size */
+
+/*
+ * Generic associative array.
+ */
+struct assoc_array {
+ struct assoc_array_ptr *root; /* The node at the root of the tree */
+ unsigned long nr_leaves_on_tree;
+};
+
+/*
+ * Operations on objects and index keys for use by array manipulation routines.
+ */
+struct assoc_array_ops {
+ /* Method to get a chunk of an index key from caller-supplied data */
+ unsigned long (*get_key_chunk)(const void *index_key, int level);
+
+ /* Method to get a piece of an object's index key */
+ unsigned long (*get_object_key_chunk)(const void *object, int level);
+
+ /* Is this the object we're looking for? */
+ bool (*compare_object)(const void *object, const void *index_key);
+
+ /* At what bit position does an object's key first differ from a given
+ * index key? (or -1 if they're the same)
+ */
+ int (*diff_objects)(const void *object, const void *index_key);
+
+ /* Method to free an object. */
+ void (*free_object)(void *object);
+};
+
+/*
+ * Access and manipulation functions.
+ */
+struct assoc_array_edit;
+
+static inline void assoc_array_init(struct assoc_array *array)
+{
+ array->root = NULL;
+ array->nr_leaves_on_tree = 0;
+}
+
+extern int assoc_array_iterate(const struct assoc_array *array,
+ int (*iterator)(const void *object,
+ void *iterator_data),
+ void *iterator_data);
+extern void *assoc_array_find(const struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key);
+extern void assoc_array_destroy(struct assoc_array *array,
+ const struct assoc_array_ops *ops);
+extern struct assoc_array_edit *assoc_array_insert(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key,
+ void *object);
+extern void assoc_array_insert_set_object(struct assoc_array_edit *edit,
+ void *object);
+extern struct assoc_array_edit *assoc_array_delete(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key);
+extern struct assoc_array_edit *assoc_array_clear(struct assoc_array *array,
+ const struct assoc_array_ops *ops);
+extern void assoc_array_apply_edit(struct assoc_array_edit *edit);
+extern void assoc_array_cancel_edit(struct assoc_array_edit *edit);
+extern int assoc_array_gc(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ bool (*iterator)(void *object, void *iterator_data),
+ void *iterator_data);
+
+#endif /* CONFIG_ASSOCIATIVE_ARRAY */
+#endif /* _LINUX_ASSOC_ARRAY_H */
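The edit API is deliberately two-phase: building an edit preallocates all needed metadata and can fail, while applying it cannot, so a caller can do the failure-prone part before committing. A hedged sketch, assuming the caller defines the ops table and that failures follow the ERR_PTR convention the keyring user of this API relies on:

static int my_insert(struct assoc_array *array,
                     const struct assoc_array_ops *my_ops,
                     const void *index_key, void *object)
{
        struct assoc_array_edit *edit;

        edit = assoc_array_insert(array, my_ops, index_key, object);
        if (IS_ERR(edit))
                return PTR_ERR(edit);   /* typically -ENOMEM */

        assoc_array_apply_edit(edit);   /* cannot fail; publishes via RCU */
        return 0;
}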
diff --git a/include/linux/assoc_array_priv.h b/include/linux/assoc_array_priv.h
new file mode 100644
index 000000000000..711275e6681c
--- /dev/null
+++ b/include/linux/assoc_array_priv.h
@@ -0,0 +1,182 @@
+/* Private definitions for the generic associative array implementation.
+ *
+ * See Documentation/assoc_array.txt for information.
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_ASSOC_ARRAY_PRIV_H
+#define _LINUX_ASSOC_ARRAY_PRIV_H
+
+#ifdef CONFIG_ASSOCIATIVE_ARRAY
+
+#include <linux/assoc_array.h>
+
+#define ASSOC_ARRAY_FAN_OUT 16 /* Number of slots per node */
+#define ASSOC_ARRAY_FAN_MASK (ASSOC_ARRAY_FAN_OUT - 1)
+#define ASSOC_ARRAY_LEVEL_STEP (ilog2(ASSOC_ARRAY_FAN_OUT))
+#define ASSOC_ARRAY_LEVEL_STEP_MASK (ASSOC_ARRAY_LEVEL_STEP - 1)
+#define ASSOC_ARRAY_KEY_CHUNK_MASK (ASSOC_ARRAY_KEY_CHUNK_SIZE - 1)
+#define ASSOC_ARRAY_KEY_CHUNK_SHIFT (ilog2(BITS_PER_LONG))
+
+/*
+ * Undefined type representing a pointer with type information in the bottom
+ * two bits.
+ */
+struct assoc_array_ptr;
+
+/*
+ * An N-way node in the tree.
+ *
+ * Each slot contains one of four things:
+ *
+ * (1) Nothing (NULL).
+ *
+ * (2) A leaf object (pointer types 0).
+ *
+ * (3) A next-level node (pointer type 1, subtype 0).
+ *
+ * (4) A shortcut (pointer type 1, subtype 1).
+ *
+ * The tree is optimised for search-by-ID, but permits reasonable iteration
+ * also.
+ *
+ * The tree is navigated by constructing an index key consisting of an array of
+ * segments, where each segment is ilog2(ASSOC_ARRAY_FAN_OUT) bits in size.
+ *
+ * The segments correspond to levels of the tree (the first segment is used at
+ * level 0, the second at level 1, etc.).
+ */
+struct assoc_array_node {
+ struct assoc_array_ptr *back_pointer;
+ u8 parent_slot;
+ struct assoc_array_ptr *slots[ASSOC_ARRAY_FAN_OUT];
+ unsigned long nr_leaves_on_branch;
+};
+
+/*
+ * A shortcut through the index space out to where a collection of nodes/leaves
+ * with the same IDs live.
+ */
+struct assoc_array_shortcut {
+ struct assoc_array_ptr *back_pointer;
+ int parent_slot;
+ int skip_to_level;
+ struct assoc_array_ptr *next_node;
+ unsigned long index_key[];
+};
+
+/*
+ * Preallocation cache.
+ */
+struct assoc_array_edit {
+ struct rcu_head rcu;
+ struct assoc_array *array;
+ const struct assoc_array_ops *ops;
+ const struct assoc_array_ops *ops_for_excised_subtree;
+ struct assoc_array_ptr *leaf;
+ struct assoc_array_ptr **leaf_p;
+ struct assoc_array_ptr *dead_leaf;
+ struct assoc_array_ptr *new_meta[3];
+ struct assoc_array_ptr *excised_meta[1];
+ struct assoc_array_ptr *excised_subtree;
+ struct assoc_array_ptr **set_backpointers[ASSOC_ARRAY_FAN_OUT];
+ struct assoc_array_ptr *set_backpointers_to;
+ struct assoc_array_node *adjust_count_on;
+ long adjust_count_by;
+ struct {
+ struct assoc_array_ptr **ptr;
+ struct assoc_array_ptr *to;
+ } set[2];
+ struct {
+ u8 *p;
+ u8 to;
+ } set_parent_slot[1];
+ u8 segment_cache[ASSOC_ARRAY_FAN_OUT + 1];
+};
+
+/*
+ * Internal tree member pointers are marked in the bottom one or two bits to
+ * indicate what type they are so that we don't have to look behind every
+ * pointer to see what it points to.
+ *
+ * We provide functions to test type annotations and to create and translate
+ * the annotated pointers.
+ */
+#define ASSOC_ARRAY_PTR_TYPE_MASK 0x1UL
+#define ASSOC_ARRAY_PTR_LEAF_TYPE 0x0UL /* Points to leaf (or nowhere) */
+#define ASSOC_ARRAY_PTR_META_TYPE 0x1UL /* Points to node or shortcut */
+#define ASSOC_ARRAY_PTR_SUBTYPE_MASK 0x2UL
+#define ASSOC_ARRAY_PTR_NODE_SUBTYPE 0x0UL
+#define ASSOC_ARRAY_PTR_SHORTCUT_SUBTYPE 0x2UL
+
+static inline bool assoc_array_ptr_is_meta(const struct assoc_array_ptr *x)
+{
+ return (unsigned long)x & ASSOC_ARRAY_PTR_TYPE_MASK;
+}
+static inline bool assoc_array_ptr_is_leaf(const struct assoc_array_ptr *x)
+{
+ return !assoc_array_ptr_is_meta(x);
+}
+static inline bool assoc_array_ptr_is_shortcut(const struct assoc_array_ptr *x)
+{
+ return (unsigned long)x & ASSOC_ARRAY_PTR_SUBTYPE_MASK;
+}
+static inline bool assoc_array_ptr_is_node(const struct assoc_array_ptr *x)
+{
+ return !assoc_array_ptr_is_shortcut(x);
+}
+
+static inline void *assoc_array_ptr_to_leaf(const struct assoc_array_ptr *x)
+{
+ return (void *)((unsigned long)x & ~ASSOC_ARRAY_PTR_TYPE_MASK);
+}
+
+static inline
+unsigned long __assoc_array_ptr_to_meta(const struct assoc_array_ptr *x)
+{
+ return (unsigned long)x &
+ ~(ASSOC_ARRAY_PTR_SUBTYPE_MASK | ASSOC_ARRAY_PTR_TYPE_MASK);
+}
+static inline
+struct assoc_array_node *assoc_array_ptr_to_node(const struct assoc_array_ptr *x)
+{
+ return (struct assoc_array_node *)__assoc_array_ptr_to_meta(x);
+}
+static inline
+struct assoc_array_shortcut *assoc_array_ptr_to_shortcut(const struct assoc_array_ptr *x)
+{
+ return (struct assoc_array_shortcut *)__assoc_array_ptr_to_meta(x);
+}
+
+static inline
+struct assoc_array_ptr *__assoc_array_x_to_ptr(const void *p, unsigned long t)
+{
+ return (struct assoc_array_ptr *)((unsigned long)p | t);
+}
+static inline
+struct assoc_array_ptr *assoc_array_leaf_to_ptr(const void *p)
+{
+ return __assoc_array_x_to_ptr(p, ASSOC_ARRAY_PTR_LEAF_TYPE);
+}
+static inline
+struct assoc_array_ptr *assoc_array_node_to_ptr(const struct assoc_array_node *p)
+{
+ return __assoc_array_x_to_ptr(
+ p, ASSOC_ARRAY_PTR_META_TYPE | ASSOC_ARRAY_PTR_NODE_SUBTYPE);
+}
+static inline
+struct assoc_array_ptr *assoc_array_shortcut_to_ptr(const struct assoc_array_shortcut *p)
+{
+ return __assoc_array_x_to_ptr(
+ p, ASSOC_ARRAY_PTR_META_TYPE | ASSOC_ARRAY_PTR_SHORTCUT_SUBTYPE);
+}
+
+#endif /* CONFIG_ASSOCIATIVE_ARRAY */
+#endif /* _LINUX_ASSOC_ARRAY_PRIV_H */
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 729a4d165bcc..a40641954c29 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -73,6 +73,8 @@ struct audit_field {
void *lsm_rule;
};
+extern int is_audit_feature_set(int which);
+
extern int __init audit_register_class(int class, unsigned *list);
extern int audit_classify_syscall(int abi, unsigned syscall);
extern int audit_classify_arch(int arch);
@@ -207,7 +209,7 @@ static inline int audit_get_sessionid(struct task_struct *tsk)
extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
-extern int __audit_bprm(struct linux_binprm *bprm);
+extern void __audit_bprm(struct linux_binprm *bprm);
extern int __audit_socketcall(int nargs, unsigned long *args);
extern int __audit_sockaddr(int len, void *addr);
extern void __audit_fd_pair(int fd1, int fd2);
@@ -236,11 +238,10 @@ static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid
if (unlikely(!audit_dummy_context()))
__audit_ipc_set_perm(qbytes, uid, gid, mode);
}
-static inline int audit_bprm(struct linux_binprm *bprm)
+static inline void audit_bprm(struct linux_binprm *bprm)
{
if (unlikely(!audit_dummy_context()))
- return __audit_bprm(bprm);
- return 0;
+ __audit_bprm(bprm);
}
static inline int audit_socketcall(int nargs, unsigned long *args)
{
@@ -367,10 +368,8 @@ static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid,
gid_t gid, umode_t mode)
{ }
-static inline int audit_bprm(struct linux_binprm *bprm)
-{
- return 0;
-}
+static inline void audit_bprm(struct linux_binprm *bprm)
+{ }
static inline int audit_socketcall(int nargs, unsigned long *args)
{
return 0;
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
index 669fef5c745a..3e0fbe441763 100644
--- a/include/linux/auxvec.h
+++ b/include/linux/auxvec.h
@@ -3,6 +3,6 @@
#include <uapi/linux/auxvec.h>
-#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */
+#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */
/* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
#endif /* _LINUX_AUXVEC_H */
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 4d043c30216f..0b3bb16c705a 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -418,7 +418,14 @@ static inline void bcma_maskset16(struct bcma_device *cc,
bcma_write16(cc, offset, (bcma_read16(cc, offset) & mask) | set);
}
-extern struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid);
+extern struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
+ u8 unit);
+static inline struct bcma_device *bcma_find_core(struct bcma_bus *bus,
+ u16 coreid)
+{
+ return bcma_find_core_unit(bus, coreid, 0);
+}
+
extern bool bcma_core_is_enabled(struct bcma_device *core);
extern void bcma_core_disable(struct bcma_device *core, u32 flags);
extern int bcma_core_enable(struct bcma_device *core, u32 flags);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f26ec20f6354..1b135d49b279 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -505,6 +505,9 @@ struct request_queue {
(1 << QUEUE_FLAG_SAME_COMP) | \
(1 << QUEUE_FLAG_ADD_RANDOM))
+#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
+ (1 << QUEUE_FLAG_SAME_COMP))
+
static inline void queue_lockdep_assert_held(struct request_queue *q)
{
if (q->queue_lock)
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index b613ffd402d1..7b99d717411d 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -31,7 +31,7 @@ SUBSYS(devices)
SUBSYS(freezer)
#endif
-#if IS_SUBSYS_ENABLED(CONFIG_NET_CLS_CGROUP)
+#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_NET_CLASSID)
SUBSYS(net_cls)
#endif
@@ -43,7 +43,7 @@ SUBSYS(blkio)
SUBSYS(perf)
#endif
-#if IS_SUBSYS_ENABLED(CONFIG_NETPRIO_CGROUP)
+#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_NET_PRIO)
SUBSYS(net_prio)
#endif
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index 973ce10c40b6..dc1bd3dcf11f 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -28,8 +28,6 @@
#endif
-#define uninitialized_var(x) x
-
#ifndef __HAVE_BUILTIN_BSWAP16__
/* icc has this, but it's called _bswap16 */
#define __HAVE_BUILTIN_BSWAP16__
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 57e87e749a48..bf72e9ac6de0 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -29,8 +29,10 @@ struct vfsmount;
/* The hash is always the low bits of hash_len */
#ifdef __LITTLE_ENDIAN
#define HASH_LEN_DECLARE u32 hash; u32 len;
+ #define bytemask_from_count(cnt) (~(~0ul << (cnt)*8))
#else
#define HASH_LEN_DECLARE u32 len; u32 hash;
+ #define bytemask_from_count(cnt) (~(~0ul >> (cnt)*8))
#endif
/*
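bytemask_from_count(cnt) yields a mask covering the first cnt bytes of a word in memory order, which lets the dcache compare short names a word at a time. A standalone little-endian demo (cnt must stay below sizeof(long) to avoid an undefined full-width shift):

#include <stdio.h>

#define bytemask_from_count(cnt)        (~(~0ul << (cnt)*8))

int main(void)
{
        /* The 3 low-order bytes, i.e. the first 3 bytes in
         * little-endian memory order: prints 0xffffff. */
        printf("%#lx\n", bytemask_from_count(3));
        return 0;
}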
diff --git a/include/linux/device.h b/include/linux/device.h
index b025925df7f7..952b01033c32 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -644,9 +644,11 @@ struct device_dma_parameters {
unsigned long segment_boundary_mask;
};
+struct acpi_device;
+
struct acpi_dev_node {
#ifdef CONFIG_ACPI
- void *handle;
+ struct acpi_device *companion;
#endif
};
@@ -790,14 +792,6 @@ static inline struct device *kobj_to_dev(struct kobject *kobj)
return container_of(kobj, struct device, kobj);
}
-#ifdef CONFIG_ACPI
-#define ACPI_HANDLE(dev) ((dev)->acpi_node.handle)
-#define ACPI_HANDLE_SET(dev, _handle_) (dev)->acpi_node.handle = (_handle_)
-#else
-#define ACPI_HANDLE(dev) (NULL)
-#define ACPI_HANDLE_SET(dev, _handle_) do { } while (0)
-#endif
-
/* Get the wakeup routines, which depend on struct device */
#include <linux/pm_wakeup.h>
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 0bc727534108..41cf0c399288 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -45,13 +45,13 @@ static inline int dma_submit_error(dma_cookie_t cookie)
/**
* enum dma_status - DMA transaction status
- * @DMA_SUCCESS: transaction completed successfully
+ * @DMA_COMPLETE: transaction completed
* @DMA_IN_PROGRESS: transaction not yet processed
* @DMA_PAUSED: transaction is paused
* @DMA_ERROR: transaction failed
*/
enum dma_status {
- DMA_SUCCESS,
+ DMA_COMPLETE,
DMA_IN_PROGRESS,
DMA_PAUSED,
DMA_ERROR,
@@ -171,12 +171,6 @@ struct dma_interleaved_template {
* @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
 * acknowledges receipt, i.e. has a chance to establish any dependency
* chains
- * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
- * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
- * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
- * (if not set, do the source dma-unmapping as page)
- * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
- * (if not set, do the destination dma-unmapping as page)
* @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
* @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
* @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
@@ -188,14 +182,10 @@ struct dma_interleaved_template {
enum dma_ctrl_flags {
DMA_PREP_INTERRUPT = (1 << 0),
DMA_CTRL_ACK = (1 << 1),
- DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
- DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
- DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
- DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
- DMA_PREP_PQ_DISABLE_P = (1 << 6),
- DMA_PREP_PQ_DISABLE_Q = (1 << 7),
- DMA_PREP_CONTINUE = (1 << 8),
- DMA_PREP_FENCE = (1 << 9),
+ DMA_PREP_PQ_DISABLE_P = (1 << 2),
+ DMA_PREP_PQ_DISABLE_Q = (1 << 3),
+ DMA_PREP_CONTINUE = (1 << 4),
+ DMA_PREP_FENCE = (1 << 5),
};
/**
@@ -413,6 +403,17 @@ void dma_chan_cleanup(struct kref *kref);
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
typedef void (*dma_async_tx_callback)(void *dma_async_param);
+
+struct dmaengine_unmap_data {
+ u8 to_cnt;
+ u8 from_cnt;
+ u8 bidi_cnt;
+ struct device *dev;
+ struct kref kref;
+ size_t len;
+ dma_addr_t addr[0];
+};
+
/**
* struct dma_async_tx_descriptor - async transaction descriptor
* ---dma generic offload fields---
@@ -438,6 +439,7 @@ struct dma_async_tx_descriptor {
dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
dma_async_tx_callback callback;
void *callback_param;
+ struct dmaengine_unmap_data *unmap;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
struct dma_async_tx_descriptor *next;
struct dma_async_tx_descriptor *parent;
@@ -445,6 +447,40 @@ struct dma_async_tx_descriptor {
#endif
};
+#ifdef CONFIG_DMA_ENGINE
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+ struct dmaengine_unmap_data *unmap)
+{
+ kref_get(&unmap->kref);
+ tx->unmap = unmap;
+}
+
+struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
+void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
+#else
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+ struct dmaengine_unmap_data *unmap)
+{
+}
+static inline struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
+{
+ return NULL;
+}
+static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
+{
+}
+#endif
+
+static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
+{
+ if (tx->unmap) {
+ dmaengine_unmap_put(tx->unmap);
+ tx->unmap = NULL;
+ }
+}
+
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
@@ -979,10 +1015,10 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
{
if (last_complete <= last_used) {
if ((cookie <= last_complete) || (cookie > last_used))
- return DMA_SUCCESS;
+ return DMA_COMPLETE;
} else {
if ((cookie <= last_complete) && (cookie > last_used))
- return DMA_SUCCESS;
+ return DMA_COMPLETE;
}
return DMA_IN_PROGRESS;
}
@@ -1013,11 +1049,11 @@ static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_typ
}
static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
- return DMA_SUCCESS;
+ return DMA_COMPLETE;
}
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
- return DMA_SUCCESS;
+ return DMA_COMPLETE;
}
static inline void dma_issue_pending_all(void)
{
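With the DMA_COMPL_* flags gone, unmapping is driven by a refcounted dmaengine_unmap_data attached to the descriptor and dropped on completion. A hedged sketch of a memcpy submission under that model; error unwinding is elided, and the address layout (to-device entries first, then from-device) follows the to_cnt/from_cnt convention above:

static struct dma_async_tx_descriptor *
prep_copy_sketch(struct dma_chan *chan, struct page *dst, struct page *src,
                 size_t len)
{
        struct device *dev = chan->device->dev;
        struct dma_async_tx_descriptor *tx;
        struct dmaengine_unmap_data *unmap;

        unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
        if (!unmap)
                return NULL;

        unmap->to_cnt = 1;              /* addr[0] is DMA_TO_DEVICE */
        unmap->from_cnt = 1;            /* addr[1] is DMA_FROM_DEVICE */
        unmap->len = len;
        unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
        unmap->addr[1] = dma_map_page(dev, dst, 0, len, DMA_FROM_DEVICE);

        tx = chan->device->device_prep_dma_memcpy(chan, unmap->addr[1],
                                                  unmap->addr[0], len, 0);
        if (tx)
                dma_set_unmap(tx, unmap);       /* descriptor takes a ref */

        dmaengine_unmap_put(unmap);             /* drop the caller's ref */
        return tx;
}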
diff --git a/include/linux/efi.h b/include/linux/efi.h
index bc5687d0f315..11ce6784a196 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -801,6 +801,8 @@ struct efivar_entry {
struct efi_variable var;
struct list_head list;
struct kobject kobj;
+ bool scanning;
+ bool deleting;
};
@@ -866,6 +868,8 @@ void efivar_run_worker(void);
#if defined(CONFIG_EFI_VARS) || defined(CONFIG_EFI_VARS_MODULE)
int efivars_sysfs_init(void);
+#define EFIVARS_DATA_SIZE_MAX 1024
+
#endif /* CONFIG_EFI_VARS */
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index fc4a9aa7dd82..f344ac04f858 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -26,6 +26,7 @@
#include <linux/netdevice.h>
#include <linux/random.h>
#include <asm/unaligned.h>
+#include <asm/bitsperlong.h>
#ifdef __KERNEL__
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
@@ -60,6 +61,8 @@ static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) =
*
* Return true if address is link local reserved addr (01:80:c2:00:00:0X) per
* IEEE 802.1Q 8.6.3 Frame filtering.
+ *
+ * Please note: addr must be aligned to u16.
*/
static inline bool is_link_local_ether_addr(const u8 *addr)
{
@@ -67,7 +70,12 @@ static inline bool is_link_local_ether_addr(const u8 *addr)
static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
static const __be16 m = cpu_to_be16(0xfff0);
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return (((*(const u32 *)addr) ^ (*(const u32 *)b)) |
+ ((a[2] ^ b[2]) & m)) == 0;
+#else
return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
+#endif
}
/**
@@ -75,10 +83,18 @@ static inline bool is_link_local_ether_addr(const u8 *addr)
* @addr: Pointer to a six-byte array containing the Ethernet address
*
* Return true if the address is all zeroes.
+ *
+ * Please note: addr must be aligned to u16.
*/
static inline bool is_zero_ether_addr(const u8 *addr)
{
- return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return ((*(const u32 *)addr) | (*(const u16 *)(addr + 4))) == 0;
+#else
+ return (*(const u16 *)(addr + 0) |
+ *(const u16 *)(addr + 2) |
+ *(const u16 *)(addr + 4)) == 0;
+#endif
}
/**
@@ -109,10 +125,14 @@ static inline bool is_local_ether_addr(const u8 *addr)
* @addr: Pointer to a six-byte array containing the Ethernet address
*
* Return true if the address is the broadcast address.
+ *
+ * Please note: addr must be aligned to u16.
*/
static inline bool is_broadcast_ether_addr(const u8 *addr)
{
- return (addr[0] & addr[1] & addr[2] & addr[3] & addr[4] & addr[5]) == 0xff;
+ return (*(const u16 *)(addr + 0) &
+ *(const u16 *)(addr + 2) &
+ *(const u16 *)(addr + 4)) == 0xffff;
}
/**
@@ -134,6 +154,8 @@ static inline bool is_unicast_ether_addr(const u8 *addr)
* a multicast address, and is not FF:FF:FF:FF:FF:FF.
*
* Return true if the address is valid.
+ *
+ * Please note: addr must be aligned to u16.
*/
static inline bool is_valid_ether_addr(const u8 *addr)
{
@@ -211,40 +233,26 @@ static inline void eth_hw_addr_inherit(struct net_device *dst,
}
/**
- * compare_ether_addr - Compare two Ethernet addresses
- * @addr1: Pointer to a six-byte array containing the Ethernet address
- * @addr2: Pointer other six-byte array containing the Ethernet address
- *
- * Compare two Ethernet addresses, returns 0 if equal, non-zero otherwise.
- * Unlike memcmp(), it doesn't return a value suitable for sorting.
- */
-static inline unsigned compare_ether_addr(const u8 *addr1, const u8 *addr2)
-{
- const u16 *a = (const u16 *) addr1;
- const u16 *b = (const u16 *) addr2;
-
- BUILD_BUG_ON(ETH_ALEN != 6);
- return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
-}
-
-/**
* ether_addr_equal - Compare two Ethernet addresses
* @addr1: Pointer to a six-byte array containing the Ethernet address
 * @addr2: Pointer to another six-byte array containing the Ethernet address
*
* Compare two Ethernet addresses, returns true if equal
+ *
+ * Please note: addr1 & addr2 must both be aligned to u16.
*/
static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
{
- return !compare_ether_addr(addr1, addr2);
-}
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ u32 fold = ((*(const u32 *)addr1) ^ (*(const u32 *)addr2)) |
+ ((*(const u16 *)(addr1 + 4)) ^ (*(const u16 *)(addr2 + 4)));
-static inline unsigned long zap_last_2bytes(unsigned long value)
-{
-#ifdef __BIG_ENDIAN
- return value >> 16;
+ return fold == 0;
#else
- return value << 16;
+ const u16 *a = (const u16 *)addr1;
+ const u16 *b = (const u16 *)addr2;
+
+ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
#endif
}
@@ -265,22 +273,38 @@ static inline unsigned long zap_last_2bytes(unsigned long value)
static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
const u8 addr2[6+2])
{
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- unsigned long fold = ((*(unsigned long *)addr1) ^
- (*(unsigned long *)addr2));
-
- if (sizeof(fold) == 8)
- return zap_last_2bytes(fold) == 0;
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);
- fold |= zap_last_2bytes((*(unsigned long *)(addr1 + 4)) ^
- (*(unsigned long *)(addr2 + 4)));
- return fold == 0;
+#ifdef __BIG_ENDIAN
+ return (fold >> 16) == 0;
+#else
+ return (fold << 16) == 0;
+#endif
#else
return ether_addr_equal(addr1, addr2);
#endif
}
/**
+ * ether_addr_equal_unaligned - Compare two non-u16-aligned Ethernet addresses
+ * @addr1: Pointer to a six-byte array containing the Ethernet address
+ * @addr2: Pointer to another six-byte array containing the Ethernet address
+ *
+ * Compare two Ethernet addresses, returns true if equal
+ *
+ * Please note: use only when either Ethernet address may not be u16 aligned.
+ */
+static inline bool ether_addr_equal_unaligned(const u8 *addr1, const u8 *addr2)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return ether_addr_equal(addr1, addr2);
+#else
+ return memcmp(addr1, addr2, ETH_ALEN) == 0;
+#endif
+}
+
+/**
* is_etherdev_addr - Tell if given Ethernet address belongs to the device.
* @dev: Pointer to a device structure
* @addr: Pointer to a six-byte array containing the Ethernet address
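A standalone demo of the fold trick used above: on platforms with efficient unaligned access, one 32-bit and one 16-bit XOR replace three 16-bit compares. The kernel casts the byte pointers directly; memcpy() is used here so the demo is well-defined everywhere:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int eth_equal_fold(const uint8_t *a, const uint8_t *b)
{
        uint32_t a32, b32;
        uint16_t a16, b16;

        memcpy(&a32, a, 4);                     /* bytes 0-3 */
        memcpy(&b32, b, 4);
        memcpy(&a16, a + 4, 2);                 /* bytes 4-5 */
        memcpy(&b16, b + 4, 2);
        return ((a32 ^ b32) | (a16 ^ b16)) == 0;
}

int main(void)
{
        uint8_t x[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        uint8_t y[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

        printf("%d\n", eth_equal_fold(x, y));   /* prints 1 */
        return 0;
}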
diff --git a/include/linux/fs.h b/include/linux/fs.h
index bf5d574ebdf4..121f11f001c0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2622,7 +2622,9 @@ extern int simple_write_begin(struct file *file, struct address_space *mapping,
extern int simple_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata);
+extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);
+extern const struct dentry_operations simple_dentry_operations;
extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
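A hedged sketch of a pseudo-filesystem opting in to the newly exported helpers so its dentries are dropped as soon as the last reference goes away; the fill_super function is illustrative only:

static int example_fill_super(struct super_block *sb, void *data, int silent)
{
        /* simple_dentry_operations is backed by always_delete_dentry() */
        sb->s_d_op = &simple_dentry_operations;
        /* ... the rest of the superblock setup is elided ... */
        return 0;
}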
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 9abbe630c456..8c9b7a1c4138 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -248,6 +248,9 @@ struct ftrace_event_call {
#ifdef CONFIG_PERF_EVENTS
int perf_refcount;
struct hlist_head __percpu *perf_events;
+
+ int (*perf_perm)(struct ftrace_event_call *,
+ struct perf_event *);
#endif
};
@@ -317,6 +320,19 @@ struct ftrace_event_file {
} \
early_initcall(trace_init_flags_##name);
+#define __TRACE_EVENT_PERF_PERM(name, expr...) \
+ static int perf_perm_##name(struct ftrace_event_call *tp_event, \
+ struct perf_event *p_event) \
+ { \
+ return ({ expr; }); \
+ } \
+ static int __init trace_init_perf_perm_##name(void) \
+ { \
+ event_##name.perf_perm = &perf_perm_##name; \
+ return 0; \
+ } \
+ early_initcall(trace_init_perf_perm_##name);
+
#define PERF_MAX_TRACE_SIZE 2048
#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
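A hedged sketch of attaching a permission callback to a hypothetical trace event "foo" with the helper above; the expression becomes the callback's return value, so returning -EPERM here refuses the perf attachment for non-admin users:

__TRACE_EVENT_PERF_PERM(foo,
        capable(CAP_SYS_ADMIN) ? 0 : -EPERM);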
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 656a27efb2c8..3ea2cf6b0e6c 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -2,9 +2,12 @@
#define __LINUX_GPIO_DRIVER_H
#include <linux/types.h>
+#include <linux/module.h>
struct device;
struct gpio_desc;
+struct of_phandle_args;
+struct device_node;
struct seq_file;
/**
@@ -125,6 +128,13 @@ extern struct gpio_chip *gpiochip_find(void *data,
int gpiod_lock_as_irq(struct gpio_desc *desc);
void gpiod_unlock_as_irq(struct gpio_desc *desc);
+enum gpio_lookup_flags {
+ GPIO_ACTIVE_HIGH = (0 << 0),
+ GPIO_ACTIVE_LOW = (1 << 0),
+ GPIO_OPEN_DRAIN = (1 << 1),
+ GPIO_OPEN_SOURCE = (1 << 2),
+};
+
/**
* Lookup table for associating GPIOs to specific devices and functions using
* platform data.
@@ -152,9 +162,9 @@ struct gpiod_lookup {
*/
unsigned int idx;
/*
- * mask of GPIOF_* values
+ * mask of GPIO_* values
*/
- unsigned long flags;
+ enum gpio_lookup_flags flags;
};
/*
diff --git a/include/linux/hash.h b/include/linux/hash.h
index f09a0ae4d858..bd1754c7ecef 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -15,6 +15,7 @@
*/
#include <asm/types.h>
+#include <asm/hash.h>
#include <linux/compiler.h>
/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
@@ -78,4 +79,39 @@ static inline u32 hash32_ptr(const void *ptr)
#endif
return (u32)val;
}
+
+struct fast_hash_ops {
+ u32 (*hash)(const void *data, u32 len, u32 seed);
+ u32 (*hash2)(const u32 *data, u32 len, u32 seed);
+};
+
+/**
+ * arch_fast_hash - Calculates a hash over a given buffer that can have
+ * arbitrary size. This function will eventually use an
+ * architecture-optimized hashing implementation if
+ * available, and trades off distribution for speed.
+ *
+ * @data: buffer to hash
+ * @len: length of buffer in bytes
+ * @seed: start seed
+ *
+ * Returns 32bit hash.
+ */
+extern u32 arch_fast_hash(const void *data, u32 len, u32 seed);
+
+/**
+ * arch_fast_hash2 - Calculates a hash over a given buffer whose size
+ * is a multiple of 32bit words. This
+ * function will eventually use an architecture-
+ * optimized hashing implementation if available,
+ * and trades off distribution for speed.
+ *
+ * @data: buffer to hash (must be 32bit padded)
+ * @len: number of 32bit words
+ * @seed: start seed
+ *
+ * Returns 32bit hash.
+ */
+extern u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed);
+
#endif /* _LINUX_HASH_H */
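A hedged sketch of the intended use: hashing a fixed-size, 32bit-padded key with the word-based variant. The flow_key layout is hypothetical:

struct flow_key {
        u32 saddr;
        u32 daddr;
        u16 sport;
        u16 dport;
};                      /* 12 bytes, i.e. three 32bit words */

static u32 flow_hash(const struct flow_key *key, u32 seed)
{
        return arch_fast_hash2((const u32 *)key,
                               sizeof(*key) / sizeof(u32), seed);
}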
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index a265af294ea4..b914ca3f57ba 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -21,6 +21,8 @@
#include <linux/hid.h>
#include <linux/hid-sensor-ids.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
/**
* struct hid_sensor_hub_attribute_info - Attribute info
@@ -40,6 +42,8 @@ struct hid_sensor_hub_attribute_info {
s32 units;
s32 unit_expo;
s32 size;
+ s32 logical_minimum;
+ s32 logical_maximum;
};
/**
@@ -184,6 +188,7 @@ struct hid_sensor_common {
struct platform_device *pdev;
unsigned usage_id;
bool data_ready;
+ struct iio_trigger *trigger;
struct hid_sensor_hub_attribute_info poll;
struct hid_sensor_hub_attribute_info report_state;
struct hid_sensor_hub_attribute_info power_state;
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index 4f945d3ed49f..8323775ac21d 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -117,4 +117,16 @@
#define HID_USAGE_SENSOR_PROP_REPORT_STATE 0x200316
#define HID_USAGE_SENSOR_PROY_POWER_STATE 0x200319
+/* Power state enumerations */
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_UNDEFINED_ENUM 0x00
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM 0x01
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_D1_LOW_POWER_ENUM 0x02
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_D2_STANDBY_WITH_WAKE_ENUM 0x03
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_D3_SLEEP_WITH_WAKE_ENUM 0x04
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_D4_POWER_OFF_ENUM 0x05
+
+/* Report State enumerations */
+#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM 0x00
+#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM 0x01
+
#endif
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index acd2010328f3..bd7e98752222 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -31,6 +31,7 @@ struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
void hugepage_put_subpool(struct hugepage_subpool *spool);
int PageHuge(struct page *page);
+int PageHeadHuge(struct page *page_head);
void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
@@ -69,7 +70,6 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
bool is_hugepage_active(struct page *page);
-void copy_huge_page(struct page *dst, struct page *src);
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
@@ -104,6 +104,11 @@ static inline int PageHuge(struct page *page)
return 0;
}
+static inline int PageHeadHuge(struct page *page_head)
+{
+ return 0;
+}
+
static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}
@@ -137,12 +142,12 @@ static inline int dequeue_hwpoisoned_huge_page(struct page *page)
return 0;
}
-#define isolate_huge_page(p, l) false
-#define putback_active_hugepage(p) do {} while (0)
-#define is_hugepage_active(x) false
-static inline void copy_huge_page(struct page *dst, struct page *src)
+static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
+ return false;
}
+#define putback_active_hugepage(p) do {} while (0)
+#define is_hugepage_active(x) false
static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot)
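The stub conversion in the hunk above matters beyond style: the old `#define isolate_huge_page(p, l) false` discarded its arguments, so side effects were compiled out and argument types went unchecked in !CONFIG_HUGETLB_PAGE builds. The static inline keeps the constant-false result while still evaluating and type-checking its arguments. A hedged sketch of the behavioural difference (get_next_page() is a hypothetical helper):

/* Under the macro stub, get_next_page() was never called here in
 * !CONFIG_HUGETLB_PAGE builds; under the static inline it still runs,
 * matching the CONFIG_HUGETLB_PAGE=y control flow. */
static void try_isolate_one(struct list_head *pagelist)
{
	if (isolate_huge_page(get_next_page(), pagelist))
		return;
}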
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 84ba5ac39e03..7c8b20b120ea 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -2,6 +2,7 @@
#define _LINUX_IF_MACVLAN_H
#include <linux/if_link.h>
+#include <linux/if_vlan.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
@@ -24,28 +25,6 @@ static inline struct socket *macvtap_get_socket(struct file *f)
struct macvlan_port;
struct macvtap_queue;
-/**
- * struct macvlan_pcpu_stats - MACVLAN percpu stats
- * @rx_packets: number of received packets
- * @rx_bytes: number of received bytes
- * @rx_multicast: number of received multicast packets
- * @tx_packets: number of transmitted packets
- * @tx_bytes: number of transmitted bytes
- * @syncp: synchronization point for 64bit counters
- * @rx_errors: number of rx errors
- * @tx_dropped: number of tx dropped packets
- */
-struct macvlan_pcpu_stats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 rx_multicast;
- u64 tx_packets;
- u64 tx_bytes;
- struct u64_stats_sync syncp;
- u32 rx_errors;
- u32 tx_dropped;
-};
-
/*
* Maximum times a macvtap device can be opened. This can be used to
* configure the number of receive queues, e.g. for multiqueue virtio.
@@ -62,15 +41,13 @@ struct macvlan_dev {
struct macvlan_port *port;
struct net_device *lowerdev;
void *fwd_priv;
- struct macvlan_pcpu_stats __percpu *pcpu_stats;
+ struct vlan_pcpu_stats __percpu *pcpu_stats;
DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ);
netdev_features_t set_features;
enum macvlan_mode mode;
u16 flags;
- int (*receive)(struct sk_buff *skb);
- int (*forward)(struct net_device *dev, struct sk_buff *skb);
/* This array tracks active taps. */
struct macvtap_queue __rcu *taps[MAX_MACVTAP_QUEUES];
/* This list tracks all taps (both enabled and disabled) */
@@ -86,7 +63,7 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
bool multicast)
{
if (likely(success)) {
- struct macvlan_pcpu_stats *pcpu_stats;
+ struct vlan_pcpu_stats *pcpu_stats;
pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
u64_stats_update_begin(&pcpu_stats->syncp);
@@ -103,10 +80,7 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
extern void macvlan_common_setup(struct net_device *dev);
extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
- int (*receive)(struct sk_buff *skb),
- int (*forward)(struct net_device *dev,
- struct sk_buff *skb));
+ struct nlattr *tb[], struct nlattr *data[]);
extern void macvlan_count_rx(const struct macvlan_dev *vlan,
unsigned int len, bool success,
@@ -116,9 +90,6 @@ extern void macvlan_dellink(struct net_device *dev, struct list_head *head);
extern int macvlan_link_register(struct rtnl_link_ops *ops);
-extern netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-
#if IS_ENABLED(CONFIG_MACVLAN)
static inline struct net_device *
macvlan_dev_real_dev(const struct net_device *dev)
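With struct macvlan_pcpu_stats gone, macvlan shares struct vlan_pcpu_stats with the 8021q code, so stats readers can use one aggregation loop for both. A minimal sketch of the reader side, assuming the usual u64_stats seqcount pairing for 64-bit counters on 32-bit hosts (macvlan_sum_rx() is illustrative, not part of the patch):

#include <linux/if_vlan.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

/* Hedged sketch: sum the per-CPU rx counters of a macvlan_dev that
 * now carries the shared struct vlan_pcpu_stats. */
static void macvlan_sum_rx(const struct macvlan_dev *vlan,
			   u64 *packets, u64 *bytes)
{
	int cpu;

	*packets = *bytes = 0;
	for_each_possible_cpu(cpu) {
		const struct vlan_pcpu_stats *p;
		unsigned int start;
		u64 rxp, rxb;

		p = per_cpu_ptr(vlan->pcpu_stats, cpu);
		do {
			start = u64_stats_fetch_begin_bh(&p->syncp);
			rxp = p->rx_packets;
			rxb = p->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));
		*packets += rxp;
		*bytes += rxb;
	}
}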
diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h
index f4e56ecd0b1a..712710bc0580 100644
--- a/include/linux/if_tunnel.h
+++ b/include/linux/if_tunnel.h
@@ -13,13 +13,4 @@
#define for_each_ip_tunnel_rcu(pos, start) \
for (pos = rcu_dereference(start); pos; pos = rcu_dereference(pos->next))
-/* often modified stats are per cpu, other are shared (netdev->stats) */
-struct pcpu_tstats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
- struct u64_stats_sync syncp;
-};
-
#endif /* _IF_TUNNEL_H_ */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index f252deb99454..bbedfb56bd66 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -82,25 +82,6 @@ static inline int is_vlan_dev(struct net_device *dev)
#define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
#define vlan_tx_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-
-extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
- __be16 vlan_proto, u16 vlan_id);
-extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
-extern u16 vlan_dev_vlan_id(const struct net_device *dev);
-
-/**
- * struct vlan_priority_tci_mapping - vlan egress priority mappings
- * @priority: skb priority
- * @vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
- * @next: pointer to next struct
- */
-struct vlan_priority_tci_mapping {
- u32 priority;
- u16 vlan_qos;
- struct vlan_priority_tci_mapping *next;
-};
-
/**
* struct vlan_pcpu_stats - VLAN percpu rx/tx stats
* @rx_packets: number of received packets
@@ -123,6 +104,25 @@ struct vlan_pcpu_stats {
u32 tx_dropped;
};
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+
+extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
+ __be16 vlan_proto, u16 vlan_id);
+extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
+extern u16 vlan_dev_vlan_id(const struct net_device *dev);
+
+/**
+ * struct vlan_priority_tci_mapping - vlan egress priority mappings
+ * @priority: skb priority
+ * @vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
+ * @next: pointer to next struct
+ */
+struct vlan_priority_tci_mapping {
+ u32 priority;
+ u16 vlan_qos;
+ struct vlan_priority_tci_mapping *next;
+};
+
struct proc_dir_entry;
struct netpoll;
diff --git a/include/linux/inet_lro.h b/include/linux/inet_lro.h
index 2cf55afbcd4e..9a715cfa1fe3 100644
--- a/include/linux/inet_lro.h
+++ b/include/linux/inet_lro.h
@@ -133,33 +133,10 @@ struct net_lro_mgr {
void lro_receive_skb(struct net_lro_mgr *lro_mgr,
struct sk_buff *skb,
void *priv);
-
-/*
- * Processes a fragment list
- *
- * This functions aggregate fragments and generate SKBs do pass
- * the packets to the stack.
- *
- * @lro_mgr: LRO manager to use
- * @frags: Fragment to be processed. Must contain entire header in first
- * element.
- * @len: Length of received data
- * @true_size: Actual size of memory the fragment is consuming
- * @priv: Private data that may be used by driver functions
- * (for example get_tcp_ip_hdr)
- */
-
-void lro_receive_frags(struct net_lro_mgr *lro_mgr,
- struct skb_frag_struct *frags,
- int len, int true_size, void *priv, __wsum sum);
-
/*
* Forward all aggregated SKBs held by lro_mgr to the network stack
*/
void lro_flush_all(struct net_lro_mgr *lro_mgr);
-void lro_flush_pkt(struct net_lro_mgr *lro_mgr,
- struct iphdr *iph, struct tcphdr *tcph);
-
#endif
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 0d678aefe69d..0068708161ff 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -136,8 +136,8 @@ struct in_ifaddr {
__be32 ifa_mask;
__be32 ifa_broadcast;
unsigned char ifa_scope;
- unsigned char ifa_flags;
unsigned char ifa_prefixlen;
+ __u32 ifa_flags;
char ifa_label[IFNAMSIZ];
/* In seconds, relative to tstamp. Expiry is at tstamp + HZ * lft. */
@@ -164,11 +164,10 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *);
void devinet_init(void);
struct in_device *inetdev_by_index(struct net *, int);
__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
-__be32 inet_confirm_addr(struct in_device *in_dev, __be32 dst, __be32 local,
- int scope);
+__be32 inet_confirm_addr(struct net *net, struct in_device *in_dev, __be32 dst,
+ __be32 local, int scope);
struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
__be32 mask);
-
static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa)
{
return !((addr^ifa->ifa_address)&ifa->ifa_mask);
@@ -220,6 +219,13 @@ static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev)
return rtnl_dereference(dev->ip_ptr);
}
+static inline struct neigh_parms *__in_dev_arp_parms_get_rcu(const struct net_device *dev)
+{
+ struct in_device *in_dev = __in_dev_get_rcu(dev);
+
+ return in_dev ? in_dev->arp_parms : NULL;
+}
+
void in_dev_finish_destroy(struct in_device *idev);
static inline void in_dev_put(struct in_device *idev)
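The new __in_dev_arp_parms_get_rcu() collapses the in_device lookup and the arp_parms dereference into one RCU-protected step. A minimal sketch of the expected calling convention (dev_has_arp_parms() is illustrative):

#include <linux/inetdevice.h>
#include <linux/rcupdate.h>

/* Hedged sketch: callers must hold rcu_read_lock(); NULL means the
 * device has no IPv4 configuration attached. */
static bool dev_has_arp_parms(struct net_device *dev)
{
	bool ret;

	rcu_read_lock();
	ret = __in_dev_arp_parms_get_rcu(dev) != NULL;
	rcu_read_unlock();
	return ret;
}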
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 5d89d1b808a6..7e1ded0d8e45 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -4,6 +4,7 @@
#include <uapi/linux/ipv6.h>
#define ipv6_optlen(p) (((p)->hdrlen+1) << 3)
+#define ipv6_authlen(p) (((p)->hdrlen+2) << 2)
/*
* This structure contains configuration options per IPv6 link.
*/
@@ -190,7 +191,7 @@ struct ipv6_pinfo {
/* sockopt flags */
__u16 recverr:1,
sndflow:1,
- pmtudisc:2,
+ pmtudisc:3,
ipv6only:1,
srcprefs:3, /* 001: prefer temporary address
* 010: prefer public address
@@ -199,7 +200,7 @@ struct ipv6_pinfo {
dontfrag:1;
__u8 min_hopcount;
__u8 tclass;
- __u8 rcv_tclass;
+ __be32 rcv_flowinfo;
__u32 dst_cookie;
__u32 rx_dst_cookie;
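The new ipv6_authlen() exists because the Authentication Header encodes its length differently from other extension headers: AH stores (length in 32-bit words) - 2, while the rest store (length in 64-bit words) - 1. For hdrlen == 4, ipv6_authlen() gives (4 + 2) << 2 = 24 bytes, where ipv6_optlen() would wrongly give (4 + 1) << 3 = 40. A minimal sketch of a parser using both (ext_hdr_len() is illustrative):

#include <linux/ipv6.h>
#include <net/ipv6.h>

/* Hedged sketch: pick the right length formula per extension header. */
static unsigned int ext_hdr_len(const struct ipv6_opt_hdr *hp, u8 nexthdr)
{
	return nexthdr == NEXTHDR_AUTH ? ipv6_authlen(hp) : ipv6_optlen(hp);
}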
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h
index 714ba08dc092..e374e369fb2f 100644
--- a/include/linux/irqreturn.h
+++ b/include/linux/irqreturn.h
@@ -14,6 +14,6 @@ enum irqreturn {
};
typedef enum irqreturn irqreturn_t;
-#define IRQ_RETVAL(x) ((x) != IRQ_NONE)
+#define IRQ_RETVAL(x) ((x) ? IRQ_HANDLED : IRQ_NONE)
#endif
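The IRQ_RETVAL() rewrite makes the macro produce a genuine irqreturn_t value instead of the bare 0/1 a comparison yields. Typical handler usage stays the same; a minimal sketch (struct foo_dev and its registers are hypothetical):

#include <linux/interrupt.h>
#include <linux/io.h>

struct foo_dev { void __iomem *regs; };	/* hypothetical device */
#define FOO_STAT	0x00		/* hypothetical register */
#define FOO_ACK		0x04		/* hypothetical register */

/* Hedged sketch: claim the interrupt only if our status bits fired;
 * IRQ_RETVAL() now expands to IRQ_HANDLED or IRQ_NONE directly. */
static irqreturn_t foo_isr(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;
	u32 stat = readl(foo->regs + FOO_STAT);

	if (stat)
		writel(stat, foo->regs + FOO_ACK);

	return IRQ_RETVAL(stat);
}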
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d4e98d13eff4..ecb87544cc5d 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -193,7 +193,8 @@ extern int _cond_resched(void);
(__x < 0) ? -__x : __x; \
})
-#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
+#if defined(CONFIG_MMU) && \
+ (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
void might_fault(void);
#else
static inline void might_fault(void) { }
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index d78d28a733b1..5fd33dc1fe3a 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -198,6 +198,9 @@ extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
extern size_t vmcoreinfo_size;
extern size_t vmcoreinfo_max_size;
+/* flag to track if kexec reboot is in progress */
+extern bool kexec_in_progress;
+
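The kexec_in_progress flag lets shutdown paths skip work that only matters for a full power cycle, since the incoming kernel reinitializes the hardware anyway. A hedged sketch of the intended check (all foo_* driver pieces are hypothetical):

#include <linux/kexec.h>

struct foo_dev;					/* hypothetical */
void foo_stop_dma(struct foo_dev *foo);		/* hypothetical */
void foo_power_down_phy(struct foo_dev *foo);	/* hypothetical */

/* Hedged sketch: always quiesce DMA, but skip slow cosmetic teardown
 * when we are about to jump into a new kernel. */
static void foo_shutdown(struct foo_dev *foo)
{
	foo_stop_dma(foo);

	if (kexec_in_progress)
		return;

	foo_power_down_phy(foo);
}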
int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
unsigned long long *crash_size, unsigned long long *crash_base);
int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 518a53afb9ea..a74c3a84dfdd 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -45,6 +45,7 @@ struct key_preparsed_payload {
const void *data; /* Raw data */
size_t datalen; /* Raw datalen */
size_t quotalen; /* Quota length for proposed payload */
+ bool trusted; /* True if key is trusted */
};
typedef int (*request_key_actor_t)(struct key_construction *key,
@@ -63,6 +64,11 @@ struct key_type {
*/
size_t def_datalen;
+ /* Default key search algorithm. */
+ unsigned def_lookup_type;
+#define KEYRING_SEARCH_LOOKUP_DIRECT 0x0000 /* Direct lookup by description. */
+#define KEYRING_SEARCH_LOOKUP_ITERATE 0x0001 /* Iterative search. */
+
/* vet a description */
int (*vet_description)(const char *description);
diff --git a/include/linux/key.h b/include/linux/key.h
index 4dfde1161c5e..80d677483e31 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -22,6 +22,7 @@
#include <linux/sysctl.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
+#include <linux/assoc_array.h>
#ifdef __KERNEL__
#include <linux/uidgid.h>
@@ -82,6 +83,12 @@ struct key_owner;
struct keyring_list;
struct keyring_name;
+struct keyring_index_key {
+ struct key_type *type;
+ const char *description;
+ size_t desc_len;
+};
+
/*****************************************************************************/
/*
* key reference with possession attribute handling
@@ -99,7 +106,7 @@ struct keyring_name;
typedef struct __key_reference_with_attributes *key_ref_t;
static inline key_ref_t make_key_ref(const struct key *key,
- unsigned long possession)
+ bool possession)
{
return (key_ref_t) ((unsigned long) key | possession);
}
@@ -109,7 +116,7 @@ static inline struct key *key_ref_to_ptr(const key_ref_t key_ref)
return (struct key *) ((unsigned long) key_ref & ~1UL);
}
-static inline unsigned long is_key_possessed(const key_ref_t key_ref)
+static inline bool is_key_possessed(const key_ref_t key_ref)
{
return (unsigned long) key_ref & 1UL;
}
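A key_ref_t smuggles the possession attribute in bit 0 of the struct key pointer, which works because the pointer is at least word-aligned; switching make_key_ref()/is_key_possessed() to bool matches that single-bit payload. A round-trip sketch (key_ref_demo() is illustrative):

#include <linux/bug.h>
#include <linux/key.h>

/* Hedged sketch: encode, then recover, pointer and possession bit. */
static void key_ref_demo(struct key *key)
{
	key_ref_t ref = make_key_ref(key, true);	/* possessed */

	WARN_ON(key_ref_to_ptr(ref) != key);
	WARN_ON(!is_key_possessed(ref));
}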
@@ -129,7 +136,6 @@ struct key {
struct list_head graveyard_link;
struct rb_node serial_node;
};
- struct key_type *type; /* type of key */
struct rw_semaphore sem; /* change vs change sem */
struct key_user *user; /* owner of this key */
void *security; /* security data for this key */
@@ -162,13 +168,21 @@ struct key {
#define KEY_FLAG_NEGATIVE 5 /* set if key is negative */
#define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */
#define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */
+#define KEY_FLAG_TRUSTED 8 /* set if key is trusted */
+#define KEY_FLAG_TRUSTED_ONLY 9 /* set if keyring only accepts links to trusted keys */
- /* the description string
- * - this is used to match a key against search criteria
- * - this should be a printable string
+ /* the key type and key description string
+ * - the desc is used to match a key against search criteria
+ * - it should be a printable string
* - eg: for krb5 AFS, this might be "afs@REDHAT.COM"
*/
- char *description;
+ union {
+ struct keyring_index_key index_key;
+ struct {
+ struct key_type *type; /* type of key */
+ char *description;
+ };
+ };
/* type specific data
* - this is used by the keyring type to index the name
@@ -185,11 +199,14 @@ struct key {
* whatever
*/
union {
- unsigned long value;
- void __rcu *rcudata;
- void *data;
- struct keyring_list __rcu *subscriptions;
- } payload;
+ union {
+ unsigned long value;
+ void __rcu *rcudata;
+ void *data;
+ void *data2[2];
+ } payload;
+ struct assoc_array keys;
+ };
};
extern struct key *key_alloc(struct key_type *type,
@@ -203,18 +220,23 @@ extern struct key *key_alloc(struct key_type *type,
#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */
#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */
#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
+#define KEY_ALLOC_TRUSTED 0x0004 /* Key should be flagged as trusted */
extern void key_revoke(struct key *key);
extern void key_invalidate(struct key *key);
extern void key_put(struct key *key);
-static inline struct key *key_get(struct key *key)
+static inline struct key *__key_get(struct key *key)
{
- if (key)
- atomic_inc(&key->usage);
+ atomic_inc(&key->usage);
return key;
}
+static inline struct key *key_get(struct key *key)
+{
+ return key ? __key_get(key) : key;
+}
+
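The split above follows the usual kernel convention: key_get() stays NULL-tolerant, while the new double-underscore __key_get() skips the check for callers that have already validated the pointer. A minimal sketch (both callers are illustrative):

#include <linux/key.h>

/* Hedged sketch: use key_get() on possibly-NULL pointers, __key_get()
 * only where non-NULL was just established (e.g. under a lock). */
static struct key *take_refs(struct key *maybe_key, struct key *known_key)
{
	struct key *a = key_get(maybe_key);	/* safe on NULL */
	struct key *b = __key_get(known_key);	/* caller guarantees !NULL */

	key_put(a);				/* key_put() is NULL-safe too */
	return b;
}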
static inline void key_ref_put(key_ref_t key_ref)
{
key_put(key_ref_to_ptr(key_ref));
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 0e23c26485f4..9b503376738f 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -418,6 +418,7 @@ enum {
ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */
+ ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index c8929c3832db..4bfde0e99ed5 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -19,7 +19,7 @@
#define USE_CMPXCHG_LOCKREF \
(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
- IS_ENABLED(CONFIG_SMP) && !BLOATED_SPINLOCKS)
+ IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)
struct lockref {
union {
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 69ed5f5e9f6e..c45c089bfdac 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -133,4 +133,34 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
return ret;
}
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+
+#ifndef mul_u64_u32_shr
+static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+{
+ return (u64)(((unsigned __int128)a * mul) >> shift);
+}
+#endif /* mul_u64_u32_shr */
+
+#else
+
+#ifndef mul_u64_u32_shr
+static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+{
+ u32 ah, al;
+ u64 ret;
+
+ al = a;
+ ah = a >> 32;
+
+ ret = ((u64)al * mul) >> shift;
+ if (ah)
+ ret += ((u64)ah * mul) << (32 - shift);
+
+ return ret;
+}
+#endif /* mul_u64_u32_shr */
+
+#endif
+
#endif /* _LINUX_MATH64_H */
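mul_u64_u32_shr() returns (a * mul) >> shift without losing the high bits of the up-to-96-bit product: one widening multiply where the compiler offers __int128, otherwise a split of a into 32-bit halves. Note the generic fallback shifts by (32 - shift), so it assumes shift <= 32. A worked sketch (scale_cycles() is illustrative):

#include <linux/math64.h>

/* Hedged sketch: apply a 12.20 fixed-point factor to a 64-bit count.
 * With factor = 3 << 20 and shift = 20 the result is exactly
 * 3 * cycles modulo 2^64, even where a plain (cycles * factor) >> 20
 * would overflow its intermediate 64-bit product. */
static u64 scale_cycles(u64 cycles)
{
	const u32 factor = 3 << 20;	/* 3.0 in 12.20 fixed point */

	return mul_u64_u32_shr(cycles, factor, 20);
}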
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index 3d15c838116c..b42963bc81dd 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -70,9 +70,6 @@ extern int mdio45_nway_restart(const struct mdio_if_info *mdio);
extern void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
struct ethtool_cmd *ecmd,
u32 npage_adv, u32 npage_lpa);
-extern void
-mdio45_ethtool_spauseparam_an(const struct mdio_if_info *mdio,
- const struct ethtool_pauseparam *ecmd);
/**
* mdio45_ethtool_gset - get settings for ETHTOOL_GSET
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index 2d0c9071bcfb..cab2dd279076 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -39,7 +39,8 @@ enum sec_device_type {
struct sec_pmic_dev {
struct device *dev;
struct sec_platform_data *pdata;
- struct regmap *regmap;
+ struct regmap *regmap_pmic;
+ struct regmap *regmap_rtc;
struct i2c_client *i2c;
struct i2c_client *rtc;
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index ad05ce60c1c9..2e5b194b9b19 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -22,6 +22,8 @@
#define PHY_ID_KSZ8021 0x00221555
#define PHY_ID_KSZ8031 0x00221556
#define PHY_ID_KSZ8041 0x00221510
+/* undocumented */
+#define PHY_ID_KSZ8041RNLI 0x00221537
#define PHY_ID_KSZ8051 0x00221550
/* same id: ks8001 Rev. A/B, and ks8721 Rev 3. */
#define PHY_ID_KSZ8001 0x0022161A
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index f5096b58b20d..f015c059e159 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -55,7 +55,8 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page,
- struct buffer_head *head, enum migrate_mode mode);
+ struct buffer_head *head, enum migrate_mode mode,
+ int extra_count);
#else
static inline void putback_lru_pages(struct list_head *l) {}
@@ -90,10 +91,19 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_NUMA_BALANCING
+extern bool pmd_trans_migrating(pmd_t pmd);
+extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
extern int migrate_misplaced_page(struct page *page,
struct vm_area_struct *vma, int node);
extern bool migrate_ratelimited(int node);
#else
+static inline bool pmd_trans_migrating(pmd_t pmd)
+{
+ return false;
+}
+static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
+{
+}
static inline int migrate_misplaced_page(struct page *page,
struct vm_area_struct *vma, int node)
{
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 8df61bc5da00..b0ec0fe035c0 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -180,6 +180,7 @@ enum {
MLX4_SET_PORT_GID_TABLE = 0x5,
MLX4_SET_PORT_PRIO2TC = 0x8,
MLX4_SET_PORT_SCHEDULER = 0x9,
+ MLX4_SET_PORT_VXLAN = 0xB
};
enum {
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
index 98fa492cf406..5dd0d70a0689 100644
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -81,7 +81,12 @@ struct mlx4_ts_cqe {
} __packed;
enum {
+ MLX4_CQE_L2_TUNNEL_IPOK = 1 << 31,
MLX4_CQE_VLAN_PRESENT_MASK = 1 << 29,
+ MLX4_CQE_L2_TUNNEL = 1 << 27,
+ MLX4_CQE_L2_TUNNEL_CSUM = 1 << 26,
+ MLX4_CQE_L2_TUNNEL_IPV4 = 1 << 25,
+
MLX4_CQE_QPN_MASK = 0xffffff,
};
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 7d3a523160ba..c99ecf6d2c67 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -119,6 +119,11 @@ static inline const char *mlx4_steering_mode_str(int steering_mode)
}
enum {
+ MLX4_TUNNEL_OFFLOAD_MODE_NONE,
+ MLX4_TUNNEL_OFFLOAD_MODE_VXLAN
+};
+
+enum {
MLX4_DEV_CAP_FLAG_RC = 1LL << 0,
MLX4_DEV_CAP_FLAG_UC = 1LL << 1,
MLX4_DEV_CAP_FLAG_UD = 1LL << 2,
@@ -160,7 +165,8 @@ enum {
MLX4_DEV_CAP_FLAG2_TS = 1LL << 5,
MLX4_DEV_CAP_FLAG2_VLAN_CONTROL = 1LL << 6,
MLX4_DEV_CAP_FLAG2_FSM = 1LL << 7,
- MLX4_DEV_CAP_FLAG2_UPDATE_QP = 1LL << 8
+ MLX4_DEV_CAP_FLAG2_UPDATE_QP = 1LL << 8,
+ MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 9
};
enum {
@@ -454,6 +460,8 @@ struct mlx4_caps {
u32 userspace_caps; /* userspace must be aware of these */
u32 function_caps; /* VFs must be aware of these */
u16 hca_core_clock;
+ u64 phys_port_id[MLX4_MAX_PORTS + 1];
+ int tunnel_offload_mode;
};
struct mlx4_buf_list {
@@ -908,6 +916,7 @@ enum mlx4_net_trans_rule_id {
MLX4_NET_TRANS_RULE_ID_IPV4,
MLX4_NET_TRANS_RULE_ID_TCP,
MLX4_NET_TRANS_RULE_ID_UDP,
+ MLX4_NET_TRANS_RULE_ID_VXLAN,
MLX4_NET_TRANS_RULE_NUM, /* should be last */
};
@@ -965,6 +974,12 @@ struct mlx4_spec_ib {
u8 dst_gid_msk[16];
};
+struct mlx4_spec_vxlan {
+ __be32 vni;
+ __be32 vni_mask;
+};
+
struct mlx4_spec_list {
struct list_head list;
enum mlx4_net_trans_rule_id id;
@@ -973,6 +988,7 @@ struct mlx4_spec_list {
struct mlx4_spec_ib ib;
struct mlx4_spec_ipv4 ipv4;
struct mlx4_spec_tcp_udp tcp_udp;
+ struct mlx4_spec_vxlan vxlan;
};
};
@@ -1059,6 +1075,15 @@ struct mlx4_net_trans_rule_hw_ipv4 {
__be32 src_ip_msk;
} __packed;
+struct mlx4_net_trans_rule_hw_vxlan {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ __be32 rsvd1;
+ __be32 vni;
+ __be32 vni_mask;
+} __packed;
+
struct _rule_hw {
union {
struct {
@@ -1070,9 +1095,19 @@ struct _rule_hw {
struct mlx4_net_trans_rule_hw_ib ib;
struct mlx4_net_trans_rule_hw_ipv4 ipv4;
struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
+ struct mlx4_net_trans_rule_hw_vxlan vxlan;
};
};
+enum {
+ VXLAN_STEER_BY_OUTER_MAC = 1 << 0,
+ VXLAN_STEER_BY_OUTER_VLAN = 1 << 1,
+ VXLAN_STEER_BY_VSID_VNI = 1 << 2,
+ VXLAN_STEER_BY_INNER_MAC = 1 << 3,
+ VXLAN_STEER_BY_INNER_VLAN = 1 << 4,
+};
+
int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
enum mlx4_net_trans_promisc_mode mode);
int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
@@ -1095,6 +1130,7 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
u8 *pg, u16 *ratelimit);
+int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
@@ -1113,6 +1149,7 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
int *vector);
void mlx4_release_eq(struct mlx4_dev *dev, int vec);
+int mlx4_get_phys_port_id(struct mlx4_dev *dev);
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 6d351473c292..59f8ba84568b 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -109,6 +109,10 @@ enum {
MLX4_RSS_TCP_IPV4 = 1 << 4,
MLX4_RSS_IPV4 = 1 << 5,
+ MLX4_RSS_BY_OUTER_HEADERS = 0 << 6,
+ MLX4_RSS_BY_INNER_HEADERS = 2 << 6,
+ MLX4_RSS_BY_INNER_HEADERS_IPONLY = 3 << 6,
+
/* offset of mlx4_rss_context within mlx4_qp_context.pri_path */
MLX4_RSS_OFFSET_IN_QPC_PRI_PATH = 0x24,
/* offset of being RSS indirection QP within mlx4_qp_context.flags */
@@ -252,6 +256,8 @@ enum { /* param3 */
enum {
MLX4_WQE_CTRL_NEC = 1 << 29,
+ MLX4_WQE_CTRL_IIP = 1 << 28,
+ MLX4_WQE_CTRL_ILP = 1 << 27,
MLX4_WQE_CTRL_FENCE = 1 << 6,
MLX4_WQE_CTRL_CQ_UPDATE = 3 << 2,
MLX4_WQE_CTRL_SOLICITED = 1 << 1,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0548eb201e05..35527173cf50 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1317,8 +1317,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
#if USE_SPLIT_PTE_PTLOCKS
-#if BLOATED_SPINLOCKS
-void __init ptlock_cache_init(void);
+#if ALLOC_SPLIT_PTLOCKS
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);
@@ -1326,8 +1325,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
{
return page->ptl;
}
-#else /* BLOATED_SPINLOCKS */
-static inline void ptlock_cache_init(void) {}
+#else /* ALLOC_SPLIT_PTLOCKS */
static inline bool ptlock_alloc(struct page *page)
{
return true;
@@ -1341,7 +1339,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
{
return &page->ptl;
}
-#endif /* BLOATED_SPINLOCKS */
+#endif /* ALLOC_SPLIT_PTLOCKS */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
@@ -1380,17 +1378,10 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
return &mm->page_table_lock;
}
-static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct page *page) { return true; }
static inline void pte_lock_deinit(struct page *page) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */
-static inline void pgtable_init(void)
-{
- ptlock_cache_init();
- pgtable_cache_init();
-}
-
static inline bool pgtable_page_ctor(struct page *page)
{
inc_zone_page_state(page, NR_PAGETABLE);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 10f5a7272b80..290901a8c1de 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -26,6 +26,7 @@ struct address_space;
#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
+#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
/*
* Each physical page in the system has a struct page associated with
@@ -44,18 +45,22 @@ struct page {
/* First double word block */
unsigned long flags; /* Atomic flags, some possibly
* updated asynchronously */
- struct address_space *mapping; /* If low bit clear, points to
- * inode address_space, or NULL.
- * If page mapped as anonymous
- * memory, low bit is set, and
- * it points to anon_vma object:
- * see PAGE_MAPPING_ANON below.
- */
+ union {
+ struct address_space *mapping; /* If low bit clear, points to
+ * inode address_space, or NULL.
+ * If page mapped as anonymous
+ * memory, low bit is set, and
+ * it points to anon_vma object:
+ * see PAGE_MAPPING_ANON below.
+ */
+ void *s_mem; /* slab first object */
+ };
+
/* Second double word */
struct {
union {
pgoff_t index; /* Our offset within mapping. */
- void *freelist; /* slub/slob first free object */
+ void *freelist; /* sl[aou]b first free object */
bool pfmemalloc; /* If set by the page allocator,
* ALLOC_NO_WATERMARKS was set
* and the low watermark was not
@@ -65,9 +70,6 @@ struct page {
* this page is only used to
* free other pages.
*/
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
- pgtable_t pmd_huge_pte; /* protected by page->ptl */
-#endif
};
union {
@@ -114,6 +116,7 @@ struct page {
};
atomic_t _count; /* Usage count, see below. */
};
+ unsigned int active; /* SLAB */
};
};
@@ -135,6 +138,12 @@ struct page {
struct list_head list; /* slobs list of pages */
struct slab *slab_page; /* slab fields */
+ struct rcu_head rcu_head; /* Used by SLAB
+ * when destroying via RCU
+ */
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
+ pgtable_t pmd_huge_pte; /* protected by page->ptl */
+#endif
};
/* Remainder is not double word aligned */
@@ -147,7 +156,7 @@ struct page {
* system if PG_buddy is set.
*/
#if USE_SPLIT_PTE_PTLOCKS
-#if BLOATED_SPINLOCKS
+#if ALLOC_SPLIT_PTLOCKS
spinlock_t *ptl;
#else
spinlock_t ptl;
@@ -435,6 +444,14 @@ struct mm_struct {
/* numa_scan_seq prevents two threads setting pte_numa */
int numa_scan_seq;
#endif
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+ /*
+ * An operation with batched TLB flushing is going on. Anything that
+ * can move process memory needs to flush the TLB when moving a
+ * PROT_NONE or PROT_NUMA mapped page.
+ */
+ bool tlb_flush_pending;
+#endif
struct uprobes_state uprobes_state;
};
@@ -451,4 +468,45 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
return mm->cpu_vm_mask_var;
}
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+/*
+ * Memory barriers to keep this state in sync are graciously provided by
+ * the page table locks, outside of which no page table modifications happen.
+ * The barriers below prevent the compiler from re-ordering the instructions
+ * around the memory barriers that are already present in the code.
+ */
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+ barrier();
+ return mm->tlb_flush_pending;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+ mm->tlb_flush_pending = true;
+
+ /*
+ * Guarantee that the tlb_flush_pending store does not leak into the
+ * critical section updating the page tables
+ */
+ smp_mb__before_spinlock();
+}
+/* Clearing is done after a TLB flush, which also provides a barrier. */
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+ barrier();
+ mm->tlb_flush_pending = false;
+}
+#else
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+ return false;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+#endif
+
#endif /* _LINUX_MM_TYPES_H */
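The tlb_flush_pending protocol has a writer and a reader side: code that batches protection changes raises the flag before touching page tables, flushes, then clears it, while migration code polls mm_tlb_flush_pending() and treats a set flag as "this PROT_NONE/PROT_NUMA pte may still be live in some TLB". A hedged sketch of the writer side (change_prot_batched() is illustrative; change_protection() is the in-tree analogue):

#include <linux/mm_types.h>
#include <asm/tlbflush.h>

/* Hedged sketch: raise the flag before the pte updates, flush, clear.
 * The smp_mb__before_spinlock() inside set_tlb_flush_pending() keeps
 * the store from leaking past the page table lock acquisition. */
static void change_prot_batched(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	set_tlb_flush_pending(mm);
	/* ... update ptes in [start, end) under the page table locks ... */
	flush_tlb_mm(mm);
	clear_tlb_flush_pending(mm);
}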
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 87cce50bd121..009b02481436 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -26,11 +26,11 @@ struct msi_desc {
struct {
__u8 is_msix : 1;
__u8 multiple: 3; /* log2 number of messages */
- __u8 maskbit : 1; /* mask-pending bit supported ? */
- __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
- __u8 pos; /* Location of the msi capability */
- __u16 entry_nr; /* specific enabled entry */
- unsigned default_irq; /* default pre-assigned irq */
+ __u8 maskbit : 1; /* mask-pending bit supported ? */
+ __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
+ __u8 pos; /* Location of the msi capability */
+ __u16 entry_nr; /* specific enabled entry */
+ unsigned default_irq; /* default pre-assigned irq */
} msi_attrib;
u32 masked; /* mask bits */
diff --git a/include/linux/net.h b/include/linux/net.h
index 4bcee94cef93..69be3e6079c8 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -181,7 +181,7 @@ struct proto_ops {
int offset, size_t size, int flags);
ssize_t (*splice_read)(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len, unsigned int flags);
- void (*set_peek_off)(struct sock *sk, int val);
+ int (*set_peek_off)(struct sock *sk, int val);
};
#define DECLARE_SOCKADDR(type, dst, src) \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 7f0ed423a360..a2a70cc70e7b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1255,7 +1255,7 @@ struct net_device {
unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
unsigned char addr_assign_type; /* hw address assignment type */
unsigned char addr_len; /* hardware address length */
- unsigned char neigh_priv_len;
+ unsigned short neigh_priv_len;
unsigned short dev_id; /* Used to differentiate devices
* that share the same link
* layer address
@@ -1283,6 +1283,9 @@ struct net_device {
#if IS_ENABLED(CONFIG_NET_DSA)
struct dsa_switch_tree *dsa_ptr; /* dsa specific data */
#endif
+#if IS_ENABLED(CONFIG_TIPC)
+ struct tipc_bearer __rcu *tipc_ptr; /* TIPC specific data */
+#endif
void *atalk_ptr; /* AppleTalk link */
struct in_device __rcu *ip_ptr; /* IPv4 specific data */
struct dn_dev __rcu *dn_ptr; /* DECnet specific data */
@@ -1406,7 +1409,7 @@ struct net_device {
union {
void *ml_priv;
struct pcpu_lstats __percpu *lstats; /* loopback stats */
- struct pcpu_tstats __percpu *tstats; /* tunnel stats */
+ struct pcpu_sw_netstats __percpu *tstats;
struct pcpu_dstats __percpu *dstats; /* dummy stats */
struct pcpu_vstats __percpu *vstats; /* veth stats */
};
@@ -1441,7 +1444,7 @@ struct net_device {
/* max exchange id for FCoE LRO by ddp */
unsigned int fcoe_ddp_xid;
#endif
-#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
struct netprio_map __rcu *priomap;
#endif
/* phy device may attach itself for hardware timestamping */
@@ -1629,7 +1632,10 @@ struct napi_gro_cb {
int data_offset;
/* This is non-zero if the packet cannot be merged with the new skb. */
- int flush;
+ u16 flush;
+
+ /* Save the IP ID here and check when we get to the transport layer */
+ u16 flush_id;
/* Number of segments aggregated. */
u16 count;
@@ -1648,6 +1654,9 @@ struct napi_gro_cb {
/* Used in ipv6_gro_receive() */
int proto;
+ /* used to support CHECKSUM_COMPLETE for tunneling protocols */
+ __wsum csum;
+
/* used in skb_gro_receive() slow path */
struct sk_buff *last;
};
@@ -1673,7 +1682,7 @@ struct offload_callbacks {
int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff **(*gro_receive)(struct sk_buff **head,
struct sk_buff *skb);
- int (*gro_complete)(struct sk_buff *skb);
+ int (*gro_complete)(struct sk_buff *skb, int nhoff);
};
struct packet_offload {
@@ -1682,6 +1691,15 @@ struct packet_offload {
struct list_head list;
};
+/* often modified stats are per cpu, others are shared (netdev->stats) */
+struct pcpu_sw_netstats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+};
+
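struct pcpu_sw_netstats replaces the old per-subsystem copies (pcpu_tstats above, macvlan's private struct earlier in this patch), so tunnel-style drivers all allocate the same per-CPU block behind netdev->tstats. A minimal sketch of the allocation side (foo_tunnel_init() is illustrative):

#include <linux/netdevice.h>
#include <linux/percpu.h>

/* Hedged sketch: an ndo_init allocating the shared per-CPU sw stats;
 * the matching ndo_uninit would free_percpu(dev->tstats). */
static int foo_tunnel_init(struct net_device *dev)
{
	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;
	return 0;
}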
#include <linux/notifier.h>
/* netdevice notifier chain. Please remember to update the rtnetlink
@@ -1738,8 +1756,6 @@ netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
return info->dev;
}
-int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
- struct netdev_notifier_info *info);
int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
@@ -1806,7 +1822,6 @@ void dev_remove_pack(struct packet_type *pt);
void __dev_remove_pack(struct packet_type *pt);
void dev_add_offload(struct packet_offload *po);
void dev_remove_offload(struct packet_offload *po);
-void __dev_remove_offload(struct packet_offload *po);
struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
unsigned short mask);
@@ -1891,6 +1906,14 @@ static inline void *skb_gro_network_header(struct sk_buff *skb)
skb_network_offset(skb);
}
+static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
+ const void *start, unsigned int len)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
+ csum_partial(start, len, 0));
+}
+
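skb_gro_postpull_rcsum() keeps a CHECKSUM_COMPLETE value honest when a GRO handler pulls an encapsulation header: the pulled bytes are subtracted from the running sum so inner-checksum validation still works. A hedged sketch of a tunnel gro_receive (the 8-byte header and the foo_ name are hypothetical):

#include <linux/netdevice.h>

/* Hedged sketch: pull a hypothetical 8-byte tunnel header and adjust
 * NAPI_GRO_CB(skb)->csum before handing off to the inner protocol. */
static struct sk_buff **foo_gro_receive(struct sk_buff **head,
					struct sk_buff *skb)
{
	const unsigned int hdrlen = 8;	/* hypothetical header size */
	unsigned int off = skb_gro_offset(skb);
	void *hdr = skb_gro_header_slow(skb, off + hdrlen, off);

	if (!hdr)
		return NULL;

	skb_gro_pull(skb, hdrlen);
	skb_gro_postpull_rcsum(skb, hdr, hdrlen);
	/* ... look up and call the inner protocol's gro_receive ... */
	return NULL;
}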
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr,
@@ -1912,6 +1935,15 @@ static inline int dev_parse_header(const struct sk_buff *skb,
return dev->header_ops->parse(skb, haddr);
}
+static inline int dev_rebuild_header(struct sk_buff *skb)
+{
+ const struct net_device *dev = skb->dev;
+
+ if (!dev->header_ops || !dev->header_ops->rebuild)
+ return 0;
+ return dev->header_ops->rebuild(skb);
+}
+
typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
@@ -2368,17 +2400,52 @@ static inline int netif_copy_real_num_queues(struct net_device *to_dev,
#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
int netif_get_num_default_rss_queues(void);
-/* Use this variant when it is known for sure that it
- * is executing from hardware interrupt context or with hardware interrupts
- * disabled.
- */
-void dev_kfree_skb_irq(struct sk_buff *skb);
+enum skb_free_reason {
+ SKB_REASON_CONSUMED,
+ SKB_REASON_DROPPED,
+};
+
+void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
+void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
-/* Use this variant in places where it could be invoked
- * from either hardware interrupt or other context, with hardware interrupts
- * either disabled or enabled.
+/*
+ * It is not allowed to call kfree_skb() or consume_skb() from hardware
+ * interrupt context or with hardware interrupts being disabled.
+ * (in_irq() || irqs_disabled())
+ *
+ * We provide four helpers that can be used in the following contexts:
+ *
+ * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
+ * replacing kfree_skb(skb)
+ *
+ * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
+ * Typically used in place of consume_skb(skb) in the TX completion path
+ *
+ * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
+ * replacing kfree_skb(skb)
+ *
+ * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
+ * and has consumed a packet. Used in place of consume_skb(skb)
*/
-void dev_kfree_skb_any(struct sk_buff *skb);
+static inline void dev_kfree_skb_irq(struct sk_buff *skb)
+{
+ __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
+}
+
+static inline void dev_consume_skb_irq(struct sk_buff *skb)
+{
+ __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
+}
+
+static inline void dev_kfree_skb_any(struct sk_buff *skb)
+{
+ __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
+}
+
+static inline void dev_consume_skb_any(struct sk_buff *skb)
+{
+ __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
+}
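The consume/kfree split propagates the drop-vs-consume distinction (visible to dropwatch and the skb tracepoints) into contexts where the irq state is unknown. A minimal TX-completion sketch (foo_tx_complete() is illustrative):

#include <linux/netdevice.h>

/* Hedged sketch: successfully transmitted buffers are consumed,
 * error-path buffers are dropped; both calls are irq-context safe. */
static void foo_tx_complete(struct sk_buff *skb, bool sent)
{
	if (sent)
		dev_consume_skb_any(skb);
	else
		dev_kfree_skb_any(skb);
}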
int netif_rx(struct sk_buff *skb);
int netif_rx_ni(struct sk_buff *skb);
@@ -2387,6 +2454,8 @@ gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);
+struct packet_offload *gro_find_receive_by_type(__be16 type);
+struct packet_offload *gro_find_complete_by_type(__be16 type);
static inline void napi_free_frags(struct napi_struct *napi)
{
@@ -2772,17 +2841,10 @@ int register_netdev(struct net_device *dev);
void unregister_netdev(struct net_device *dev);
/* General hardware address lists handling functions */
-int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len, unsigned char addr_type);
-void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len, unsigned char addr_type);
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list, int addr_len);
void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list, int addr_len);
-void __hw_addr_flush(struct netdev_hw_addr_list *list);
void __hw_addr_init(struct netdev_hw_addr_list *list);
/* Functions used for device addresses handling */
@@ -2790,10 +2852,6 @@ int dev_addr_add(struct net_device *dev, const unsigned char *addr,
unsigned char addr_type);
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
unsigned char addr_type);
-int dev_addr_add_multiple(struct net_device *to_dev,
- struct net_device *from_dev, unsigned char addr_type);
-int dev_addr_del_multiple(struct net_device *to_dev,
- struct net_device *from_dev, unsigned char addr_type);
void dev_addr_flush(struct net_device *dev);
int dev_addr_init(struct net_device *dev);
@@ -2840,7 +2898,6 @@ extern int weight_p;
extern int bpf_jit_enable;
bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
-bool netdev_has_any_upper_dev(struct net_device *dev);
struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
struct list_head **iter);
@@ -2869,6 +2926,7 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
void *netdev_adjacent_get_private(struct list_head *adj_list);
+void *netdev_lower_get_first_private_rcu(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
@@ -2879,8 +2937,6 @@ int netdev_master_upper_dev_link_private(struct net_device *dev,
void *private);
void netdev_upper_dev_unlink(struct net_device *dev,
struct net_device *upper_dev);
-void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
- struct net_device *lower_dev);
void *netdev_lower_dev_get_private(struct net_device *dev,
struct net_device *lower_dev);
int skb_checksum_help(struct sk_buff *skb);
@@ -3008,6 +3064,19 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
dev->gso_max_size = size;
}
+static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
+ int pulled_hlen, u16 mac_offset,
+ int mac_len)
+{
+ skb->protocol = protocol;
+ skb->encapsulation = 1;
+ skb_push(skb, pulled_hlen);
+ skb_reset_transport_header(skb);
+ skb->mac_header = mac_offset;
+ skb->network_header = skb->mac_header + mac_len;
+ skb->mac_len = mac_len;
+}
+
static inline bool netif_is_macvlan(struct net_device *dev)
{
return dev->priv_flags & IFF_MACVLAN;
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index c7174b816674..0c7d01eae56c 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -331,7 +331,6 @@ extern ip_set_id_t ip_set_get_byname(struct net *net,
const char *name, struct ip_set **set);
extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index);
-extern ip_set_id_t ip_set_nfnl_get(struct net *net, const char *name);
extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 7a6c396a263b..aad8eeaf416d 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -62,7 +62,6 @@ extern void netlink_kernel_release(struct sock *sk);
extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
-extern void netlink_clear_multicast_users(struct sock *sk, unsigned int group);
extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
extern int netlink_has_listeners(struct sock *sk, unsigned int group);
extern struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
@@ -168,7 +167,6 @@ struct netlink_tap {
};
extern int netlink_add_tap(struct netlink_tap *nt);
-extern int __netlink_remove_tap(struct netlink_tap *nt);
extern int netlink_remove_tap(struct netlink_tap *nt);
#endif /* __LINUX_NETLINK_H */
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index c1637062c1ce..12c2cb947df5 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -413,16 +413,6 @@ enum lock_type4 {
#define NFS4_VERSION 4
#define NFS4_MINOR_VERSION 0
-#if defined(CONFIG_NFS_V4_2)
-#define NFS4_MAX_MINOR_VERSION 2
-#else
-#if defined(CONFIG_NFS_V4_1)
-#define NFS4_MAX_MINOR_VERSION 1
-#else
-#define NFS4_MAX_MINOR_VERSION 0
-#endif /* CONFIG_NFS_V4_1 */
-#endif /* CONFIG_NFS_V4_2 */
-
#define NFS4_DEBUG 1
/* Index of predefined Linux client operations */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 14a48207a304..48997374eaf0 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -507,24 +507,6 @@ extern int nfs_mountpoint_expiry_timeout;
extern void nfs_release_automount_timer(void);
/*
- * linux/fs/nfs/nfs4proc.c
- */
-#ifdef CONFIG_NFS_V4_SECURITY_LABEL
-extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags);
-static inline void nfs4_label_free(struct nfs4_label *label)
-{
- if (label) {
- kfree(label->label);
- kfree(label);
- }
- return;
-}
-#else
-static inline struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags) { return NULL; }
-static inline void nfs4_label_free(void *label) {}
-#endif
-
-/*
* linux/fs/nfs/unlink.c
*/
extern void nfs_complete_unlink(struct dentry *dentry, struct inode *);
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 86292beebfe2..438694650471 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -129,10 +129,9 @@ struct parallel_data {
struct padata_serial_queue __percpu *squeue;
atomic_t reorder_objects;
atomic_t refcnt;
+ atomic_t seq_nr;
struct padata_cpumask cpumask;
spinlock_t lock ____cacheline_aligned;
- spinlock_t seq_lock;
- unsigned int seq_nr;
unsigned int processed;
struct timer_list timer;
};
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index d006f0ca60f4..5a462c4e5009 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -27,7 +27,7 @@ static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
while (!pci_is_root_bus(pbus))
pbus = pbus->parent;
- return DEVICE_ACPI_HANDLE(pbus->bridge);
+ return ACPI_HANDLE(pbus->bridge);
}
static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
@@ -39,7 +39,7 @@ static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
else
dev = &pbus->self->dev;
- return DEVICE_ACPI_HANDLE(dev);
+ return ACPI_HANDLE(dev);
}
void acpi_pci_add_bus(struct pci_bus *bus);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 835ec7bf6c05..a13d6825e586 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -32,7 +32,6 @@
#include <linux/irqreturn.h>
#include <uapi/linux/pci.h>
-/* Include the ID list */
#include <linux/pci_ids.h>
/*
@@ -42,9 +41,10 @@
*
* 7:3 = slot
* 2:0 = function
- * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined uapi/linux/pci.h
+ *
+ * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
* In the interest of not exposing interfaces to user-space unnecessarily,
- * the following kernel only defines are being added here.
+ * the following kernel-only defines are being added here.
*/
#define PCI_DEVID(bus, devfn) ((((u16)bus) << 8) | devfn)
/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */
@@ -153,10 +153,10 @@ enum pcie_reset_state {
/* Reset is NOT asserted (Use to deassert reset) */
pcie_deassert_reset = (__force pcie_reset_state_t) 1,
- /* Use #PERST to reset PCI-E device */
+ /* Use #PERST to reset PCIe device */
pcie_warm_reset = (__force pcie_reset_state_t) 2,
- /* Use PCI-E Hot Reset to reset device */
+ /* Use PCIe Hot Reset to reset device */
pcie_hot_reset = (__force pcie_reset_state_t) 3
};
@@ -259,13 +259,13 @@ struct pci_dev {
unsigned int class; /* 3 bytes: (base,sub,prog-if) */
u8 revision; /* PCI revision, low byte of class word */
u8 hdr_type; /* PCI header type (`multi' flag masked out) */
- u8 pcie_cap; /* PCI-E capability offset */
+ u8 pcie_cap; /* PCIe capability offset */
u8 msi_cap; /* MSI capability offset */
u8 msix_cap; /* MSI-X capability offset */
- u8 pcie_mpss:3; /* PCI-E Max Payload Size Supported */
+ u8 pcie_mpss:3; /* PCIe Max Payload Size Supported */
u8 rom_base_reg; /* which config register controls the ROM */
- u8 pin; /* which interrupt pin this device uses */
- u16 pcie_flags_reg; /* cached PCI-E Capabilities Register */
+ u8 pin; /* which interrupt pin this device uses */
+ u16 pcie_flags_reg; /* cached PCIe Capabilities Register */
struct pci_driver *driver; /* which driver has allocated this device */
u64 dma_mask; /* Mask of the bits of bus address this
@@ -300,7 +300,7 @@ struct pci_dev {
unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
#ifdef CONFIG_PCIEASPM
- struct pcie_link_state *link_state; /* ASPM link state. */
+ struct pcie_link_state *link_state; /* ASPM link state */
#endif
pci_channel_state_t error_state; /* current connectivity state */
@@ -317,7 +317,7 @@ struct pci_dev {
bool match_driver; /* Skip attaching driver */
/* These fields are used by common fixups */
- unsigned int transparent:1; /* Transparent PCI bridge */
+ unsigned int transparent:1; /* Subtractive decode PCI bridge */
unsigned int multifunction:1;/* Part of multi-function device */
/* keep track of device state */
unsigned int is_added:1;
@@ -326,7 +326,7 @@ struct pci_dev {
unsigned int block_cfg_access:1; /* config space access is blocked */
unsigned int broken_parity_status:1; /* Device generates false positive parity */
unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
- unsigned int msi_enabled:1;
+ unsigned int msi_enabled:1;
unsigned int msix_enabled:1;
unsigned int ari_enabled:1; /* ARI forwarding */
unsigned int is_managed:1;
@@ -371,7 +371,6 @@ static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
if (dev->is_virtfn)
dev = dev->physfn;
#endif
-
return dev;
}
@@ -456,7 +455,7 @@ struct pci_bus {
char name[48];
unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */
- pci_bus_flags_t bus_flags; /* Inherited by child busses */
+ pci_bus_flags_t bus_flags; /* inherited by child buses */
struct device *bridge;
struct device dev;
struct bin_attribute *legacy_io; /* legacy I/O for this bus */
@@ -468,7 +467,7 @@ struct pci_bus {
#define to_pci_bus(n) container_of(n, struct pci_bus, dev)
/*
- * Returns true if the pci bus is root (behind host-pci bridge),
+ * Returns true if the PCI bus is root (behind host-PCI bridge),
* false otherwise
*
* Some code assumes that "bus->self == NULL" means that bus is a root bus.
@@ -510,7 +509,7 @@ static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false;
#define PCIBIOS_BUFFER_TOO_SMALL 0x89
/*
- * Translate above to generic errno for passing back through non-pci.
+ * Translate above to generic errno for passing back through non-PCI code.
*/
static inline int pcibios_err_to_errno(int err)
{
@@ -561,11 +560,12 @@ struct pci_dynids {
struct list_head list; /* for IDs added at runtime */
};
-/* ---------------------------------------------------------------- */
-/** PCI Error Recovery System (PCI-ERS). If a PCI device driver provides
- * a set of callbacks in struct pci_error_handlers, then that device driver
- * will be notified of PCI bus errors, and will be driven to recovery
- * when an error occurs.
+
+/*
+ * PCI Error Recovery System (PCI-ERS). If a PCI device driver provides
+ * a set of callbacks in struct pci_error_handlers, that device driver
+ * will be notified of PCI bus errors, and will be driven to recovery
+ * when an error occurs.
*/
typedef unsigned int __bitwise pci_ers_result_t;
@@ -609,7 +609,6 @@ struct pci_error_handlers {
void (*resume)(struct pci_dev *dev);
};
-/* ---------------------------------------------------------------- */
struct module;
struct pci_driver {
@@ -713,10 +712,10 @@ extern enum pcie_bus_config_types pcie_bus_config;
extern struct bus_type pci_bus_type;
-/* Do NOT directly access these two variables, unless you are arch specific pci
- * code, or pci core code. */
+/* Do NOT directly access these two variables, unless you are arch-specific PCI
+ * code, or PCI core code. */
extern struct list_head pci_root_buses; /* list of all known PCI buses */
-/* Some device drivers need know if pci is initiated */
+/* Some device drivers need to know if PCI is initialized */
int no_pci_devices(void);
void pcibios_resource_survey_bus(struct pci_bus *bus);
@@ -724,7 +723,7 @@ void pcibios_add_bus(struct pci_bus *bus);
void pcibios_remove_bus(struct pci_bus *bus);
void pcibios_fixup_bus(struct pci_bus *);
int __must_check pcibios_enable_device(struct pci_dev *, int mask);
-/* Architecture specific versions may override this (weak) */
+/* Architecture-specific versions may override this (weak) */
char *pcibios_setup(char *str);
/* Used only when drivers/pci/setup.c is used */
@@ -961,6 +960,7 @@ void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
+bool pci_device_is_present(struct pci_dev *pdev);
/* ROM control related routines */
int pci_enable_rom(struct pci_dev *pdev);
@@ -1258,7 +1258,7 @@ void pci_cfg_access_unlock(struct pci_dev *dev);
/*
* PCI domain support. Sometimes called PCI segment (eg by ACPI),
- * a PCI domain is defined to be a set of PCI busses which share
+ * a PCI domain is defined to be a set of PCI buses which share
* configuration space.
*/
#ifdef CONFIG_PCI_DOMAINS
@@ -1568,65 +1568,65 @@ enum pci_fixup_pass {
/* Anonymous variables would be nice... */
#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
class_shift, hook) \
- static const struct pci_fixup __pci_fixup_##name __used \
+ static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \
__attribute__((__section__(#section), aligned((sizeof(void *))))) \
= { vendor, device, class, class_shift, hook };
#define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
- vendor##device##hook, vendor, device, class, class_shift, hook)
+ hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
- vendor##device##hook, vendor, device, class, class_shift, hook)
+ hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
- vendor##device##hook, vendor, device, class, class_shift, hook)
+ hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
- vendor##device##hook, vendor, device, class, class_shift, hook)
+ hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
- resume##vendor##device##hook, vendor, device, class, \
+ resume##hook, vendor, device, class, \
class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
- resume_early##vendor##device##hook, vendor, device, \
+ resume_early##hook, vendor, device, \
class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
- suspend##vendor##device##hook, vendor, device, class, \
+ suspend##hook, vendor, device, class, \
class_shift, hook)
#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
- vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook)
+ hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
- vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook)
+ hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
- vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook)
+ hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
- vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook)
+ hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
- resume##vendor##device##hook, vendor, device, \
+ resume##hook, vendor, device, \
PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
- resume_early##vendor##device##hook, vendor, device, \
+ resume_early##hook, vendor, device, \
PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
- suspend##vendor##device##hook, vendor, device, \
+ suspend##hook, vendor, device, \
PCI_ANY_ID, 0, hook)
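Folding __LINE__ into the fixup symbol name removes the old vendor##device##hook pasting, under which registering the same (vendor, device, hook) triple in two fixup phases produced two identically named statics in one translation unit and failed to compile. A hedged sketch of a now-legal pair (the quirk and IDs are hypothetical):

#include <linux/pci.h>

/* Hedged sketch: one routine registered for two fixup phases; the
 * __LINE__-based names keep the two static structs distinct. */
static void quirk_foo_no_msi(struct pci_dev *dev)
{
	dev->no_msi = 1;
}
DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_foo_no_msi);
DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_foo_no_msi);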
#ifdef CONFIG_PCI_QUIRKS
@@ -1672,7 +1672,7 @@ extern u8 pci_cache_line_size;
extern unsigned long pci_hotplug_io_size;
extern unsigned long pci_hotplug_mem_size;
-/* Architecture specific versions may override these (weak) */
+/* Architecture-specific versions may override these (weak) */
int pcibios_add_platform_entries(struct pci_dev *dev);
void pcibios_disable_device(struct pci_dev *dev);
void pcibios_set_master(struct pci_dev *dev);
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 430dd963707b..a2e2f1d17e16 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -39,8 +39,8 @@
* @hardware_test: Called to run a specified hardware test on the specified
* slot.
* @get_power_status: Called to get the current power status of a slot.
- * If this field is NULL, the value passed in the struct hotplug_slot_info
- * will be used when this value is requested by a user.
+ * If this field is NULL, the value passed in the struct hotplug_slot_info
+ * will be used when this value is requested by a user.
* @get_attention_status: Called to get the current attention status of a slot.
* If this field is NULL, the value passed in the struct hotplug_slot_info
* will be used when this value is requested by a user.
@@ -191,4 +191,3 @@ static inline int pci_get_hp_params(struct pci_dev *dev,
void pci_configure_slot(struct pci_dev *dev);
#endif
-
diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h
index 9572669eea97..4f1089f2cc98 100644
--- a/include/linux/pcieport_if.h
+++ b/include/linux/pcieport_if.h
@@ -23,7 +23,7 @@
#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT)
struct pcie_device {
- int irq; /* Service IRQ/MSI/MSI-X Vector */
+ int irq; /* Service IRQ/MSI/MSI-X Vector */
struct pci_dev *port; /* Root/Upstream/Downstream Port */
u32 service; /* Port service this device represents */
void *priv_data; /* Service Private Data */
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 57e890abe1f0..a5fc7d01aad6 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -69,6 +69,7 @@
__PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
+ extern __PCPU_ATTRS(sec) __typeof__(type) name; \
__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
__typeof__(type) name
#else
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 48a4dc3cb8cf..7c81dd8870d4 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -1,6 +1,4 @@
/*
- * include/linux/phy.h
- *
* Framework and drivers for configuring and reading different PHYs
* Based on code in sungem_phy.c and gianfar_phy.c
*
@@ -27,18 +25,27 @@
#include <linux/atomic.h>
-#define PHY_BASIC_FEATURES (SUPPORTED_10baseT_Half | \
- SUPPORTED_10baseT_Full | \
- SUPPORTED_100baseT_Half | \
- SUPPORTED_100baseT_Full | \
- SUPPORTED_Autoneg | \
+#define PHY_DEFAULT_FEATURES (SUPPORTED_Autoneg | \
SUPPORTED_TP | \
SUPPORTED_MII)
-#define PHY_GBIT_FEATURES (PHY_BASIC_FEATURES | \
- SUPPORTED_1000baseT_Half | \
+#define PHY_10BT_FEATURES (SUPPORTED_10baseT_Half | \
+ SUPPORTED_10baseT_Full)
+
+#define PHY_100BT_FEATURES (SUPPORTED_100baseT_Half | \
+ SUPPORTED_100baseT_Full)
+
+#define PHY_1000BT_FEATURES (SUPPORTED_1000baseT_Half | \
SUPPORTED_1000baseT_Full)
+#define PHY_BASIC_FEATURES (PHY_10BT_FEATURES | \
+ PHY_100BT_FEATURES | \
+ PHY_DEFAULT_FEATURES)
+
+#define PHY_GBIT_FEATURES (PHY_BASIC_FEATURES | \
+ PHY_1000BT_FEATURES)
+
+
/*
* Set phydev->irq to PHY_POLL if interrupts are not supported,
* or not desired for this PHY. Set to PHY_IGNORE_INTERRUPT if
@@ -231,7 +238,7 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
* - phy_stop moves to HALTED
*/
enum phy_state {
- PHY_DOWN=0,
+ PHY_DOWN = 0,
PHY_STARTING,
PHY_READY,
PHY_PENDING,
@@ -275,11 +282,9 @@ struct phy_c45_device_ids {
* attached_dev: The attached enet driver's device instance ptr
* adjust_link: Callback for the enet controller to respond to
* changes in the link state.
- * adjust_state: Callback for the enet driver to respond to
- * changes in the state machine.
*
- * speed, duplex, pause, supported, advertising, and
- * autoneg are used like in mii_if_info
+ * speed, duplex, pause, supported, advertising, lp_advertising,
+ * and autoneg are used like in mii_if_info
*
* interrupts currently only supports enabled or disabled,
* but could be changed in the future to support enabling
@@ -331,6 +336,7 @@ struct phy_device {
/* See mii.h for more info */
u32 supported;
u32 advertising;
+ u32 lp_advertising;
int autoneg;
@@ -356,8 +362,6 @@ struct phy_device {
struct net_device *attached_dev;
void (*adjust_link)(struct net_device *dev);
-
- void (*adjust_state)(struct net_device *dev);
};
#define to_phy_device(d) container_of(d, struct phy_device, dev)
@@ -534,19 +538,22 @@ static inline bool phy_is_internal(struct phy_device *phydev)
}
struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
- bool is_c45, struct phy_c45_device_ids *c45_ids);
+ bool is_c45,
+ struct phy_c45_device_ids *c45_ids);
struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
int phy_device_register(struct phy_device *phy);
int phy_init_hw(struct phy_device *phydev);
-struct phy_device * phy_attach(struct net_device *dev,
- const char *bus_id, phy_interface_t interface);
+int phy_suspend(struct phy_device *phydev);
+int phy_resume(struct phy_device *phydev);
+struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
+ phy_interface_t interface);
struct phy_device *phy_find_first(struct mii_bus *bus);
int phy_connect_direct(struct net_device *dev, struct phy_device *phydev,
- void (*handler)(struct net_device *),
- phy_interface_t interface);
-struct phy_device * phy_connect(struct net_device *dev, const char *bus_id,
- void (*handler)(struct net_device *),
- phy_interface_t interface);
+ void (*handler)(struct net_device *),
+ phy_interface_t interface);
+struct phy_device *phy_connect(struct net_device *dev, const char *bus_id,
+ void (*handler)(struct net_device *),
+ phy_interface_t interface);
void phy_disconnect(struct phy_device *phydev);
void phy_detach(struct phy_device *phydev);
void phy_start(struct phy_device *phydev);
@@ -555,7 +562,8 @@ int phy_start_aneg(struct phy_device *phydev);
int phy_stop_interrupts(struct phy_device *phydev);
-static inline int phy_read_status(struct phy_device *phydev) {
+static inline int phy_read_status(struct phy_device *phydev)
+{
return phydev->drv->read_status(phydev);
}
@@ -573,31 +581,29 @@ int phy_drivers_register(struct phy_driver *new_driver, int n);
void phy_state_machine(struct work_struct *work);
void phy_change(struct work_struct *work);
void phy_mac_interrupt(struct phy_device *phydev, int new_link);
-void phy_start_machine(struct phy_device *phydev,
- void (*handler)(struct net_device *));
+void phy_start_machine(struct phy_device *phydev);
void phy_stop_machine(struct phy_device *phydev);
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
-int phy_mii_ioctl(struct phy_device *phydev,
- struct ifreq *ifr, int cmd);
+int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
int phy_start_interrupts(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
void phy_device_free(struct phy_device *phydev);
int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
- int (*run)(struct phy_device *));
+ int (*run)(struct phy_device *));
int phy_register_fixup_for_id(const char *bus_id,
- int (*run)(struct phy_device *));
+ int (*run)(struct phy_device *));
int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
- int (*run)(struct phy_device *));
-int phy_scan_fixups(struct phy_device *phydev);
+ int (*run)(struct phy_device *));
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable);
int phy_get_eee_err(struct phy_device *phydev);
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data);
int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data);
int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol);
-void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol);
+void phy_ethtool_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol);
int __init mdio_bus_init(void);
void mdio_bus_exit(void);
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h
index 179fb91bb5f2..f50821cb64be 100644
--- a/include/linux/platform_data/edma.h
+++ b/include/linux/platform_data/edma.h
@@ -67,10 +67,10 @@ struct edmacc_param {
#define ITCCHEN BIT(23)
/* possible values for the ch_status parameter of the callback function */
-#define DMA_COMPLETE 1
-#define DMA_CC_ERROR 2
-#define DMA_TC1_ERROR 3
-#define DMA_TC2_ERROR 4
+#define EDMA_DMA_COMPLETE 1
+#define EDMA_DMA_CC_ERROR 2
+#define EDMA_DMA_TC1_ERROR 3
+#define EDMA_DMA_TC2_ERROR 4
enum address_mode {
INCR = 0,
diff --git a/include/linux/platform_data/eth-netx.h b/include/linux/platform_data/eth-netx.h
index 88af1ac28ead..a395159725d5 100644
--- a/include/linux/platform_data/eth-netx.h
+++ b/include/linux/platform_data/eth-netx.h
@@ -1,6 +1,4 @@
/*
- * arch/arm/mach-netx/include/mach/eth.h
- *
* Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
*
* This program is free software; you can redistribute it and/or modify
@@ -17,8 +15,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifndef ASMARM_ARCH_ETH_H
-#define ASMARM_ARCH_ETH_H
+#ifndef __ETH_NETX_H
+#define __ETH_NETX_H
struct netxeth_platform_data {
unsigned int xcno; /* number of xmac/xpec engine this eth uses */
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 694925837a16..26fb95ce5080 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -88,6 +88,13 @@ struct va_format {
#define HW_ERR "[Hardware Error]: "
/*
+ * DEPRECATED
+ * Add this to a message whenever you want to warn user space about the use
+ * of a deprecated aspect of an API so that they can stop using it.
+ */
+#define DEPRECATED "[Deprecated]: "
+
+/*
* Dummy printk for disabled debugging statements to use whilst maintaining
* gcc's format and side-effect checking.
*/
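A one-line usage sketch for the new prefix (the message text is invented):

pr_warn_once(DEPRECATED "/proc/example_knob is obsolete; "
			"use the replacement sysfs interface instead\n");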
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index abd437d0a8a7..ece0c6bbfcc5 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -51,6 +51,7 @@ struct pstore_info {
char *buf;
size_t bufsize;
struct mutex read_mutex; /* serialize open/read/close */
+ int flags;
int (*open)(struct pstore_info *psi);
int (*close)(struct pstore_info *psi);
ssize_t (*read)(u64 *id, enum pstore_type_id *type,
@@ -70,6 +71,8 @@ struct pstore_info {
void *data;
};
+#define PSTORE_FLAGS_FRAGILE 1
+
#ifdef CONFIG_PSTORE
extern int pstore_register(struct pstore_info *);
extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 8e00f9f6f963..9e7db9e73cc1 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -43,6 +43,7 @@ extern int unregister_reboot_notifier(struct notifier_block *);
* Architecture-specific implementations of sys_reboot commands.
*/
+extern void migrate_to_reboot_cpu(void);
extern void machine_restart(char *cmd);
extern void machine_halt(void);
extern void machine_power_off(void);
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 939428ad25ac..8e3e66ac0a52 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -24,6 +24,11 @@ extern int rtnl_trylock(void);
extern int rtnl_is_locked(void);
#ifdef CONFIG_PROVE_LOCKING
extern int lockdep_rtnl_is_held(void);
+#else
+static inline int lockdep_rtnl_is_held(void)
+{
+ return 1;
+}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
/**
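The !CONFIG_PROVE_LOCKING stub returning 1 keeps RCU annotations like the following valid in both configurations (a sketch; example_list is a hypothetical RCU-protected pointer updated under the RTNL lock):

struct example_entry *e;

e = rcu_dereference_protected(example_list, lockdep_rtnl_is_held());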
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7e35d4b9e14a..53f97eb8dbc7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -440,8 +440,6 @@ struct task_cputime {
.sum_exec_runtime = 0, \
}
-#define PREEMPT_ENABLED (PREEMPT_NEED_RESCHED)
-
#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_DISABLED (1 + PREEMPT_ENABLED)
#else
@@ -831,8 +829,6 @@ struct sched_domain {
unsigned int balance_interval; /* initialise to 1. units in ms. */
unsigned int nr_balance_failed; /* initialise to 0 */
- u64 last_update;
-
/* idle_balance() stats */
u64 max_newidle_lb_cost;
unsigned long next_decay_max_lb_cost;
@@ -934,7 +930,8 @@ struct pipe_inode_info;
struct uts_namespace;
struct load_weight {
- unsigned long weight, inv_weight;
+ unsigned long weight;
+ u32 inv_weight;
};
struct sched_avg {
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index 3bfe8d6ee248..c4c5eba26d9f 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -23,9 +23,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
diff --git a/include/linux/security.h b/include/linux/security.h
index 9d37e2b9d3ec..5623a7f965b7 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1052,17 +1052,25 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @xfrm_policy_delete_security:
* @ctx contains the xfrm_sec_ctx.
* Authorize deletion of xp->security.
- * @xfrm_state_alloc_security:
+ * @xfrm_state_alloc:
* @x contains the xfrm_state being added to the Security Association
* Database by the XFRM system.
* @sec_ctx contains the security context information being provided by
* the user-level SA generation program (e.g., setkey or racoon).
- * @secid contains the secid from which to take the mls portion of the context.
* Allocate a security structure to the x->security field; the security
* field is initialized to NULL when the xfrm_state is allocated. Set the
- * context to correspond to either sec_ctx or polsec, with the mls portion
- * taken from secid in the latter case.
- * Return 0 if operation was successful (memory to allocate, legal context).
+ * context to correspond to sec_ctx. Return 0 if operation was successful
+ * (memory to allocate, legal context).
+ * @xfrm_state_alloc_acquire:
+ * @x contains the xfrm_state being added to the Security Association
+ * Database by the XFRM system.
+ * @polsec contains the policy's security context.
+ * @secid contains the secid from which to take the mls portion of the
+ * context.
+ * Allocate a security structure to the x->security field; the security
+ * field is initialized to NULL when the xfrm_state is allocated. Set the
+ * context to correspond to secid. Return 0 if operation was successful
+ * (memory to allocate, legal context).
* @xfrm_state_free_security:
* @x contains the xfrm_state.
* Deallocate x->security.
@@ -1679,9 +1687,11 @@ struct security_operations {
int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx);
void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx);
int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx);
- int (*xfrm_state_alloc_security) (struct xfrm_state *x,
- struct xfrm_user_sec_ctx *sec_ctx,
- u32 secid);
+ int (*xfrm_state_alloc) (struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *sec_ctx);
+ int (*xfrm_state_alloc_acquire) (struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec,
+ u32 secid);
void (*xfrm_state_free_security) (struct xfrm_state *x);
int (*xfrm_state_delete_security) (struct xfrm_state *x);
int (*xfrm_policy_lookup) (struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 1e8a8b6e837d..cf87a24c0f92 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -354,6 +354,35 @@ static inline void read_sequnlock_excl(seqlock_t *sl)
spin_unlock(&sl->lock);
}
+/**
+ * read_seqbegin_or_lock - begin a sequence number check or locking block
+ * @lock: sequence lock
+ * @seq : sequence number to be checked
+ *
+ * First try it once optimistically without taking the lock. If that fails,
+ * take the lock. The sequence number is also used as a marker for deciding
+ * whether to be a reader (even) or writer (odd).
+ * N.B. seq must be initialized to an even number to begin with.
+ */
+static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
+{
+ if (!(*seq & 1)) /* Even */
+ *seq = read_seqbegin(lock);
+ else /* Odd */
+ read_seqlock_excl(lock);
+}
+
+static inline int need_seqretry(seqlock_t *lock, int seq)
+{
+ return !(seq & 1) && read_seqretry(lock, seq);
+}
+
+static inline void done_seqretry(seqlock_t *lock, int seq)
+{
+ if (seq & 1)
+ read_sequnlock_excl(lock);
+}
+
static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
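A sketch of the intended reader loop: start as an optimistic lockless reader (seq even) and, if the read raced with a writer, retry as an exclusive locking reader (seq odd):

static void read_stable(seqlock_t *lock)
{
	int seq = 0;			/* must start even */
retry:
	read_seqbegin_or_lock(lock, &seq);
	/* ... read the protected data here ... */
	if (need_seqretry(lock, seq)) {
		seq = 1;		/* go odd: take the lock this time */
		goto retry;
	}
	done_seqretry(lock, seq);
}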
diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h
index 90b5e30c2f22..8c9131db2b25 100644
--- a/include/linux/sh_eth.h
+++ b/include/linux/sh_eth.h
@@ -8,6 +8,7 @@ enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN};
struct sh_eth_plat_data {
int phy;
+ int phy_irq;
int edmac_endian;
phy_interface_t phy_interface;
void (*set_mdio_gate)(void *addr);
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 30aa0dc60d75..9d55438bc4ad 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -47,6 +47,8 @@ extern int shmem_init(void);
extern int shmem_fill_super(struct super_block *sb, void *data, int silent);
extern struct file *shmem_file_setup(const char *name,
loff_t size, unsigned long flags);
+extern struct file *shmem_kernel_file_setup(const char *name, loff_t size,
+ unsigned long flags);
extern int shmem_zero_setup(struct vm_area_struct *);
extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
extern void shmem_unlock_mapping(struct address_space *mapping);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index bec1cc7d5e3c..d97f2d07d02b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -34,11 +34,82 @@
#include <linux/netdev_features.h>
#include <net/flow_keys.h>
+/* A. Checksumming of received packets by device.
+ *
+ * CHECKSUM_NONE:
+ *
+ * The device failed to checksum this packet, e.g. due to lack of
+ * capabilities. The packet carries a full (though not verified) checksum
+ * inline, but not in skb->csum; thus skb->csum is undefined in this case.
+ *
+ * CHECKSUM_UNNECESSARY:
+ *
+ * The hardware you're dealing with doesn't calculate the full checksum
+ * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
+ * for specific protocols, e.g. TCP/UDP/SCTP; for such packets it will set
+ * CHECKSUM_UNNECESSARY if their checksums are okay. skb->csum is still
+ * undefined in this case, though. It is a bad option, but, unfortunately,
+ * most vendors do this nowadays, apparently with the secret goal of selling
+ * you a new device when you add a new protocol to your host, e.g. IPv6. 8)
+ *
+ * CHECKSUM_COMPLETE:
+ *
+ * This is the most generic way. The device supplied the checksum of the
+ * _whole_ packet as seen by netif_rx() and stored it in skb->csum, so the
+ * hardware doesn't need to parse L3/L4 headers to implement this.
+ *
+ * Note: Even if device supports only some protocols, but is able to produce
+ * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
+ *
+ * CHECKSUM_PARTIAL:
+ *
+ * This is identical to the case for output below. This may occur on a packet
+ * received directly from another Linux OS, e.g., a virtualized Linux kernel
+ * on the same host. The packet can be treated in the same way as
+ * CHECKSUM_UNNECESSARY, except that on output (i.e., forwarding) the
+ * checksum must be filled in by the OS or the hardware.
+ *
+ * B. Checksumming on output.
+ *
+ * CHECKSUM_NONE:
+ *
+ * The skb was already checksummed by the protocol, or a checksum is not
+ * required.
+ *
+ * CHECKSUM_PARTIAL:
+ *
+ * The device is required to checksum the packet as seen by hard_start_xmit()
+ * from skb->csum_start up to the end, and to record/write the checksum at
+ * offset skb->csum_start + skb->csum_offset.
+ *
+ * The device must show its capabilities in dev->features, set up at device
+ * setup time, e.g. netdev_features.h:
+ *
+ * NETIF_F_HW_CSUM - It's a clever device, it's able to checksum everything.
+ * NETIF_F_IP_CSUM - Device is dumb, it's able to checksum only TCP/UDP over
+ * IPv4. Sigh. Vendors like it this way for some unknown reason.
+ * Though, see comment above about CHECKSUM_UNNECESSARY. 8)
+ * NETIF_F_IPV6_CSUM - About as dumb as the last one but does IPv6 instead.
+ * NETIF_F_... - Well, you get the picture.
+ *
+ * CHECKSUM_UNNECESSARY:
+ *
+ * Normally, the device will do per-protocol-specific checksumming. Protocol
+ * implementations that do not want the NIC to perform the checksum
+ * calculation should use this flag in their outgoing skbs.
+ *
+ * NETIF_F_FCOE_CRC - This indicates that the device can do FCoE FC CRC
+ * offload. Correspondingly, the FCoE protocol driver
+ * stack should use CHECKSUM_UNNECESSARY.
+ *
+ * Any questions? No questions, good. --ANK
+ */
+
/* Don't change this without changing skb_csum_unnecessary! */
-#define CHECKSUM_NONE 0
-#define CHECKSUM_UNNECESSARY 1
-#define CHECKSUM_COMPLETE 2
-#define CHECKSUM_PARTIAL 3
+#define CHECKSUM_NONE 0
+#define CHECKSUM_UNNECESSARY 1
+#define CHECKSUM_COMPLETE 2
+#define CHECKSUM_PARTIAL 3
#define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES - 1)) & \
~(SMP_CACHE_BYTES - 1))
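As a transmit-side illustration of case B above, a driver that cannot offload a given CHECKSUM_PARTIAL packet is expected to fall back to software checksumming; a sketch (can_offload() is hypothetical):

if (skb->ip_summed == CHECKSUM_PARTIAL && !can_offload(skb)) {
	/* Compute the checksum in software and store it at
	 * skb->csum_start + skb->csum_offset.
	 */
	if (skb_checksum_help(skb))
		goto drop;
}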
@@ -54,58 +125,6 @@
SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
-/* A. Checksumming of received packets by device.
- *
- * NONE: device failed to checksum this packet.
- * skb->csum is undefined.
- *
- * UNNECESSARY: device parsed packet and wouldbe verified checksum.
- * skb->csum is undefined.
- * It is bad option, but, unfortunately, many of vendors do this.
- * Apparently with secret goal to sell you new device, when you
- * will add new protocol to your host. F.e. IPv6. 8)
- *
- * COMPLETE: the most generic way. Device supplied checksum of _all_
- * the packet as seen by netif_rx in skb->csum.
- * NOTE: Even if device supports only some protocols, but
- * is able to produce some skb->csum, it MUST use COMPLETE,
- * not UNNECESSARY.
- *
- * PARTIAL: identical to the case for output below. This may occur
- * on a packet received directly from another Linux OS, e.g.,
- * a virtualised Linux kernel on the same host. The packet can
- * be treated in the same way as UNNECESSARY except that on
- * output (i.e., forwarding) the checksum must be filled in
- * by the OS or the hardware.
- *
- * B. Checksumming on output.
- *
- * NONE: skb is checksummed by protocol or csum is not required.
- *
- * PARTIAL: device is required to csum packet as seen by hard_start_xmit
- * from skb->csum_start to the end and to record the checksum
- * at skb->csum_start + skb->csum_offset.
- *
- * Device must show its capabilities in dev->features, set
- * at device setup time.
- * NETIF_F_HW_CSUM - it is clever device, it is able to checksum
- * everything.
- * NETIF_F_IP_CSUM - device is dumb. It is able to csum only
- * TCP/UDP over IPv4. Sigh. Vendors like this
- * way by an unknown reason. Though, see comment above
- * about CHECKSUM_UNNECESSARY. 8)
- * NETIF_F_IPV6_CSUM about as dumb as the last one but does IPv6 instead.
- *
- * UNNECESSARY: device will do per protocol specific csum. Protocol drivers
- * that do not want net to perform the checksum calculation should use
- * this flag in their outgoing skbs.
- * NETIF_F_FCOE_CRC this indicates the device can do FCoE FC CRC
- * offload. Correspondingly, the FCoE protocol driver
- * stack should use CHECKSUM_UNNECESSARY.
- *
- * Any questions? No questions, good. --ANK
- */
-
struct net_device;
struct scatterlist;
struct pipe_inode_info;
@@ -703,15 +722,73 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
unsigned int to, struct ts_config *config,
struct ts_state *state);
-void __skb_get_rxhash(struct sk_buff *skb);
-static inline __u32 skb_get_rxhash(struct sk_buff *skb)
+/*
+ * Packet hash types specify the type of hash in skb_set_hash.
+ *
+ * Hash types refer to the protocol layer addresses which are used to
+ * construct a packet's hash. The hashes are used to differentiate or identify
+ * flows of the protocol layer for the hash type. Hash types are either
+ * layer-2 (L2), layer-3 (L3), or layer-4 (L4).
+ *
+ * Properties of hashes:
+ *
+ * 1) Two packets in different flows have different hash values
+ * 2) Two packets in the same flow should have the same hash value
+ *
+ * A hash at a higher layer is considered to be more specific. A driver should
+ * set the most specific hash possible.
+ *
+ * A driver cannot indicate a more specific hash than the layer at which a hash
+ * was computed. For instance an L3 hash cannot be set as an L4 hash.
+ *
+ * A driver may indicate a hash level which is less specific than the
+ * actual layer the hash was computed on. For instance, a hash computed
+ * at L4 may be considered an L3 hash. This should only be done if the
+ * driver can't unambiguously determine that the HW computed the hash at
+ * the higher layer. Note that the "should" in the second property above
+ * permits this.
+ */
+enum pkt_hash_types {
+ PKT_HASH_TYPE_NONE, /* Undefined type */
+ PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */
+ PKT_HASH_TYPE_L3, /* Input: src_IP, dst_IP */
+ PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */
+};
+
+static inline void
+skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
+{
+ skb->l4_rxhash = (type == PKT_HASH_TYPE_L4);
+ skb->rxhash = hash;
+}
+
+void __skb_get_hash(struct sk_buff *skb);
+static inline __u32 skb_get_hash(struct sk_buff *skb)
{
if (!skb->l4_rxhash)
- __skb_get_rxhash(skb);
+ __skb_get_hash(skb);
return skb->rxhash;
}
+static inline void skb_clear_hash(struct sk_buff *skb)
+{
+ skb->rxhash = 0;
+ skb->l4_rxhash = 0;
+}
+
+static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
+{
+ if (!skb->l4_rxhash)
+ skb_clear_hash(skb);
+}
+
+static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
+{
+ to->rxhash = from->rxhash;
+ to->l4_rxhash = from->l4_rxhash;
+}
+
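A sketch of both sides of the new hash API: an RX driver reporting a hardware-computed 4-tuple hash (the rx_desc fields are hypothetical), and a consumer that falls back to the software hash when none was set:

/* Driver RX path */
if (rx_desc->hash_valid)
	skb_set_hash(skb, le32_to_cpu(rx_desc->hash), PKT_HASH_TYPE_L4);

/* Stack consumer: computes a software hash only if no L4 hash is present */
u32 hash = skb_get_hash(skb);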
#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
@@ -750,7 +827,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
*/
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
- return list->next == (struct sk_buff *)list;
+ return list->next == (const struct sk_buff *) list;
}
/**
@@ -763,7 +840,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
const struct sk_buff *skb)
{
- return skb->next == (struct sk_buff *)list;
+ return skb->next == (const struct sk_buff *) list;
}
/**
@@ -776,7 +853,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
const struct sk_buff *skb)
{
- return skb->prev == (struct sk_buff *)list;
+ return skb->prev == (const struct sk_buff *) list;
}
/**
@@ -1638,6 +1715,11 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
skb->mac_header += offset;
}
+static inline void skb_pop_mac_header(struct sk_buff *skb)
+{
+ skb->mac_header = skb->network_header;
+}
+
static inline void skb_probe_transport_header(struct sk_buff *skb,
const int offset_hint)
{
@@ -2263,6 +2345,24 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+/**
+ * pskb_trim_rcsum - trim received skb and update checksum
+ * @skb: buffer to trim
+ * @len: new length
+ *
+ * This is exactly the same as pskb_trim except that it ensures the
+ * checksum of received packets is still valid after the operation.
+ */
+
+static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
+{
+ if (likely(len >= skb->len))
+ return 0;
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->ip_summed = CHECKSUM_NONE;
+ return __pskb_trim(skb, len);
+}
+
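Usage is unchanged by the move; a receive-path sketch (real_len is hypothetical):

/* Trim trailing padding reported by the hardware, keeping
 * skb->ip_summed consistent with the shortened data.
 */
if (pskb_trim_rcsum(skb, real_len))
	goto drop;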
#define skb_queue_walk(queue, skb) \
for (skb = (queue)->next; \
skb != (struct sk_buff *)(queue); \
@@ -2345,6 +2445,9 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
struct pipe_inode_info *pipe, unsigned int len,
unsigned int flags);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
+unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
+void skb_zerocopy(struct sk_buff *to, const struct sk_buff *from,
+ int len, int hlen);
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
@@ -2360,27 +2463,6 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum csum);
-/**
- * pskb_trim_rcsum - trim received skb and update checksum
- * @skb: buffer to trim
- * @len: new length
- *
- * This is exactly the same as pskb_trim except that it ensures the
- * checksum of received packets are still valid after the operation.
- */
-
-static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
-{
- if (likely(len >= skb->len))
- return 0;
- if (skb->ip_summed == CHECKSUM_COMPLETE) {
- __wsum adj = skb_checksum(skb, len, skb->len - len, 0);
-
- skb->csum = csum_sub(skb->csum, adj);
- }
- return __pskb_trim(skb, len);
-}
-
static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
int len, void *buffer)
{
@@ -2395,6 +2477,24 @@ static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
return buffer;
}
+/**
+ * skb_needs_linearize - check if we need to linearize a given skb
+ * depending on the given device features.
+ * @skb: socket buffer to check
+ * @features: net device features
+ *
+ * Returns true if either:
+ * 1. skb has frag_list and the device doesn't support FRAGLIST, or
+ * 2. skb is fragmented and the device does not support SG.
+ */
+static inline bool skb_needs_linearize(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ return skb_is_nonlinear(skb) &&
+ ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
+ (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
+}
+
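A transmit-path sketch of how the helper is meant to be used, mirroring the core xmit logic:

if (skb_needs_linearize(skb, dev->features) &&
    __skb_linearize(skb))
	goto drop;	/* linearization failed (allocation error) */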
static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
void *to,
const unsigned int len)
@@ -2529,6 +2629,10 @@ static inline void sw_tx_timestamp(struct sk_buff *skb)
* Ethernet MAC Drivers should call this function in their hard_xmit()
* function immediately before giving the sk_buff to the MAC hardware.
*
+ * Specifically, one should make absolutely sure that this function is
+ * called before TX completion of this packet can trigger. Otherwise
+ * the packet could potentially already be freed.
+ *
* @skb: A socket buffer.
*/
static inline void skb_tx_timestamp(struct sk_buff *skb)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 74f105847d13..1e2f4fe12773 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -53,7 +53,14 @@
* }
* rcu_read_unlock();
*
- * See also the comment on struct slab_rcu in mm/slab.c.
+ * This is useful if we need to approach a kernel structure obliquely,
+ * from its address obtained without the usual locking. We can lock
+ * the structure to stabilize it and check it's still at the given address,
+ * only if we can be sure that the memory has not been meanwhile reused
+ * for some other kind of object (which our subsystem's lock might corrupt).
+ *
+ * rcu_read_lock before reading the address, then rcu_read_unlock after
+ * taking the spinlock within the structure expected at that address.
*/
#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
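A sketch of the stabilize-and-recheck lookup the comment describes (table, idx and the object's lock field are hypothetical):

rcu_read_lock();
obj = rcu_dereference(table[idx]);
if (obj) {
	spin_lock(&obj->lock);			/* stabilize the candidate */
	if (obj != rcu_dereference(table[idx])) {
		/* the slab memory was reused for another object */
		spin_unlock(&obj->lock);
		obj = NULL;
	}
}
rcu_read_unlock();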
@@ -381,10 +388,55 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
/**
* kmalloc - allocate memory
* @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kcalloc).
+ * @flags: the type of memory to allocate.
*
* kmalloc is the normal method of allocating memory
* for objects smaller than page size in the kernel.
+ *
+ * The @flags argument may be one of:
+ *
+ * %GFP_USER - Allocate memory on behalf of user. May sleep.
+ *
+ * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
+ *
+ * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
+ * For example, use this inside interrupt handlers.
+ *
+ * %GFP_HIGHUSER - Allocate pages from high memory.
+ *
+ * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
+ *
+ * %GFP_NOFS - Do not make any fs calls while trying to get memory.
+ *
+ * %GFP_NOWAIT - Allocation will not sleep.
+ *
+ * %GFP_THISNODE - Allocate node-local memory only.
+ *
+ * %GFP_DMA - Allocation suitable for DMA.
+ * Should only be used for kmalloc() caches. Otherwise, use a
+ * slab created with SLAB_DMA.
+ *
+ * Also it is possible to set different flags by OR'ing
+ * in one or more of the following additional @flags:
+ *
+ * %__GFP_COLD - Request cache-cold pages instead of
+ * trying to return cache-warm pages.
+ *
+ * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
+ *
+ * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
+ * (think twice before using).
+ *
+ * %__GFP_NORETRY - If memory is not immediately available,
+ * then give up at once.
+ *
+ * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
+ *
+ * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
+ *
+ * There are other flags available as well, but these are not intended
+ * for general use, and so are not documented here. For a full list of
+ * potential flags, always refer to linux/gfp.h.
*/
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
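Two representative calls, matching the flag descriptions above (struct foo, len and the surrounding context are hypothetical):

struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);	/* may sleep */
if (!f)
	return -ENOMEM;

/* From an interrupt handler: must not sleep, may use emergency pools. */
void *buf = kmalloc(len, GFP_ATOMIC);
if (!buf)
	goto out_free_f;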
@@ -495,61 +547,6 @@ int cache_show(struct kmem_cache *s, struct seq_file *m);
void print_slabinfo_header(struct seq_file *m);
/**
- * kmalloc - allocate memory
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- *
- * The @flags argument may be one of:
- *
- * %GFP_USER - Allocate memory on behalf of user. May sleep.
- *
- * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
- *
- * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
- * For example, use this inside interrupt handlers.
- *
- * %GFP_HIGHUSER - Allocate pages from high memory.
- *
- * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
- *
- * %GFP_NOFS - Do not make any fs calls while trying to get memory.
- *
- * %GFP_NOWAIT - Allocation will not sleep.
- *
- * %GFP_THISNODE - Allocate node-local memory only.
- *
- * %GFP_DMA - Allocation suitable for DMA.
- * Should only be used for kmalloc() caches. Otherwise, use a
- * slab created with SLAB_DMA.
- *
- * Also it is possible to set different flags by OR'ing
- * in one or more of the following additional @flags:
- *
- * %__GFP_COLD - Request cache-cold pages instead of
- * trying to return cache-warm pages.
- *
- * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
- *
- * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
- * (think twice before using).
- *
- * %__GFP_NORETRY - If memory is not immediately available,
- * then give up at once.
- *
- * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
- *
- * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
- *
- * There are other flags available as well, but these are not intended
- * for general use, and so are not documented here. For a full list of
- * potential flags, always refer to linux/gfp.h.
- *
- * kmalloc is the normal method of allocating memory
- * in the kernel.
- */
-static __always_inline void *kmalloc(size_t size, gfp_t flags);
-
-/**
* kmalloc_array - allocate memory for an array.
* @n: number of elements.
* @size: element size.
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index e9346b4f1ef4..09bfffb08a56 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -27,8 +27,8 @@ struct kmem_cache {
size_t colour; /* cache colouring range */
unsigned int colour_off; /* colour offset */
- struct kmem_cache *slabp_cache;
- unsigned int slab_size;
+ struct kmem_cache *freelist_cache;
+ unsigned int freelist_size;
/* constructor func */
void (*ctor)(void *obj);
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index cc0b67eada42..f56bfa9e4526 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -11,7 +11,7 @@
enum stat_item {
ALLOC_FASTPATH, /* Allocation from cpu slab */
ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */
- FREE_FASTPATH, /* Free to cpu slub */
+ FREE_FASTPATH, /* Free to cpu slab */
FREE_SLOWPATH, /* Freeing not to cpu slab */
FREE_FROZEN, /* Freeing to frozen slab */
FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 445ef7519dc2..5d488a65ad20 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -305,8 +305,6 @@ struct ucred {
/* IPX options */
#define IPX_TYPE 1
-extern void cred_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred);
-
extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
int offset, int len);
extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index d68633452d9b..4ad0706d40eb 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -248,7 +248,10 @@ struct tcp_sock {
struct sk_buff* lost_skb_hint;
struct sk_buff *retransmit_skb_hint;
- struct sk_buff_head out_of_order_queue; /* Out of order segments go here */
+ /* Out-of-order segments go in this list. Note that the socket lock
+ * must be held, as we do not take the sk_buff_head lock.
+ */
+ struct sk_buff_head out_of_order_queue;
/* SACKs data, these 2 need to be together (see tcp_options_write) */
struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
diff --git a/include/linux/tegra-powergate.h b/include/linux/tegra-powergate.h
index c98cfa406952..fd4498329c7c 100644
--- a/include/linux/tegra-powergate.h
+++ b/include/linux/tegra-powergate.h
@@ -45,6 +45,7 @@ struct clk;
#define TEGRA_POWERGATE_3D0 TEGRA_POWERGATE_3D
+#ifdef CONFIG_ARCH_TEGRA
int tegra_powergate_is_powered(int id);
int tegra_powergate_power_on(int id);
int tegra_powergate_power_off(int id);
@@ -52,5 +53,31 @@ int tegra_powergate_remove_clamping(int id);
/* Must be called with clk disabled, and returns with clk enabled */
int tegra_powergate_sequence_power_up(int id, struct clk *clk);
+#else
+static inline int tegra_powergate_is_powered(int id)
+{
+ return -ENOSYS;
+}
+
+static inline int tegra_powergate_power_on(int id)
+{
+ return -ENOSYS;
+}
+
+static inline int tegra_powergate_power_off(int id)
+{
+ return -ENOSYS;
+}
+
+static inline int tegra_powergate_remove_clamping(int id)
+{
+ return -ENOSYS;
+}
+
+static inline int tegra_powergate_sequence_power_up(int id, struct clk *clk)
+{
+ return -ENOSYS;
+}
+#endif
#endif /* _MACH_TEGRA_POWERGATE_H_ */
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index ebeab360d851..f16dc0a40049 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -267,6 +267,8 @@ static inline void tracepoint_synchronize_unregister(void)
#define TRACE_EVENT_FLAGS(event, flag)
+#define TRACE_EVENT_PERF_PERM(event, expr...)
+
#endif /* DECLARE_TRACE */
#ifndef TRACE_EVENT
@@ -399,4 +401,6 @@ static inline void tracepoint_synchronize_unregister(void)
#define TRACE_EVENT_FLAGS(event, flag)
+#define TRACE_EVENT_PERF_PERM(event, expr...)
+
#endif /* ifdef TRACE_EVENT (see note above) */
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 7454865ad148..512ab162832c 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1264,6 +1264,8 @@ typedef void (*usb_complete_t)(struct urb *);
* @sg: scatter gather buffer list, the buffer size of each element in
* the list (except the last) must be divisible by the endpoint's
* max packet size if no_sg_constraint isn't set in 'struct usb_bus'
+ * (FIXME: scatter-gather under xHCI is broken for periodic transfers.
+ * Do not use urb->sg for interrupt endpoints for now, only bulk.)
* @num_mapped_sgs: (internal) number of mapped sg entries
* @num_sgs: number of entries in the sg list
* @transfer_buffer_length: How big is transfer_buffer. The transfer may
diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h
index 0c4d4ca370ec..eeb28329fa3c 100644
--- a/include/linux/usb/wusb.h
+++ b/include/linux/usb/wusb.h
@@ -271,6 +271,8 @@ static inline u8 wusb_key_index(int index, int type, int originator)
#define WUSB_KEY_INDEX_TYPE_GTK 2
#define WUSB_KEY_INDEX_ORIGINATOR_HOST 0
#define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1
+/* bits 0-3 used for the key index. */
+#define WUSB_KEY_INDEX_MAX 15
/* A CCM Nonce, defined in WUSB1.0[6.4.1] */
struct aes_ccm_nonce {
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 4db29859464f..4836ba3c1cd8 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -27,6 +27,12 @@ struct user_namespace {
kuid_t owner;
kgid_t group;
unsigned int proc_inum;
+
+ /* Register of per-UID persistent keyrings for this namespace */
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+ struct key *persistent_keyring_register;
+ struct rw_semaphore persistent_keyring_register_sem;
+#endif
};
extern struct user_namespace init_user_ns;
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 61939ba30aa0..eaa00b10abaa 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -278,6 +278,31 @@ do { \
__ret; \
})
+#define __wait_event_cmd(wq, condition, cmd1, cmd2) \
+ (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
+ cmd1; schedule(); cmd2)
+
+/**
+ * wait_event_cmd - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @cmd1: the command to be executed before sleep
+ * @cmd2: the command to be executed after sleep
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ */
+#define wait_event_cmd(wq, condition, cmd1, cmd2) \
+do { \
+ if (condition) \
+ break; \
+ __wait_event_cmd(wq, condition, cmd1, cmd2); \
+} while (0)
+
#define __wait_event_interruptible(wq, condition) \
___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
schedule())
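A sketch of the pattern this macro was added for, dropping a lock across the sleep and retaking it afterwards (cond, wq and lk are hypothetical; md's raid5 uses this shape):

spin_lock_irq(&lk);
wait_event_cmd(wq, cond,
	       spin_unlock_irq(&lk),	/* cmd1: run before sleeping */
	       spin_lock_irq(&lk));	/* cmd2: run after waking */
/* still holding lk here, whether or not we slept */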
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index bd8218b15009..941055e9d125 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -83,7 +83,7 @@ struct vb2_fileio_data;
struct vb2_mem_ops {
void *(*alloc)(void *alloc_ctx, unsigned long size, gfp_t gfp_flags);
void (*put)(void *buf_priv);
- struct dma_buf *(*get_dmabuf)(void *buf_priv);
+ struct dma_buf *(*get_dmabuf)(void *buf_priv, unsigned long flags);
void *(*get_userptr)(void *alloc_ctx, unsigned long vaddr,
unsigned long size, int write);
diff --git a/include/net/Space.h b/include/net/Space.h
new file mode 100644
index 000000000000..8a32771e4215
--- /dev/null
+++ b/include/net/Space.h
@@ -0,0 +1,31 @@
+/* A unified ethernet device probe. This is the easiest way to have every
+ * ethernet adaptor have the name "eth[0123...]".
+ */
+
+struct net_device *hp100_probe(int unit);
+struct net_device *ultra_probe(int unit);
+struct net_device *wd_probe(int unit);
+struct net_device *ne_probe(int unit);
+struct net_device *fmv18x_probe(int unit);
+struct net_device *i82596_probe(int unit);
+struct net_device *ni65_probe(int unit);
+struct net_device *sonic_probe(int unit);
+struct net_device *smc_init(int unit);
+struct net_device *atarilance_probe(int unit);
+struct net_device *sun3lance_probe(int unit);
+struct net_device *sun3_82586_probe(int unit);
+struct net_device *apne_probe(int unit);
+struct net_device *cs89x0_probe(int unit);
+struct net_device *mvme147lance_probe(int unit);
+struct net_device *tc515_probe(int unit);
+struct net_device *lance_probe(int unit);
+struct net_device *mac8390_probe(int unit);
+struct net_device *mac89x0_probe(int unit);
+struct net_device *cops_probe(int unit);
+struct net_device *ltpc_probe(void);
+
+/* Fibre Channel adapters */
+int iph5526_probe(struct net_device *dev);
+
+/* SBNI adapters */
+int sbni_probe(int unit);
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 9e90fdff470d..d34e1f4d897b 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -9,7 +9,7 @@
#include <net/pkt_sched.h>
struct tcf_common {
- struct tcf_common *tcfc_next;
+ struct hlist_node tcfc_head;
u32 tcfc_index;
int tcfc_refcnt;
int tcfc_bindcnt;
@@ -22,7 +22,7 @@ struct tcf_common {
spinlock_t tcfc_lock;
struct rcu_head tcfc_rcu;
};
-#define tcf_next common.tcfc_next
+#define tcf_head common.tcfc_head
#define tcf_index common.tcfc_index
#define tcf_refcnt common.tcfc_refcnt
#define tcf_bindcnt common.tcfc_bindcnt
@@ -36,9 +36,9 @@ struct tcf_common {
#define tcf_rcu common.tcfc_rcu
struct tcf_hashinfo {
- struct tcf_common **htab;
+ struct hlist_head *htab;
unsigned int hmask;
- rwlock_t *lock;
+ spinlock_t lock;
};
static inline unsigned int tcf_hash(u32 index, unsigned int hmask)
@@ -46,6 +46,26 @@ static inline unsigned int tcf_hash(u32 index, unsigned int hmask)
return index & hmask;
}
+static inline int tcf_hashinfo_init(struct tcf_hashinfo *hf, unsigned int mask)
+{
+ int i;
+
+ spin_lock_init(&hf->lock);
+ hf->hmask = mask;
+ hf->htab = kzalloc((mask + 1) * sizeof(struct hlist_head),
+ GFP_KERNEL);
+ if (!hf->htab)
+ return -ENOMEM;
+ for (i = 0; i < mask + 1; i++)
+ INIT_HLIST_HEAD(&hf->htab[i]);
+ return 0;
+}
+
+static inline void tcf_hashinfo_destroy(struct tcf_hashinfo *hf)
+{
+ kfree(hf->htab);
+}
+
#ifdef CONFIG_NET_CLS_ACT
#define ACT_P_CREATED 1
@@ -60,19 +80,18 @@ struct tc_action {
const struct tc_action_ops *ops;
__u32 type; /* for backward compat(TCA_OLD_COMPAT) */
__u32 order;
- struct tc_action *next;
+ struct list_head list;
};
#define TCA_CAP_NONE 0
struct tc_action_ops {
- struct tc_action_ops *next;
+ struct list_head head;
struct tcf_hashinfo *hinfo;
char kind[IFNAMSIZ];
__u32 type; /* TBD to match kind */
__u32 capab; /* capabilities includes 4 bit version */
struct module *owner;
int (*act)(struct sk_buff *, const struct tc_action *, struct tcf_result *);
- int (*get_stats)(struct sk_buff *, struct tc_action *);
int (*dump)(struct sk_buff *, struct tc_action *, int, int);
int (*cleanup)(struct tc_action *, int bind);
int (*lookup)(struct tc_action *, u32);
@@ -86,10 +105,7 @@ struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo);
void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo);
int tcf_hash_release(struct tcf_common *p, int bind,
struct tcf_hashinfo *hinfo);
-int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
- int type, struct tc_action *a);
u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo);
-int tcf_hash_search(struct tc_action *a, u32 index);
struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a,
int bind, struct tcf_hashinfo *hinfo);
struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
@@ -100,16 +116,16 @@ void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo);
int tcf_register_action(struct tc_action_ops *a);
int tcf_unregister_action(struct tc_action_ops *a);
-void tcf_action_destroy(struct tc_action *a, int bind);
-int tcf_action_exec(struct sk_buff *skb, const struct tc_action *a,
+void tcf_action_destroy(struct list_head *actions, int bind);
+int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
struct tcf_result *res);
-struct tc_action *tcf_action_init(struct net *net, struct nlattr *nla,
+int tcf_action_init(struct net *net, struct nlattr *nla,
struct nlattr *est, char *n, int ovr,
- int bind);
+ int bind, struct list_head *);
struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
struct nlattr *est, char *n, int ovr,
int bind);
-int tcf_action_dump(struct sk_buff *skb, struct tc_action *a, int, int);
+int tcf_action_dump(struct sk_buff *skb, struct list_head *, int, int);
int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 86505bfa5d2c..66c4a44d8f5c 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -81,9 +81,9 @@ int ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
const struct in6_addr *daddr, unsigned int srcprefs,
struct in6_addr *saddr);
int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
- unsigned char banned_flags);
+ u32 banned_flags);
int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
- unsigned char banned_flags);
+ u32 banned_flags);
int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2);
void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr);
void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr);
@@ -248,6 +248,13 @@ static inline struct inet6_dev *in6_dev_get(const struct net_device *dev)
return idev;
}
+static inline struct neigh_parms *__in6_dev_nd_parms_get_rcu(const struct net_device *dev)
+{
+ struct inet6_dev *idev = __in6_dev_get(dev);
+
+ return idev ? idev->nd_parms : NULL;
+}
+
void in6_dev_finish_destroy(struct inet6_dev *idev);
static inline void in6_dev_put(struct inet6_dev *idev)
diff --git a/include/net/arp.h b/include/net/arp.h
index 7509d9da4e36..73c49864076b 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -62,6 +62,5 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
const unsigned char *src_hw,
const unsigned char *target_hw);
void arp_xmit(struct sk_buff *skb);
-int arp_invalidate(struct net_device *dev, __be32 ip);
#endif /* _ARP_H */
diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
index a8c2ef6d3b93..a6fd939f202d 100644
--- a/include/net/cipso_ipv4.h
+++ b/include/net/cipso_ipv4.h
@@ -26,8 +26,7 @@
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
@@ -304,7 +303,7 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
for (opt_iter = 6; opt_iter < opt_len;) {
tag_len = opt[opt_iter + 1];
- if ((tag_len == 0) || (opt[opt_iter + 1] > (opt_len - opt_iter))) {
+ if ((tag_len == 0) || (tag_len > (opt_len - opt_iter))) {
err_offset = opt_iter + 1;
goto out;
}
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index 33d03b648646..9cf2d5ef38d9 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -16,17 +16,16 @@
#include <linux/cgroup.h>
#include <linux/hardirq.h>
#include <linux/rcupdate.h>
+#include <net/sock.h>
-#if IS_ENABLED(CONFIG_NET_CLS_CGROUP)
-struct cgroup_cls_state
-{
+#ifdef CONFIG_CGROUP_NET_CLASSID
+struct cgroup_cls_state {
struct cgroup_subsys_state css;
u32 classid;
};
-void sock_update_classid(struct sock *sk);
+struct cgroup_cls_state *task_cls_state(struct task_struct *p);
-#if IS_BUILTIN(CONFIG_NET_CLS_CGROUP)
static inline u32 task_cls_classid(struct task_struct *p)
{
u32 classid;
@@ -41,33 +40,18 @@ static inline u32 task_cls_classid(struct task_struct *p)
return classid;
}
-#elif IS_MODULE(CONFIG_NET_CLS_CGROUP)
-static inline u32 task_cls_classid(struct task_struct *p)
-{
- struct cgroup_subsys_state *css;
- u32 classid = 0;
-
- if (in_interrupt())
- return 0;
-
- rcu_read_lock();
- css = task_css(p, net_cls_subsys_id);
- if (css)
- classid = container_of(css,
- struct cgroup_cls_state, css)->classid;
- rcu_read_unlock();
- return classid;
-}
-#endif
-#else /* !CGROUP_NET_CLS_CGROUP */
static inline void sock_update_classid(struct sock *sk)
{
-}
+ u32 classid;
-static inline u32 task_cls_classid(struct task_struct *p)
+ classid = task_cls_classid(current);
+ if (classid != sk->sk_classid)
+ sk->sk_classid = classid;
+}
+#else /* !CONFIG_CGROUP_NET_CLASSID */
+static inline void sock_update_classid(struct sock *sk)
{
- return 0;
}
-#endif /* CGROUP_NET_CLS_CGROUP */
+#endif /* CONFIG_CGROUP_NET_CLASSID */
#endif /* _NET_CLS_CGROUP_H */
diff --git a/include/net/dcbevent.h b/include/net/dcbevent.h
index d2f3041c0dfa..aec07c8a660a 100644
--- a/include/net/dcbevent.h
+++ b/include/net/dcbevent.h
@@ -11,8 +11,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*
* Author: John Fastabend <john.r.fastabend@intel.com>
*/
diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h
index fc5d5dcebb00..a975edf21b22 100644
--- a/include/net/dcbnl.h
+++ b/include/net/dcbnl.h
@@ -11,8 +11,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*
* Author: Lucy Liu <lucy.liu@intel.com>
*/
diff --git a/include/net/dn_dev.h b/include/net/dn_dev.h
index 20b5ab06032d..197886cd7bdd 100644
--- a/include/net/dn_dev.h
+++ b/include/net/dn_dev.h
@@ -9,7 +9,7 @@ struct dn_ifaddr {
struct dn_dev *ifa_dev;
__le16 ifa_local;
__le16 ifa_address;
- __u8 ifa_flags;
+ __u32 ifa_flags;
__u8 ifa_scope;
char ifa_label[IFNAMSIZ];
struct rcu_head rcu;
diff --git a/include/net/dst.h b/include/net/dst.h
index 44995c13e941..77eb53fabfb0 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -322,12 +322,11 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
skb->dev = dev;
/*
- * Clear rxhash so that we can recalulate the hash for the
+ * Clear hash so that we can recalculate the hash for the
* encapsulated packet, unless we have already determined the hash
* over the L4 4-tuple.
*/
- if (!skb->l4_rxhash)
- skb->rxhash = 0;
+ skb_clear_hash_if_not_l4(skb);
skb_set_queue_mapping(skb, 0);
skb_scrub_packet(skb, !net_eq(net, dev_net(dev)));
}
diff --git a/include/net/flow.h b/include/net/flow.h
index 65ce471d2ab5..d23e7fa2042e 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -20,8 +20,7 @@ struct flowi_common {
__u8 flowic_proto;
__u8 flowic_flags;
#define FLOWI_FLAG_ANYSRC 0x01
-#define FLOWI_FLAG_CAN_SLEEP 0x02
-#define FLOWI_FLAG_KNOWN_NH 0x04
+#define FLOWI_FLAG_KNOWN_NH 0x02
__u32 flowic_secid;
};
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 1b177ed803b7..93695f0e22a5 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -73,6 +73,7 @@ struct genl_family {
* @attrs: netlink attributes
* @_net: network namespace
* @user_ptr: user pointers
+ * @dst_sk: destination socket
*/
struct genl_info {
u32 snd_seq;
@@ -85,6 +86,7 @@ struct genl_info {
struct net * _net;
#endif
void * user_ptr[2];
+ struct sock * dst_sk;
};
static inline struct net *genl_info_net(struct genl_info *info)
@@ -177,6 +179,8 @@ void genl_notify(struct genl_family *family,
struct sk_buff *skb, struct net *net, u32 portid,
u32 group, struct nlmsghdr *nlh, gfp_t flags);
+struct sk_buff *genlmsg_new_unicast(size_t payload, struct genl_info *info,
+ gfp_t flags);
void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
struct genl_family *family, int flags, u8 cmd);
diff --git a/include/net/gre.h b/include/net/gre.h
index dcd9ae3270d3..70046a0b0b89 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -33,9 +33,6 @@ struct gre_cisco_protocol {
int gre_cisco_register(struct gre_cisco_protocol *proto);
int gre_cisco_unregister(struct gre_cisco_protocol *proto);
-int gre_offload_init(void);
-void gre_offload_exit(void);
-
void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
int hdr_len);
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 76d54270f2e2..b58c36c1c3f6 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -50,8 +50,8 @@ struct inet6_ifaddr {
int state;
+ __u32 flags;
__u8 dad_probes;
- __u8 flags;
__u16 scope;
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index f4e127af4e17..6efe73c79c52 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -163,7 +163,6 @@ void inet_putpeer(struct inet_peer *p);
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
void inetpeer_invalidate_tree(struct inet_peer_base *);
-void inetpeer_invalidate_family(int family);
/*
* temporary check to make sure we don't access rid, ip_id_count, tcp_ts,
diff --git a/include/net/ip.h b/include/net/ip.h
index 217bc5bfc6c6..535664477c4a 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -177,12 +177,6 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
__be32 saddr, const struct ip_reply_arg *arg,
unsigned int len);
-struct ipv4_config {
- int log_martians;
- int no_pmtu_disc;
-};
-
-extern struct ipv4_config ipv4_config;
#define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_INC_STATS_BH(net, field) SNMP_INC_STATS64_BH((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val) SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
@@ -473,7 +467,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
int ip_ra_control(struct sock *sk, unsigned char on,
void (*destructor)(struct sock *));
-int ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
+int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
u32 info, u8 *payload);
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
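
With the extra out-parameter, MSG_ERRQUEUE handlers report the source-address length through *addr_len instead of each caller writing msg->msg_namelen itself. A sketch of the updated call site inside a recvmsg implementation:

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len, addr_len);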
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 2182525e4d74..aca0c2709fd6 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -282,7 +282,7 @@ struct fib6_node *fib6_locate(struct fib6_node *root,
const struct in6_addr *saddr, int src_len);
void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
- int prune, void *arg);
+ void *arg);
int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info);
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 733747ce163c..017badb1aec7 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -51,26 +51,6 @@ static inline unsigned int rt6_flags2srcprefs(int flags)
return (flags >> 3) & 7;
}
-void rt6_bind_peer(struct rt6_info *rt, int create);
-
-static inline struct inet_peer *__rt6_get_peer(struct rt6_info *rt, int create)
-{
- if (rt6_has_peer(rt))
- return rt6_peer_ptr(rt);
-
- rt6_bind_peer(rt, create);
- return (rt6_has_peer(rt) ? rt6_peer_ptr(rt) : NULL);
-}
-
-static inline struct inet_peer *rt6_get_peer(struct rt6_info *rt)
-{
- return __rt6_get_peer(rt, 0);
-}
-
-static inline struct inet_peer *rt6_get_peer_create(struct rt6_info *rt)
-{
- return __rt6_get_peer(rt, 1);
-}
void ip6_route_input(struct sk_buff *skb);
@@ -172,16 +152,28 @@ static inline bool ipv6_unicast_destination(const struct sk_buff *skb)
return rt->rt6i_flags & RTF_LOCAL;
}
+static inline bool ipv6_anycast_destination(const struct sk_buff *skb)
+{
+ struct rt6_info *rt = (struct rt6_info *) skb_dst(skb);
+
+ return rt->rt6i_flags & RTF_ANYCAST;
+}
+
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
{
struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
- return (np && np->pmtudisc == IPV6_PMTUDISC_PROBE) ?
+ return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ?
skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
}
+static inline bool ip6_sk_accept_pmtu(const struct sock *sk)
+{
+ return inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_INTERFACE;
+}
+
static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt)
{
return &rt->rt6i_gateway;
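
A short sketch of how the new ip6_sk_accept_pmtu() test is meant to gate PMTU handling; ip6_sk_update_pmtu() is assumed available to the caller:

	/* ignore PMTU updates for sockets using IPV6_PMTUDISC_INTERFACE */
	if (ip6_sk_accept_pmtu(sk))
		ip6_sk_update_pmtu(skb, sk, info);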
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 6d1549c4893c..a5593dab6af7 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -79,7 +79,7 @@ static inline void ip6tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
err = ip6_local_out(skb);
if (net_xmit_eval(err) == 0) {
- struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
u64_stats_update_begin(&tstats->syncp);
tstats->tx_bytes += pkt_len;
tstats->tx_packets++;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 732f8c6ae975..cd729becbb07 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -38,6 +38,11 @@ struct ip_tunnel_prl_entry {
struct rcu_head rcu_head;
};
+struct ip_tunnel_dst {
+ struct dst_entry __rcu *dst;
+ spinlock_t lock;
+};
+
struct ip_tunnel {
struct ip_tunnel __rcu *next;
struct hlist_node hash_node;
@@ -54,6 +59,8 @@ struct ip_tunnel {
int hlen; /* Precalculated header length */
int mlink;
+ struct ip_tunnel_dst __percpu *dst_cache;
+
struct ip_tunnel_parm parms;
/* for SIT */
@@ -155,10 +162,10 @@ struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
static inline void iptunnel_xmit_stats(int err,
struct net_device_stats *err_stats,
- struct pcpu_tstats __percpu *stats)
+ struct pcpu_sw_netstats __percpu *stats)
{
if (err > 0) {
- struct pcpu_tstats *tstats = this_cpu_ptr(stats);
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(stats);
u64_stats_update_begin(&tstats->syncp);
tstats->tx_bytes += err;
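
The per-cpu dst_cache added to struct ip_tunnel is meant for lockless readers. An illustrative, hypothetical lookup helper, assuming the cached entry is published elsewhere with rcu_assign_pointer():

	static struct dst_entry *tunnel_dst_get(struct ip_tunnel *t)
	{
		struct dst_entry *dst;

		rcu_read_lock();
		dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
		if (dst)
			dst_hold(dst);	/* keep it alive past the RCU section */
		rcu_read_unlock();
		return dst;
	}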
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 2a5f668cd683..12079c65ea3e 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -110,7 +110,8 @@ struct frag_hdr {
__be32 identification;
};
-#define IP6_MF 0x0001
+#define IP6_MF 0x0001
+#define IP6_OFFSET 0xFFF8
#include <net/sock.h>
@@ -237,6 +238,7 @@ struct ip6_flowlabel {
#define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF)
#define IPV6_FLOWLABEL_MASK cpu_to_be32(0x000FFFFF)
+#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
struct ipv6_fl_socklist {
struct ipv6_fl_socklist __rcu *next;
@@ -265,9 +267,6 @@ void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);
int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
struct icmp6hdr *thdr, int len);
-struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb,
- struct sock *sk, struct flowi6 *fl6);
-
int ip6_ra_control(struct sock *sk, int sel);
int ipv6_parse_hopopts(struct sk_buff *skb);
@@ -677,6 +676,11 @@ static inline __be32 ip6_flowinfo(const struct ipv6hdr *hdr)
return *(__be32 *)hdr & IPV6_FLOWINFO_MASK;
}
+static inline __be32 ip6_flowlabel(const struct ipv6hdr *hdr)
+{
+ return *(__be32 *)hdr & IPV6_FLOWLABEL_MASK;
+}
+
/*
* Prototypes exported by ipv6
*/
@@ -711,11 +715,9 @@ void ip6_flush_pending_frames(struct sock *sk);
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6);
struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
- const struct in6_addr *final_dst,
- bool can_sleep);
+ const struct in6_addr *final_dst);
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
- const struct in6_addr *final_dst,
- bool can_sleep);
+ const struct in6_addr *final_dst);
struct dst_entry *ip6_blackhole_route(struct net *net,
struct dst_entry *orig_dst);
@@ -776,8 +778,10 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
-int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
-int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len);
+int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
+ int *addr_len);
+int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
+ int *addr_len);
void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
u32 info, u8 *payload);
void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
@@ -832,7 +836,6 @@ static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; }
#ifdef CONFIG_SYSCTL
extern struct ctl_table ipv6_route_table_template[];
-extern struct ctl_table ipv6_icmp_table_template[];
struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
struct ctl_table *ipv6_route_sysctl_init(struct net *net);
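
The new mask and accessor make the layout of the 28-bit flowinfo word explicit; a sketch, with hdr a struct ipv6hdr pointer:

	__be32 info   = ip6_flowinfo(hdr);		/* tclass + label */
	__be32 label  = ip6_flowlabel(hdr);		/* low 20 bits */
	__be32 tclass = info & IPV6_TCLASS_MASK;	/* bits 20-27 */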
diff --git a/include/net/irda/discovery.h b/include/net/irda/discovery.h
index 0ce93398720d..63ae32530567 100644
--- a/include/net/irda/discovery.h
+++ b/include/net/irda/discovery.h
@@ -23,9 +23,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
index 69b610acd2df..2a580ce9edad 100644
--- a/include/net/irda/ircomm_core.h
+++ b/include/net/irda/ircomm_core.h
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/include/net/irda/ircomm_event.h b/include/net/irda/ircomm_event.h
index bc0c6f31f1c6..5bbc32998d57 100644
--- a/include/net/irda/ircomm_event.h
+++ b/include/net/irda/ircomm_event.h
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/include/net/irda/ircomm_lmp.h b/include/net/irda/ircomm_lmp.h
index ae02106be590..5042a5021a04 100644
--- a/include/net/irda/ircomm_lmp.h
+++ b/include/net/irda/ircomm_lmp.h
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/include/net/irda/ircomm_param.h b/include/net/irda/ircomm_param.h
index e6678800c41f..1f67432321c4 100644
--- a/include/net/irda/ircomm_param.h
+++ b/include/net/irda/ircomm_param.h
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/include/net/irda/ircomm_ttp.h b/include/net/irda/ircomm_ttp.h
index 403081ed725c..c5627288bca3 100644
--- a/include/net/irda/ircomm_ttp.h
+++ b/include/net/irda/ircomm_ttp.h
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
index 0224402260a7..8d4f588974bc 100644
--- a/include/net/irda/ircomm_tty.h
+++ b/include/net/irda/ircomm_tty.h
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/include/net/irda/ircomm_tty_attach.h b/include/net/irda/ircomm_tty_attach.h
index 0a63bbb972d7..20dcbdf258cf 100644
--- a/include/net/irda/ircomm_tty_attach.h
+++ b/include/net/irda/ircomm_tty_attach.h
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/include/net/irda/irda_device.h b/include/net/irda/irda_device.h
index 11417475a6c3..664bf8178412 100644
--- a/include/net/irda/irda_device.h
+++ b/include/net/irda/irda_device.h
@@ -24,9 +24,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/include/net/irda/irlap_event.h b/include/net/irda/irlap_event.h
index f9d88da97af2..e4325fee1267 100644
--- a/include/net/irda/irlap_event.h
+++ b/include/net/irda/irlap_event.h
@@ -25,9 +25,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/include/net/irda/irlap_frame.h b/include/net/irda/irlap_frame.h
index 57173ae398ae..cbc12a926e5f 100644
--- a/include/net/irda/irlap_frame.h
+++ b/include/net/irda/irlap_frame.h
@@ -24,9 +24,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/include/net/irda/parameters.h b/include/net/irda/parameters.h
index c0d938847bd3..42713c931d1f 100644
--- a/include/net/irda/parameters.h
+++ b/include/net/irda/parameters.h
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Michel Dänzer <daenzer@debian.org>, 10/2001
* - simplify irda_pv_t to avoid endianness issues
diff --git a/include/net/irda/qos.h b/include/net/irda/qos.h
index cc577dc0a0ef..05a5a249956f 100644
--- a/include/net/irda/qos.h
+++ b/include/net/irda/qos.h
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/include/net/llc.h b/include/net/llc.h
index 68490cbc8a65..e8e61d4fb458 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -93,7 +93,6 @@ struct hlist_nulls_head *llc_sk_laddr_hash(struct llc_sap *sap,
#define LLC_DEST_CONN 2 /* Type 2 goes here */
extern struct list_head llc_sap_list;
-extern spinlock_t llc_sap_list_lock;
int llc_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
struct net_device *orig_dev);
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index 31e2de7d57c5..c0f0a13ed818 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -142,7 +142,7 @@
#define LLC_S_PF_IS_1(pdu) ((pdu->ctrl_2 & LLC_S_PF_BIT_MASK) ? 1 : 0)
#define PDU_SUPV_GET_Nr(pdu) ((pdu->ctrl_2 & 0xFE) >> 1)
-#define PDU_GET_NEXT_Vr(sn) (++sn & ~LLC_2_SEQ_NBR_MODULO)
+#define PDU_GET_NEXT_Vr(sn) (((sn) + 1) & ~LLC_2_SEQ_NBR_MODULO)
/* FRMR information field macros */
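
The rewrite removes a side effect: the old macro incremented its argument, so a natural use such as

	vr = PDU_GET_NEXT_Vr(vr);

updated vr twice within one expression. The new form evaluates sn exactly once, leaves it unchanged, and parenthesizes the parameter against precedence surprises.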
diff --git a/include/net/mip6.h b/include/net/mip6.h
index 26ba99b5a4b1..0386b618908c 100644
--- a/include/net/mip6.h
+++ b/include/net/mip6.h
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
* Authors:
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 536501a3e58d..4c09bd23b832 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -21,6 +21,7 @@
#include <linux/skbuff.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
+#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
@@ -37,6 +38,32 @@
struct neighbour;
+enum {
+ NEIGH_VAR_MCAST_PROBES,
+ NEIGH_VAR_UCAST_PROBES,
+ NEIGH_VAR_APP_PROBES,
+ NEIGH_VAR_RETRANS_TIME,
+ NEIGH_VAR_BASE_REACHABLE_TIME,
+ NEIGH_VAR_DELAY_PROBE_TIME,
+ NEIGH_VAR_GC_STALETIME,
+ NEIGH_VAR_QUEUE_LEN_BYTES,
+ NEIGH_VAR_PROXY_QLEN,
+ NEIGH_VAR_ANYCAST_DELAY,
+ NEIGH_VAR_PROXY_DELAY,
+ NEIGH_VAR_LOCKTIME,
+#define NEIGH_VAR_DATA_MAX (NEIGH_VAR_LOCKTIME + 1)
+ /* Following are used as a second way to access one of the above */
+ NEIGH_VAR_QUEUE_LEN, /* same data as NEIGH_VAR_QUEUE_LEN_BYTES */
+ NEIGH_VAR_RETRANS_TIME_MS, /* same data as NEIGH_VAR_RETRANS_TIME */
+ NEIGH_VAR_BASE_REACHABLE_TIME_MS, /* same data as NEIGH_VAR_BASE_REACHABLE_TIME */
+ /* Following are used by "default" only */
+ NEIGH_VAR_GC_INTERVAL,
+ NEIGH_VAR_GC_THRESH1,
+ NEIGH_VAR_GC_THRESH2,
+ NEIGH_VAR_GC_THRESH3,
+ NEIGH_VAR_MAX
+};
+
struct neigh_parms {
#ifdef CONFIG_NET_NS
struct net *net;
@@ -53,22 +80,30 @@ struct neigh_parms {
atomic_t refcnt;
struct rcu_head rcu_head;
- int base_reachable_time;
- int retrans_time;
- int gc_staletime;
int reachable_time;
- int delay_probe_time;
-
- int queue_len_bytes;
- int ucast_probes;
- int app_probes;
- int mcast_probes;
- int anycast_delay;
- int proxy_delay;
- int proxy_qlen;
- int locktime;
+ int data[NEIGH_VAR_DATA_MAX];
+ DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
};
+static inline void neigh_var_set(struct neigh_parms *p, int index, int val)
+{
+ set_bit(index, p->data_state);
+ p->data[index] = val;
+}
+
+#define NEIGH_VAR(p, attr) ((p)->data[NEIGH_VAR_ ## attr])
+#define NEIGH_VAR_SET(p, attr, val) neigh_var_set(p, NEIGH_VAR_ ## attr, val)
+
+static inline void neigh_parms_data_state_setall(struct neigh_parms *p)
+{
+ bitmap_fill(p->data_state, NEIGH_VAR_DATA_MAX);
+}
+
+static inline void neigh_parms_data_state_cleanall(struct neigh_parms *p)
+{
+ bitmap_zero(p->data_state, NEIGH_VAR_DATA_MAX);
+}
+
struct neigh_statistics {
unsigned long allocs; /* number of allocated neighs */
unsigned long destroys; /* number of destroyed neighs */
@@ -180,6 +215,11 @@ struct neigh_table {
struct pneigh_entry **phash_buckets;
};
+static inline int neigh_parms_family(struct neigh_parms *p)
+{
+ return p->tbl->family;
+}
+
#define NEIGH_PRIV_ALIGN sizeof(long long)
#define NEIGH_ENTRY_SIZE(size) ALIGN((size), NEIGH_PRIV_ALIGN)
@@ -212,6 +252,7 @@ static inline struct neighbour *neigh_create(struct neigh_table *tbl,
void neigh_destroy(struct neighbour *neigh);
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags);
+void __neigh_set_probe_once(struct neighbour *neigh);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
@@ -274,8 +315,17 @@ void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *,
void *neigh_seq_next(struct seq_file *, void *, loff_t *);
void neigh_seq_stop(struct seq_file *, void *);
+int neigh_proc_dointvec(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos);
+int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos);
+
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
- char *p_name, proc_handler *proc_handler);
+ proc_handler *proc_handler);
void neigh_sysctl_unregister(struct neigh_parms *p);
static inline void __neigh_parms_put(struct neigh_parms *parms)
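
All per-parms tunables now live in one data[] array indexed by the NEIGH_VAR_* enum, with data_state tracking which entries were explicitly set. A usage sketch with the macros defined above:

	int probes = NEIGH_VAR(p, UCAST_PROBES);	/* read */

	NEIGH_VAR_SET(p, RETRANS_TIME, HZ / 10);	/* write + mark as set */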
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index 6c3d12e2949f..981c327374da 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -19,6 +19,4 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
int nf_conntrack_ipv4_compat_init(void);
void nf_conntrack_ipv4_compat_fini(void);
-void need_ipv4_conntrack(void);
-
#endif /*_NF_CONNTRACK_IPV4_H*/
diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h
new file mode 100644
index 000000000000..931fbf812171
--- /dev/null
+++ b/include/net/netfilter/ipv4/nf_reject.h
@@ -0,0 +1,128 @@
+#ifndef _IPV4_NF_REJECT_H
+#define _IPV4_NF_REJECT_H
+
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/route.h>
+#include <net/dst.h>
+
+static inline void nf_send_unreach(struct sk_buff *skb_in, int code)
+{
+ icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
+}
+
+/* Send RST reply */
+static void nf_send_reset(struct sk_buff *oldskb, int hook)
+{
+ struct sk_buff *nskb;
+ const struct iphdr *oiph;
+ struct iphdr *niph;
+ const struct tcphdr *oth;
+ struct tcphdr _otcph, *tcph;
+
+ /* IP header checks: fragment. */
+ if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
+ return;
+
+ oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
+ sizeof(_otcph), &_otcph);
+ if (oth == NULL)
+ return;
+
+ /* No RST for RST. */
+ if (oth->rst)
+ return;
+
+ if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+ return;
+
+ /* Check checksum */
+ if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
+ return;
+ oiph = ip_hdr(oldskb);
+
+ nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
+ LL_MAX_HEADER, GFP_ATOMIC);
+ if (!nskb)
+ return;
+
+ skb_reserve(nskb, LL_MAX_HEADER);
+
+ skb_reset_network_header(nskb);
+ niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
+ niph->version = 4;
+ niph->ihl = sizeof(struct iphdr) / 4;
+ niph->tos = 0;
+ niph->id = 0;
+ niph->frag_off = htons(IP_DF);
+ niph->protocol = IPPROTO_TCP;
+ niph->check = 0;
+ niph->saddr = oiph->daddr;
+ niph->daddr = oiph->saddr;
+
+ skb_reset_transport_header(nskb);
+ tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
+ memset(tcph, 0, sizeof(*tcph));
+ tcph->source = oth->dest;
+ tcph->dest = oth->source;
+ tcph->doff = sizeof(struct tcphdr) / 4;
+
+ if (oth->ack)
+ tcph->seq = oth->ack_seq;
+ else {
+ tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
+ oldskb->len - ip_hdrlen(oldskb) -
+ (oth->doff << 2));
+ tcph->ack = 1;
+ }
+
+ tcph->rst = 1;
+ tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr,
+ niph->daddr, 0);
+ nskb->ip_summed = CHECKSUM_PARTIAL;
+ nskb->csum_start = (unsigned char *)tcph - nskb->head;
+ nskb->csum_offset = offsetof(struct tcphdr, check);
+
+ /* ip_route_me_harder expects skb->dst to be set */
+ skb_dst_set_noref(nskb, skb_dst(oldskb));
+
+ nskb->protocol = htons(ETH_P_IP);
+ if (ip_route_me_harder(nskb, RTN_UNSPEC))
+ goto free_nskb;
+
+ niph->ttl = ip4_dst_hoplimit(skb_dst(nskb));
+
+ /* "Never happens" */
+ if (nskb->len > dst_mtu(skb_dst(nskb)))
+ goto free_nskb;
+
+ nf_ct_attach(nskb, oldskb);
+
+#ifdef CONFIG_BRIDGE_NETFILTER
+ /* If we use ip_local_out for bridged traffic, the MAC source on
+ * the RST will be ours, instead of the destination's. This confuses
+ * some routers/firewalls, and they drop the packet. So we need to
+ * build the eth header using the original destination's MAC as the
+ * source, and send the RST packet directly.
+ */
+ if (oldskb->nf_bridge) {
+ struct ethhdr *oeth = eth_hdr(oldskb);
+ nskb->dev = oldskb->nf_bridge->physindev;
+ niph->tot_len = htons(nskb->len);
+ ip_send_check(niph);
+ if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
+ oeth->h_source, oeth->h_dest, nskb->len) < 0)
+ goto free_nskb;
+ dev_queue_xmit(nskb);
+ } else
+#endif
+ ip_local_out(nskb);
+
+ return;
+
+ free_nskb:
+ kfree_skb(nskb);
+}
+
+
+#endif /* _IPV4_NF_REJECT_H */
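
A hedged sketch of how a REJECT-style target is expected to call into this header; the NFT_REJECT_* codes come from the nf_tables UAPI, and the priv layout is assumed:

	switch (priv->type) {
	case NFT_REJECT_ICMP_UNREACH:
		nf_send_unreach(pkt->skb, priv->icmp_code);
		break;
	case NFT_REJECT_TCP_RST:
		nf_send_reset(pkt->skb, pkt->ops->hooknum);
		break;
	}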
diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h
new file mode 100644
index 000000000000..710d17ed70b4
--- /dev/null
+++ b/include/net/netfilter/ipv6/nf_reject.h
@@ -0,0 +1,171 @@
+#ifndef _IPV6_NF_REJECT_H
+#define _IPV6_NF_REJECT_H
+
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/ip6_fib.h>
+#include <net/ip6_checksum.h>
+#include <linux/netfilter_ipv6.h>
+
+static inline void
+nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
+ unsigned int hooknum)
+{
+ if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
+ skb_in->dev = net->loopback_dev;
+
+ icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
+}
+
+/* Send RST reply */
+static void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
+{
+ struct sk_buff *nskb;
+ struct tcphdr otcph, *tcph;
+ unsigned int otcplen, hh_len;
+ int tcphoff, needs_ack;
+ const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
+ struct ipv6hdr *ip6h;
+#define DEFAULT_TOS_VALUE 0x0U
+ const __u8 tclass = DEFAULT_TOS_VALUE;
+ struct dst_entry *dst = NULL;
+ u8 proto;
+ __be16 frag_off;
+ struct flowi6 fl6;
+
+ if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
+ (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
+ pr_debug("addr is not unicast.\n");
+ return;
+ }
+
+ proto = oip6h->nexthdr;
+ tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off);
+
+ if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
+ pr_debug("Cannot get TCP header.\n");
+ return;
+ }
+
+ otcplen = oldskb->len - tcphoff;
+
+ /* IP header checks: fragment, too short. */
+ if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) {
+ pr_debug("proto(%d) != IPPROTO_TCP, "
+ "or too short. otcplen = %d\n",
+ proto, otcplen);
+ return;
+ }
+
+ if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr)))
+ BUG();
+
+ /* No RST for RST. */
+ if (otcph.rst) {
+ pr_debug("RST is set\n");
+ return;
+ }
+
+ /* Check checksum. */
+ if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) {
+ pr_debug("TCP checksum is invalid\n");
+ return;
+ }
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_proto = IPPROTO_TCP;
+ fl6.saddr = oip6h->daddr;
+ fl6.daddr = oip6h->saddr;
+ fl6.fl6_sport = otcph.dest;
+ fl6.fl6_dport = otcph.source;
+ security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
+ dst = ip6_route_output(net, NULL, &fl6);
+ if (dst == NULL || dst->error) {
+ dst_release(dst);
+ return;
+ }
+ dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
+ if (IS_ERR(dst))
+ return;
+
+ hh_len = (dst->dev->hard_header_len + 15)&~15;
+ nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
+ + sizeof(struct tcphdr) + dst->trailer_len,
+ GFP_ATOMIC);
+
+ if (!nskb) {
+ net_dbg_ratelimited("cannot alloc skb\n");
+ dst_release(dst);
+ return;
+ }
+
+ skb_dst_set(nskb, dst);
+
+ skb_reserve(nskb, hh_len + dst->header_len);
+
+ skb_put(nskb, sizeof(struct ipv6hdr));
+ skb_reset_network_header(nskb);
+ ip6h = ipv6_hdr(nskb);
+ ip6_flow_hdr(ip6h, tclass, 0);
+ ip6h->hop_limit = ip6_dst_hoplimit(dst);
+ ip6h->nexthdr = IPPROTO_TCP;
+ ip6h->saddr = oip6h->daddr;
+ ip6h->daddr = oip6h->saddr;
+
+ skb_reset_transport_header(nskb);
+ tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
+ /* Truncate to length (no data) */
+ tcph->doff = sizeof(struct tcphdr)/4;
+ tcph->source = otcph.dest;
+ tcph->dest = otcph.source;
+
+ if (otcph.ack) {
+ needs_ack = 0;
+ tcph->seq = otcph.ack_seq;
+ tcph->ack_seq = 0;
+ } else {
+ needs_ack = 1;
+ tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin
+ + otcplen - (otcph.doff<<2));
+ tcph->seq = 0;
+ }
+
+ /* Reset flags */
+ ((u_int8_t *)tcph)[13] = 0;
+ tcph->rst = 1;
+ tcph->ack = needs_ack;
+ tcph->window = 0;
+ tcph->urg_ptr = 0;
+ tcph->check = 0;
+
+ /* Adjust TCP checksum */
+ tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr,
+ &ipv6_hdr(nskb)->daddr,
+ sizeof(struct tcphdr), IPPROTO_TCP,
+ csum_partial(tcph,
+ sizeof(struct tcphdr), 0));
+
+ nf_ct_attach(nskb, oldskb);
+
+#ifdef CONFIG_BRIDGE_NETFILTER
+ /* If we use ip6_local_out for bridged traffic, the MAC source on
+ * the RST will be ours, instead of the destination's. This confuses
+ * some routers/firewalls, and they drop the packet. So we need to
+ * build the eth header using the original destination's MAC as the
+ * source, and send the RST packet directly.
+ */
+ if (oldskb->nf_bridge) {
+ struct ethhdr *oeth = eth_hdr(oldskb);
+ nskb->dev = oldskb->nf_bridge->physindev;
+ nskb->protocol = htons(ETH_P_IPV6);
+ ip6h->payload_len = htons(sizeof(struct tcphdr));
+ if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
+ oeth->h_source, oeth->h_dest, nskb->len) < 0)
+ return;
+ dev_queue_xmit(nskb);
+ } else
+#endif
+ ip6_local_out(nskb);
+}
+
+#endif /* _IPV6_NF_REJECT_H */
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index 3efab704b7eb..adc1fa3dd7ab 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -87,7 +87,6 @@ int nf_ct_l3proto_register(struct nf_conntrack_l3proto *proto);
void nf_ct_l3proto_unregister(struct nf_conntrack_l3proto *proto);
struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto);
-void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p);
/* Existing built-in protocols */
extern struct nf_conntrack_l3proto nf_conntrack_l3proto_generic;
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index c1d5b3e34a21..84a53d780306 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -1,6 +1,10 @@
#ifndef _NF_QUEUE_H
#define _NF_QUEUE_H
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/jhash.h>
+
/* Each queued (to userspace) skbuff has one of these. */
struct nf_queue_entry {
struct list_head list;
@@ -33,4 +37,62 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
+static inline void init_hashrandom(u32 *jhash_initval)
+{
+ while (*jhash_initval == 0)
+ *jhash_initval = prandom_u32();
+}
+
+static inline u32 hash_v4(const struct sk_buff *skb, u32 jhash_initval)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+
+ /* packets in either direction go into same queue */
+ if ((__force u32)iph->saddr < (__force u32)iph->daddr)
+ return jhash_3words((__force u32)iph->saddr,
+ (__force u32)iph->daddr, iph->protocol, jhash_initval);
+
+ return jhash_3words((__force u32)iph->daddr,
+ (__force u32)iph->saddr, iph->protocol, jhash_initval);
+}
+
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+static inline u32 hash_v6(const struct sk_buff *skb, u32 jhash_initval)
+{
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ u32 a, b, c;
+
+ if ((__force u32)ip6h->saddr.s6_addr32[3] <
+ (__force u32)ip6h->daddr.s6_addr32[3]) {
+ a = (__force u32) ip6h->saddr.s6_addr32[3];
+ b = (__force u32) ip6h->daddr.s6_addr32[3];
+ } else {
+ b = (__force u32) ip6h->saddr.s6_addr32[3];
+ a = (__force u32) ip6h->daddr.s6_addr32[3];
+ }
+
+ if ((__force u32)ip6h->saddr.s6_addr32[1] <
+ (__force u32)ip6h->daddr.s6_addr32[1])
+ c = (__force u32) ip6h->saddr.s6_addr32[1];
+ else
+ c = (__force u32) ip6h->daddr.s6_addr32[1];
+
+ return jhash_3words(a, b, c, jhash_initval);
+}
+#endif
+
+static inline u32
+nfqueue_hash(const struct sk_buff *skb, u16 queue, u16 queues_total, u8 family,
+ u32 jhash_initval)
+{
+ if (family == NFPROTO_IPV4)
+ queue += ((u64) hash_v4(skb, jhash_initval) * queues_total) >> 32;
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+ else if (family == NFPROTO_IPV6)
+ queue += ((u64) hash_v6(skb, jhash_initval) * queues_total) >> 32;
+#endif
+
+ return queue;
+}
+
#endif /* _NF_QUEUE_H */
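
A sketch of the intended use from a queueing target that balances flows across queues_total queues; the wrapper name is hypothetical:

	static u32 pick_queue(const struct sk_buff *skb, u16 base,
			      u16 queues_total, u8 family)
	{
		static u32 jhash_initval __read_mostly;

		init_hashrandom(&jhash_initval);	/* no-op once seeded */
		return nfqueue_hash(skb, base, queues_total, family,
				    jhash_initval);
	}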
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 5a91abfc0c30..57c8ff7955df 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -13,9 +13,10 @@ struct nft_pktinfo {
struct sk_buff *skb;
const struct net_device *in;
const struct net_device *out;
- u8 hooknum;
+ const struct nf_hook_ops *ops;
u8 nhoff;
u8 thoff;
+ u8 tprot;
/* for x_tables compatibility */
struct xt_action_param xt;
};
@@ -29,7 +30,8 @@ static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
pkt->skb = skb;
pkt->in = pkt->xt.in = in;
pkt->out = pkt->xt.out = out;
- pkt->hooknum = pkt->xt.hooknum = ops->hooknum;
+ pkt->ops = ops;
+ pkt->xt.hooknum = ops->hooknum;
pkt->xt.family = ops->pf;
}
@@ -421,6 +423,8 @@ struct nft_stats {
u64 pkts;
};
+#define NFT_HOOK_OPS_MAX 2
+
/**
* struct nft_base_chain - nf_tables base chain
*
@@ -431,8 +435,8 @@ struct nft_stats {
* @chain: the chain
*/
struct nft_base_chain {
- struct nf_hook_ops ops;
- enum nft_chain_type type;
+ struct nf_hook_ops ops[NFT_HOOK_OPS_MAX];
+ const struct nf_chain_type *type;
u8 policy;
struct nft_stats __percpu *stats;
struct nft_chain chain;
@@ -443,8 +447,8 @@ static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chai
return container_of(chain, struct nft_base_chain, chain);
}
-unsigned int nft_do_chain_pktinfo(struct nft_pktinfo *pkt,
- const struct nf_hook_ops *ops);
+unsigned int nft_do_chain(struct nft_pktinfo *pkt,
+ const struct nf_hook_ops *ops);
/**
* struct nft_table - nf_tables table
@@ -475,6 +479,8 @@ struct nft_table {
* @nhooks: number of hooks in this family
* @owner: module owner
* @tables: used internally
+ * @nops: number of hook ops in this family
+ * @hook_ops_init: initialization function for chain hook ops
* @hooks: hookfn overrides for packet validation
*/
struct nft_af_info {
@@ -483,23 +489,36 @@ struct nft_af_info {
unsigned int nhooks;
struct module *owner;
struct list_head tables;
+ unsigned int nops;
+ void (*hook_ops_init)(struct nf_hook_ops *,
+ unsigned int);
nf_hookfn *hooks[NF_MAX_HOOKS];
};
int nft_register_afinfo(struct net *, struct nft_af_info *);
void nft_unregister_afinfo(struct nft_af_info *);
+/**
+ * struct nf_chain_type - nf_tables chain type info
+ *
+ * @name: name of the type
+ * @type: numeric identifier
+ * @family: address family
+ * @owner: module owner
+ * @hook_mask: mask of valid hooks
+ * @hooks: hookfn overrides
+ */
struct nf_chain_type {
- unsigned int hook_mask;
- const char *name;
- enum nft_chain_type type;
- nf_hookfn *fn[NF_MAX_HOOKS];
- struct module *me;
- int family;
+ const char *name;
+ enum nft_chain_type type;
+ int family;
+ struct module *owner;
+ unsigned int hook_mask;
+ nf_hookfn *hooks[NF_MAX_HOOKS];
};
-int nft_register_chain_type(struct nf_chain_type *);
-void nft_unregister_chain_type(struct nf_chain_type *);
+int nft_register_chain_type(const struct nf_chain_type *);
+void nft_unregister_chain_type(const struct nf_chain_type *);
int nft_register_expr(struct nft_expr_type *);
void nft_unregister_expr(struct nft_expr_type *);
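
Under the reworked API a chain type is a const object registered once at module init. A hedged example modelled on a filter chain type; the hook mask shown is illustrative:

	static const struct nf_chain_type filter_ipv4 = {
		.name		= "filter",
		.type		= NFT_CHAIN_T_DEFAULT,
		.family		= NFPROTO_IPV4,
		.owner		= THIS_MODULE,
		.hook_mask	= (1 << NF_INET_LOCAL_IN) |
				  (1 << NF_INET_LOCAL_OUT) |
				  (1 << NF_INET_FORWARD),
	};

	err = nft_register_chain_type(&filter_ipv4);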
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index 1be1c2c197ee..cba143fbd2e4 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -15,9 +15,12 @@ nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
nft_set_pktinfo(pkt, ops, skb, in, out);
- pkt->xt.thoff = ip_hdrlen(pkt->skb);
ip = ip_hdr(pkt->skb);
+ pkt->tprot = ip->protocol;
+ pkt->xt.thoff = ip_hdrlen(pkt->skb);
pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
}
+extern struct nft_af_info nft_af_ipv4;
+
#endif
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index 4a9b88a65963..74d976137658 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -21,10 +21,13 @@ nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
if (protohdr < 0)
return -1;
+ pkt->tprot = protohdr;
pkt->xt.thoff = thoff;
pkt->xt.fragoff = frag_off;
return 0;
}
+extern struct nft_af_info nft_af_ipv6;
+
#endif
diff --git a/include/net/netlabel.h b/include/net/netlabel.h
index 2c95d55f7914..24948bedb64c 100644
--- a/include/net/netlabel.h
+++ b/include/net/netlabel.h
@@ -22,8 +22,7 @@
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index c9c0c538b68b..fbcc7fa536dc 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -65,6 +65,23 @@ struct nf_ip_net {
struct netns_ct {
atomic_t count;
unsigned int expect_count;
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_header *sysctl_header;
+ struct ctl_table_header *acct_sysctl_header;
+ struct ctl_table_header *tstamp_sysctl_header;
+ struct ctl_table_header *event_sysctl_header;
+ struct ctl_table_header *helper_sysctl_header;
+#endif
+ char *slabname;
+ unsigned int sysctl_log_invalid; /* Log invalid packets */
+ unsigned int sysctl_events_retry_timeout;
+ int sysctl_events;
+ int sysctl_acct;
+ int sysctl_auto_assign_helper;
+ bool auto_assign_helper_warned;
+ int sysctl_tstamp;
+ int sysctl_checksum;
+
unsigned int htable_size;
struct kmem_cache *nf_conntrack_cachep;
struct hlist_nulls_head *hash;
@@ -75,14 +92,6 @@ struct netns_ct {
struct ip_conntrack_stat __percpu *stat;
struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
- int sysctl_events;
- unsigned int sysctl_events_retry_timeout;
- int sysctl_acct;
- int sysctl_tstamp;
- int sysctl_checksum;
- unsigned int sysctl_log_invalid; /* Log invalid packets */
- int sysctl_auto_assign_helper;
- bool auto_assign_helper_warned;
struct nf_ip_net nf_ct_proto;
#if defined(CONFIG_NF_CONNTRACK_LABELS)
unsigned int labels_used;
@@ -92,13 +101,5 @@ struct netns_ct {
struct hlist_head *nat_bysource;
unsigned int nat_htable_size;
#endif
-#ifdef CONFIG_SYSCTL
- struct ctl_table_header *sysctl_header;
- struct ctl_table_header *acct_sysctl_header;
- struct ctl_table_header *tstamp_sysctl_header;
- struct ctl_table_header *event_sysctl_header;
- struct ctl_table_header *helper_sysctl_header;
-#endif
- char *slabname;
};
#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index ee520cba2ec2..929a668e91a9 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -69,6 +69,7 @@ struct netns_ipv4 {
struct local_ports sysctl_local_ports;
int sysctl_tcp_ecn;
+ int sysctl_ip_no_pmtu_disc;
kgid_t sysctl_ping_group_range[2];
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 0fb2401197c5..76fc7d1dbfd3 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -73,6 +73,7 @@ struct netns_ipv6 {
#endif
atomic_t dev_addr_genid;
atomic_t rt_genid;
+ int anycast_src_echo_reply;
};
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index 15d056d534e3..26a394cb91a8 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -10,6 +10,7 @@ struct netns_nftables {
struct list_head commit_list;
struct nft_af_info *ipv4;
struct nft_af_info *ipv6;
+ struct nft_af_info *inet;
struct nft_af_info *arp;
struct nft_af_info *bridge;
u8 gencursor;
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 5299e69a32af..1006a265beb3 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -33,8 +33,6 @@ struct netns_xfrm {
struct hlist_head state_gc_list;
struct work_struct state_gc_work;
- wait_queue_head_t km_waitq;
-
struct list_head policy_all;
struct hlist_head *policy_byidx;
unsigned int policy_idx_hmask;
@@ -59,6 +57,10 @@ struct netns_xfrm {
#if IS_ENABLED(CONFIG_IPV6)
struct dst_ops xfrm6_dst_ops;
#endif
+ spinlock_t xfrm_state_lock;
+ spinlock_t xfrm_policy_sk_bundle_lock;
+ rwlock_t xfrm_policy_lock;
+ struct mutex xfrm_cfg_mutex;
};
#endif
diff --git a/include/net/netprio_cgroup.h b/include/net/netprio_cgroup.h
index 099d02782e22..dafc09f0fdbc 100644
--- a/include/net/netprio_cgroup.h
+++ b/include/net/netprio_cgroup.h
@@ -13,12 +13,12 @@
#ifndef _NETPRIO_CGROUP_H
#define _NETPRIO_CGROUP_H
+
#include <linux/cgroup.h>
#include <linux/hardirq.h>
#include <linux/rcupdate.h>
-
-#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
struct netprio_map {
struct rcu_head rcu;
u32 priomap_len;
@@ -27,8 +27,7 @@ struct netprio_map {
void sock_update_netprioidx(struct sock *sk);
-#if IS_BUILTIN(CONFIG_NETPRIO_CGROUP)
-
+#if IS_BUILTIN(CONFIG_CGROUP_NET_PRIO)
static inline u32 task_netprioidx(struct task_struct *p)
{
struct cgroup_subsys_state *css;
@@ -40,9 +39,7 @@ static inline u32 task_netprioidx(struct task_struct *p)
rcu_read_unlock();
return idx;
}
-
-#elif IS_MODULE(CONFIG_NETPRIO_CGROUP)
-
+#elif IS_MODULE(CONFIG_CGROUP_NET_PRIO)
static inline u32 task_netprioidx(struct task_struct *p)
{
struct cgroup_subsys_state *css;
@@ -56,9 +53,7 @@ static inline u32 task_netprioidx(struct task_struct *p)
return idx;
}
#endif
-
-#else /* !CONFIG_NETPRIO_CGROUP */
-
+#else /* !CONFIG_CGROUP_NET_PRIO */
static inline u32 task_netprioidx(struct task_struct *p)
{
return 0;
@@ -66,6 +61,5 @@ static inline u32 task_netprioidx(struct task_struct *p)
#define sock_update_netprioidx(sk)
-#endif /* CONFIG_NETPRIO_CGROUP */
-
+#endif /* CONFIG_CGROUP_NET_PRIO */
#endif /* _NET_CLS_CGROUP_H */
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 2eca2960ca9c..03c4650b548c 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __NET_HCI_H
diff --git a/include/net/nfc/llc.h b/include/net/nfc/llc.h
index 400ab7ae749d..c25fbdee0d61 100644
--- a/include/net/nfc/llc.h
+++ b/include/net/nfc/llc.h
@@ -13,9 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __NFC_LLC_H_
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index e5aa5acafea0..fbfa4e471abb 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -20,8 +20,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index 6126f1f992b4..0ff070e8f8de 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -21,8 +21,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 82fc4e43fc6e..e80894bca1d0 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -16,9 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __NET_NFC_H
diff --git a/include/net/ping.h b/include/net/ping.h
index 3f67704f3747..76013653e08c 100644
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -31,7 +31,8 @@
/* Compatibility glue so we can support IPv6 when it's compiled as a module */
struct pingv6_ops {
- int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len);
+ int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len,
+ int *addr_len);
int (*ip6_datagram_recv_ctl)(struct sock *sk, struct msghdr *msg,
struct sk_buff *skb);
int (*icmpv6_err_convert)(u8 type, u8 code, int *err);
@@ -41,11 +42,6 @@ struct pingv6_ops {
const struct net_device *dev, int strict);
};
-struct ping_table {
- struct hlist_nulls_head hash[PING_HTABLE_SIZE];
- rwlock_t lock;
-};
-
struct ping_iter_state {
struct seq_net_private p;
int bucket;
@@ -53,7 +49,6 @@ struct ping_iter_state {
};
extern struct proto ping_prot;
-extern struct ping_table ping_table;
#if IS_ENABLED(CONFIG_IPV6)
extern struct pingv6_ops pingv6_ops;
#endif
@@ -80,8 +75,6 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len, int noblock, int flags, int *addr_len);
int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
void *user_icmph, size_t icmph_len);
-int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len);
int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len);
int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 2ebef77a2f9a..50ea07969c09 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -62,18 +62,26 @@ tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
- struct tc_action *action;
+ __u32 type; /* for backward compat(TCA_OLD_COMPAT) */
+ struct list_head actions;
#endif
-};
-
-/* Map to export classifier specific extension TLV types to the
- * generic extensions API. Unsupported extensions must be set to 0.
- */
-struct tcf_ext_map {
+ /* Map to export classifier specific extension TLV types to the
+ * generic extensions API. Unsupported extensions must be set to 0.
+ */
int action;
int police;
};
+static inline void tcf_exts_init(struct tcf_exts *exts, int action, int police)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ exts->type = 0;
+ INIT_LIST_HEAD(&exts->actions);
+#endif
+ exts->action = action;
+ exts->police = police;
+}
+
/**
* tcf_exts_is_predicative - check if a predicative extension is present
* @exts: tc filter extensions handle
@@ -85,7 +93,7 @@ static inline int
tcf_exts_is_predicative(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
- return !!exts->action;
+ return !list_empty(&exts->actions);
#else
return 0;
#endif
@@ -120,23 +128,20 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
- if (exts->action)
- return tcf_action_exec(skb, exts->action, res);
+ if (!list_empty(&exts->actions))
+ return tcf_action_exec(skb, &exts->actions, res);
#endif
return 0;
}
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
struct nlattr **tb, struct nlattr *rate_tlv,
- struct tcf_exts *exts,
- const struct tcf_ext_map *map);
+ struct tcf_exts *exts);
void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts);
void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
struct tcf_exts *src);
-int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts,
- const struct tcf_ext_map *map);
-int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts,
- const struct tcf_ext_map *map);
+int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
+int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
/**
* struct tcf_pkt_info - packet information
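
Classifiers converted to the embedded map and action list must initialize the extension block before validating attributes; a sketch using the u32 classifier's attribute IDs (tb/est as passed into a ->change() method):

	struct tcf_exts e;
	int err;

	tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE);
	err = tcf_exts_validate(net, tp, tb, est, &e);
	if (err < 0)
		return err;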
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 59ec3cd15d68..891d80d2c4d2 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -88,6 +88,7 @@ int unregister_qdisc(struct Qdisc_ops *qops);
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);
+void qdisc_list_add(struct Qdisc *q);
void qdisc_list_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
diff --git a/include/net/route.h b/include/net/route.h
index f68c167280a7..638e3ebf76f3 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -239,14 +239,12 @@ static inline char rt_tos2priority(u8 tos)
static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32 src,
u32 tos, int oif, u8 protocol,
__be16 sport, __be16 dport,
- struct sock *sk, bool can_sleep)
+ struct sock *sk)
{
__u8 flow_flags = 0;
if (inet_sk(sk)->transparent)
flow_flags |= FLOWI_FLAG_ANYSRC;
- if (can_sleep)
- flow_flags |= FLOWI_FLAG_CAN_SLEEP;
flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
protocol, flow_flags, dst, src, dport, sport);
@@ -256,13 +254,13 @@ static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
__be32 dst, __be32 src, u32 tos,
int oif, u8 protocol,
__be16 sport, __be16 dport,
- struct sock *sk, bool can_sleep)
+ struct sock *sk)
{
struct net *net = sock_net(sk);
struct rtable *rt;
ip_route_connect_init(fl4, dst, src, tos, oif, protocol,
- sport, dport, sk, can_sleep);
+ sport, dport, sk);
if (!dst || !src) {
rt = __ip_route_output_key(net, fl4);
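
Callers drop the trailing argument now that FLOWI_FLAG_CAN_SLEEP is gone; a sketch of a TCP-style connect-time lookup:

	rt = ip_route_connect(fl4, daddr, saddr, RT_CONN_FLAGS(sk),
			      sk->sk_bound_dev_if, IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt))
		return PTR_ERR(rt);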
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index bb13a182fba6..8fb42070a2c1 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -115,10 +115,9 @@ struct rtnl_af_ops {
const struct nlattr *attr);
};
-int __rtnl_af_register(struct rtnl_af_ops *ops);
void __rtnl_af_unregister(struct rtnl_af_ops *ops);
-int rtnl_af_register(struct rtnl_af_ops *ops);
+void rtnl_af_register(struct rtnl_af_ops *ops);
void rtnl_af_unregister(struct rtnl_af_ops *ops);
struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index d0a6321c302e..013d96dc6918 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -185,7 +185,7 @@ struct tcf_result {
};
struct tcf_proto_ops {
- struct tcf_proto_ops *next;
+ struct list_head head;
char kind[IFNAMSIZ];
int (*classify)(struct sk_buff *,
diff --git a/include/net/sctp/auth.h b/include/net/sctp/auth.h
index aa80bef3c9d5..f2d58aa37a6f 100644
--- a/include/net/sctp/auth.h
+++ b/include/net/sctp/auth.h
@@ -16,9 +16,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
index 6bd44fe94c26..4a5b9a306c69 100644
--- a/include/net/sctp/checksum.h
+++ b/include/net/sctp/checksum.h
@@ -19,9 +19,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 832f2191489c..4b7cd695e431 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -19,9 +19,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 2f0a565a0fd5..307728f622ef 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -19,9 +19,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index c5fe80697f8d..610a8c8738fa 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -21,9 +21,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 4ef75af340b6..7f4eeb340a54 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -21,9 +21,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email addresses:
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 2174d8da0770..e9f732fda950 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -19,9 +19,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email addresses:
@@ -629,6 +628,7 @@ struct sctp_chunk {
#define SCTP_NEED_FRTX 0x1
#define SCTP_DONT_FRTX 0x2
__u16 rtt_in_progress:1, /* This chunk used for RTT calc? */
+ resent:1, /* Has this chunk ever been resent? */
has_tsn:1, /* Does this chunk have a TSN yet? */
has_ssn:1, /* Does this chunk have a SSN yet? */
singleton:1, /* Only chunk in the packet? */
@@ -1045,9 +1045,6 @@ struct sctp_outq {
/* Corked? */
char cork;
-
- /* Is this structure empty? */
- char empty;
};
void sctp_outq_init(struct sctp_association *, struct sctp_outq *);
@@ -1360,12 +1357,6 @@ struct sctp_association {
/* This is all information about our peer. */
struct {
- /* rwnd
- *
- * Peer Rwnd : Current calculated value of the peer's rwnd.
- */
- __u32 rwnd;
-
/* transport_addr_list
*
* Peer : A list of SCTP transport addresses that the
@@ -1383,6 +1374,12 @@ struct sctp_association {
*/
struct list_head transport_addr_list;
+ /* rwnd
+ *
+ * Peer Rwnd : Current calculated value of the peer's rwnd.
+ */
+ __u32 rwnd;
+
/* transport_count
*
* Peer : A count of the number of peer addresses
@@ -1465,6 +1462,20 @@ struct sctp_association {
*/
struct sctp_tsnmap tsn_map;
+ /* This mask is used to disable sending the ASCONF chunk
+ * with specified parameter to peer.
+ */
+ __be16 addip_disabled_mask;
+
+ /* These are capabilities which our peer advertised. */
+ __u8 ecn_capable:1, /* Can peer do ECN? */
+ ipv4_address:1, /* Peer understands IPv4 addresses? */
+ ipv6_address:1, /* Peer understands IPv6 addresses? */
+ hostname_address:1, /* Peer understands DNS addresses? */
+ asconf_capable:1, /* Does peer support ADDIP? */
+ prsctp_capable:1, /* Can peer do PR-SCTP? */
+ auth_capable:1; /* Is peer doing SCTP-AUTH? */
+
/* Ack State : This flag indicates if the next received
* : packet is to be responded to with a
* : SACK. This is initialized to 0. When a packet
@@ -1479,25 +1490,11 @@ struct sctp_association {
__u32 sack_cnt;
__u32 sack_generation;
- /* These are capabilities which our peer advertised. */
- __u8 ecn_capable:1, /* Can peer do ECN? */
- ipv4_address:1, /* Peer understands IPv4 addresses? */
- ipv6_address:1, /* Peer understands IPv6 addresses? */
- hostname_address:1, /* Peer understands DNS addresses? */
- asconf_capable:1, /* Does peer support ADDIP? */
- prsctp_capable:1, /* Can peer do PR-SCTP? */
- auth_capable:1; /* Is peer doing SCTP-AUTH? */
-
__u32 adaptation_ind; /* Adaptation Code point. */
- /* This mask is used to disable sending the ASCONF chunk
- * with specified parameter to peer.
- */
- __be16 addip_disabled_mask;
-
struct sctp_inithdr_host i;
- int cookie_len;
void *cookie;
+ int cookie_len;
/* ADDIP Section 4.2 Upon reception of an ASCONF Chunk.
* C1) ... "Peer-Serial-Number'. This value MUST be initialized to the
@@ -1529,14 +1526,14 @@ struct sctp_association {
*/
sctp_state_t state;
- /* The cookie life I award for any cookie. */
- ktime_t cookie_life;
-
/* Overall : The overall association error count.
* Error Count : [Clear this any time I get something.]
*/
int overall_error_count;
+ /* The cookie life I award for any cookie. */
+ ktime_t cookie_life;
+
/* These are the association's initial, max, and min RTO values.
* These values will be initialized by system defaults, but can
* be modified via the SCTP_RTOINFO socket option.
@@ -1591,10 +1588,9 @@ struct sctp_association {
/* Flags controlling Heartbeat, SACK delay, and Path MTU Discovery. */
__u32 param_flags;
+ __u32 sackfreq;
/* SACK delay timeout */
unsigned long sackdelay;
- __u32 sackfreq;
-
unsigned long timeouts[SCTP_NUM_TIMEOUT_TYPES];
struct timer_list timers[SCTP_NUM_TIMEOUT_TYPES];
@@ -1602,12 +1598,12 @@ struct sctp_association {
/* Transport to which SHUTDOWN chunk was last sent. */
struct sctp_transport *shutdown_last_sent_to;
- /* How many times have we resent a SHUTDOWN */
- int shutdown_retries;
-
/* Transport to which INIT chunk was last sent. */
struct sctp_transport *init_last_sent_to;
+ /* How many times have we resent a SHUTDOWN */
+ int shutdown_retries;
+
/* Next TSN : The next TSN number to be assigned to a new
* : DATA chunk. This is sent in the INIT or INIT
* : ACK chunk to the peer and incremented each
@@ -1725,12 +1721,6 @@ struct sctp_association {
/* How many duplicated TSNs have we seen? */
int numduptsns;
- /* Number of seconds of idle time before an association is closed.
- * In the association context, this is really used as a boolean
- * since the real timeout is stored in the timeouts array
- */
- __u32 autoclose;
-
/* These are to support
* "SCTP Extensions for Dynamic Reconfiguration of IP Addresses
* and Enforcement of Flow and Message Limits"
@@ -1818,8 +1808,8 @@ struct sctp_association {
* after reaching 4294967295.
*/
__u32 addip_serial;
- union sctp_addr *asconf_addr_del_pending;
int src_out_of_asoc_ok;
+ union sctp_addr *asconf_addr_del_pending;
struct sctp_transport *new_transport;
/* SCTP AUTH: list of the endpoint shared keys. These
diff --git a/include/net/sctp/tsnmap.h b/include/net/sctp/tsnmap.h
index 54bbbe547303..31b8dbaad45a 100644
--- a/include/net/sctp/tsnmap.h
+++ b/include/net/sctp/tsnmap.h
@@ -22,9 +22,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index 27b9f5c90153..daacb32b55b5 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -25,9 +25,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h
index b0cf5d54d717..e0dce07b8794 100644
--- a/include/net/sctp/ulpqueue.h
+++ b/include/net/sctp/ulpqueue.h
@@ -24,9 +24,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email addresses:
diff --git a/include/net/sock.h b/include/net/sock.h
index e3a18ff0c38b..5c3f7c3624aa 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -395,7 +395,7 @@ struct sock {
unsigned short sk_ack_backlog;
unsigned short sk_max_ack_backlog;
__u32 sk_priority;
-#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
__u32 sk_cgrp_prioidx;
#endif
struct pid *sk_peer_pid;
@@ -820,30 +820,44 @@ static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
return sk->sk_backlog_rcv(sk, skb);
}
-static inline void sock_rps_record_flow(const struct sock *sk)
+static inline void sock_rps_record_flow_hash(__u32 hash)
{
#ifdef CONFIG_RPS
struct rps_sock_flow_table *sock_flow_table;
rcu_read_lock();
sock_flow_table = rcu_dereference(rps_sock_flow_table);
- rps_record_sock_flow(sock_flow_table, sk->sk_rxhash);
+ rps_record_sock_flow(sock_flow_table, hash);
rcu_read_unlock();
#endif
}
-static inline void sock_rps_reset_flow(const struct sock *sk)
+static inline void sock_rps_reset_flow_hash(__u32 hash)
{
#ifdef CONFIG_RPS
struct rps_sock_flow_table *sock_flow_table;
rcu_read_lock();
sock_flow_table = rcu_dereference(rps_sock_flow_table);
- rps_reset_sock_flow(sock_flow_table, sk->sk_rxhash);
+ rps_reset_sock_flow(sock_flow_table, hash);
rcu_read_unlock();
#endif
}
+static inline void sock_rps_record_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+ sock_rps_record_flow_hash(sk->sk_rxhash);
+#endif
+}
+
+static inline void sock_rps_reset_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+ sock_rps_reset_flow_hash(sk->sk_rxhash);
+#endif
+}
+
static inline void sock_rps_save_rxhash(struct sock *sk,
const struct sk_buff *skb)
{
@@ -1035,7 +1049,6 @@ enum cg_proto_flags {
};
struct cg_proto {
- void (*enter_memory_pressure)(struct sock *sk);
struct res_counter memory_allocated; /* Current allocated memory. */
struct percpu_counter sockets_allocated; /* Current number of sockets. */
int memory_pressure;
@@ -1155,8 +1168,7 @@ static inline void sk_leave_memory_pressure(struct sock *sk)
struct proto *prot = sk->sk_prot;
for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
- if (cg_proto->memory_pressure)
- cg_proto->memory_pressure = 0;
+ cg_proto->memory_pressure = 0;
}
}
@@ -1171,7 +1183,7 @@ static inline void sk_enter_memory_pressure(struct sock *sk)
struct proto *prot = sk->sk_prot;
for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
- cg_proto->enter_memory_pressure(sk);
+ cg_proto->memory_pressure = 1;
}
sk->sk_prot->enter_memory_pressure(sk);
@@ -1537,8 +1549,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
gfp_t priority);
-struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
- gfp_t priority);
void sock_wfree(struct sk_buff *skb);
void skb_orphan_partial(struct sk_buff *skb);
void sock_rfree(struct sk_buff *skb);
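The sock.h hunk above splits the RPS helpers so the flow hint can come from any precomputed hash, not just sk->sk_rxhash; the old sock_rps_record_flow()/sock_rps_reset_flow() become thin wrappers over the hash variants. A minimal sketch of a caller using the new hash-based entry point (record_flow_for_request() is a hypothetical name, not part of this patch):

#include <net/sock.h>

static void record_flow_for_request(__u32 flow_hash)
{
	/* Steer future packets of this flow toward the current CPU. */
	sock_rps_record_flow_hash(flow_hash);
}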
diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h
index e103fe02f375..dd5d86fab030 100644
--- a/include/net/tc_act/tc_skbedit.h
+++ b/include/net/tc_act/tc_skbedit.h
@@ -11,8 +11,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*
* Author: Alexander Duyck <alexander.h.duyck@intel.com>
*/
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 70e55d200610..56fc366da6d5 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -282,6 +282,7 @@ extern int sysctl_tcp_limit_output_bytes;
extern int sysctl_tcp_challenge_ack_limit;
extern unsigned int sysctl_tcp_notsent_lowat;
extern int sysctl_tcp_min_tso_segs;
+extern int sysctl_tcp_autocorking;
extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
@@ -467,7 +468,6 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
struct tcp_fastopen_cookie *foc);
int tcp_disconnect(struct sock *sk, int flags);
-void tcp_connect_init(struct sock *sk);
void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
@@ -622,8 +622,6 @@ static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
return (tp->srtt >> 3) + tp->rttvar;
}
-void tcp_set_rto(struct sock *sk);
-
static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
tp->pred_flags = htonl((tp->tcp_header_len << 26) |
@@ -977,13 +975,6 @@ static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
}
bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
-static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
- const struct sk_buff *skb)
-{
- if (skb->len < mss)
- tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
-}
-
static inline void tcp_check_probe_timer(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
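The tcp.h hunk exports a new sysctl_tcp_autocorking knob. Assuming it is wired up as net.ipv4.tcp_autocorking (consistent with the other sysctl_tcp_* symbols here), a userspace check might look like this sketch:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_autocorking", "r");
	int val;

	if (f && fscanf(f, "%d", &val) == 1)
		printf("tcp_autocorking = %d\n", val);	/* 1 = on */
	if (f)
		fclose(f);
	return 0;
}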
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 6b82fdf4ba71..b7635ef4d436 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -53,7 +53,6 @@
#define XFRM_INC_STATS_USER(net, field) ((void)(net))
#endif
-extern struct mutex xfrm_cfg_mutex;
/* Organization of SPD aka "XFRM rules"
------------------------------------
@@ -1409,7 +1408,7 @@ static inline void xfrm_sysctl_fini(struct net *net)
void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto);
int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
int (*func)(struct xfrm_state *, int, void*), void *);
-void xfrm_state_walk_done(struct xfrm_state_walk *walk);
+void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
struct xfrm_state *xfrm_state_alloc(struct net *net);
struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
@@ -1436,12 +1435,12 @@ struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
unsigned short family);
#ifdef CONFIG_XFRM_SUB_POLICY
int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
- unsigned short family);
+ unsigned short family, struct net *net);
int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
unsigned short family);
#else
static inline int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
- int n, unsigned short family)
+ int n, unsigned short family, struct net *net)
{
return -ENOSYS;
}
@@ -1553,7 +1552,7 @@ void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
int (*func)(struct xfrm_policy *, int, int, void*),
void *);
-void xfrm_policy_walk_done(struct xfrm_policy_walk *walk);
+void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark,
u8 type, int dir,
@@ -1564,6 +1563,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir,
u32 id, int delete, int *err);
int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info);
u32 xfrm_get_acqseq(void);
+int verify_spi_info(u8 proto, u32 min, u32 max);
int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
u8 mode, u32 reqid, u8 proto,
@@ -1576,12 +1576,12 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
const struct xfrm_migrate *m, int num_bundles,
const struct xfrm_kmaddress *k);
-struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m);
+struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net);
struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
struct xfrm_migrate *m);
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
struct xfrm_migrate *m, int num_bundles,
- struct xfrm_kmaddress *k);
+ struct xfrm_kmaddress *k, struct net *net);
#endif
int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
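Several xfrm entry points above (xfrm_state_walk_done(), xfrm_tmpl_sort(), xfrm_migrate(), ...) now take an explicit struct net *, so per-namespace state no longer has to be dug out of the walk object. A hedged sketch of how a caller adapts (finish_state_dump() is a hypothetical wrapper):

#include <net/xfrm.h>

static void finish_state_dump(struct net *net, struct xfrm_state_walk *walk)
{
	/* was: xfrm_state_walk_done(walk); */
	xfrm_state_walk_done(walk, net);
}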
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 979874c627ee..61e1935c91b1 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -978,7 +978,7 @@ struct ib_uobject {
};
struct ib_udata {
- void __user *inbuf;
+ const void __user *inbuf;
void __user *outbuf;
size_t inlen;
size_t outlen;
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 546084964d55..fe3b58e836c8 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -475,6 +475,9 @@ struct scsi_host_template {
*/
unsigned ordered_tag:1;
+ /* True if the controller does not support WRITE SAME */
+ unsigned no_write_same:1;
+
/*
* Countdown for host blocking with no commands outstanding.
*/
@@ -677,6 +680,9 @@ struct Scsi_Host {
/* Don't resume host in EH */
unsigned eh_noresume:1;
+ /* The controller does not support WRITE SAME */
+ unsigned no_write_same:1;
+
/*
* Optional work queue to be utilized by the transport
*/
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index af9983970417..5f73785f5977 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -108,7 +108,7 @@ static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
{
struct snd_sg_buf *sgbuf = dmab->private_data;
dma_addr_t addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
- addr &= PAGE_MASK;
+ addr &= ~((dma_addr_t)PAGE_SIZE - 1);
return addr + offset % PAGE_SIZE;
}
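The memalloc.h change matters on 32-bit kernels with a 64-bit dma_addr_t: PAGE_MASK is an unsigned long, so "addr &= PAGE_MASK" zero-extends a 32-bit mask and wipes the high address bits. Building the mask in dma_addr_t width preserves them. A standalone illustration, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x1ffff2340ULL;		/* DMA address above 4 GiB */
	uint32_t mask32 = ~(uint32_t)(4096 - 1);	/* models a 32-bit PAGE_MASK */

	printf("old: %#llx\n",	/* 0xffff2000: bit 32 lost */
	       (unsigned long long)(addr & mask32));
	printf("new: %#llx\n",	/* 0x1ffff2000: preserved */
	       (unsigned long long)(addr & ~((uint64_t)4096 - 1)));
	return 0;
}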
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 2037c45adfe6..56ebdfca6273 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -104,7 +104,8 @@ struct device;
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = 1}
#define SND_SOC_DAPM_MUX(wname, wreg, wshift, winvert, wcontrols) \
-{ .id = snd_soc_dapm_mux, .name = wname, .reg = wreg, \
+{ .id = snd_soc_dapm_mux, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = 1}
#define SND_SOC_DAPM_VIRT_MUX(wname, wreg, wshift, winvert, wcontrols) \
{ .id = snd_soc_dapm_virt_mux, .name = wname, \
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 5ebe21cd5d1c..39e0114d70c5 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -34,6 +34,11 @@ struct se_subsystem_api {
sense_reason_t (*parse_cdb)(struct se_cmd *cmd);
u32 (*get_device_type)(struct se_device *);
sector_t (*get_blocks)(struct se_device *);
+ sector_t (*get_alignment_offset_lbas)(struct se_device *);
+ /* lbppbe = logical blocks per physical block exponent. see SBC-3 */
+ unsigned int (*get_lbppbe)(struct se_device *);
+ unsigned int (*get_io_min)(struct se_device *);
+ unsigned int (*get_io_opt)(struct se_device *);
unsigned char *(*get_sense_buffer)(struct se_cmd *);
bool (*get_write_cache)(struct se_device *);
};
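Per SBC-3, lbppbe ("logical blocks per physical block exponent") reports the physical sector size as a power-of-two multiple of the logical block size, as carried in READ CAPACITY(16). A backend exposing 512-byte logical blocks on 4 KiB physical sectors would return 3; a toy sketch (the constant is purely illustrative):

struct se_device;

static unsigned int example_get_lbppbe(struct se_device *dev)
{
	(void)dev;
	return 3;	/* 512 << 3 == 4096 bytes per physical block */
}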
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 5bdb8b7d2a69..321301c0a643 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -227,6 +227,7 @@ enum tcm_tmreq_table {
/* fabric independent task management response values */
enum tcm_tmrsp_table {
+ TMR_FUNCTION_FAILED = 0,
TMR_FUNCTION_COMPLETE = 1,
TMR_TASK_DOES_NOT_EXIST = 2,
TMR_LUN_DOES_NOT_EXIST = 3,
@@ -282,11 +283,12 @@ struct t10_alua_lu_gp_member {
struct t10_alua_tg_pt_gp {
u16 tg_pt_gp_id;
int tg_pt_gp_valid_id;
+ int tg_pt_gp_alua_supported_states;
int tg_pt_gp_alua_access_status;
int tg_pt_gp_alua_access_type;
int tg_pt_gp_nonop_delay_msecs;
int tg_pt_gp_trans_delay_msecs;
- int tg_pt_gp_implict_trans_secs;
+ int tg_pt_gp_implicit_trans_secs;
int tg_pt_gp_pref;
int tg_pt_gp_write_metadata;
/* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */
@@ -442,7 +444,6 @@ struct se_cmd {
/* Used for sense data */
void *sense_buffer;
struct list_head se_delayed_node;
- struct list_head se_lun_node;
struct list_head se_qf_node;
struct se_device *se_dev;
struct se_dev_entry *se_deve;
@@ -470,15 +471,11 @@ struct se_cmd {
#define CMD_T_SENT (1 << 4)
#define CMD_T_STOP (1 << 5)
#define CMD_T_FAILED (1 << 6)
-#define CMD_T_LUN_STOP (1 << 7)
-#define CMD_T_LUN_FE_STOP (1 << 8)
-#define CMD_T_DEV_ACTIVE (1 << 9)
-#define CMD_T_REQUEST_STOP (1 << 10)
-#define CMD_T_BUSY (1 << 11)
+#define CMD_T_DEV_ACTIVE (1 << 7)
+#define CMD_T_REQUEST_STOP (1 << 8)
+#define CMD_T_BUSY (1 << 9)
spinlock_t t_state_lock;
struct completion t_transport_stop_comp;
- struct completion transport_lun_fe_stop_comp;
- struct completion transport_lun_stop_comp;
struct work_struct work;
@@ -498,6 +495,9 @@ struct se_cmd {
/* backend private data */
void *priv;
+
+ /* Used for lun->lun_ref counting */
+ bool lun_ref_active;
};
struct se_ua {
@@ -517,10 +517,6 @@ struct se_node_acl {
u32 acl_index;
#define MAX_ACL_TAG_SIZE 64
char acl_tag[MAX_ACL_TAG_SIZE];
- u64 num_cmds;
- u64 read_bytes;
- u64 write_bytes;
- spinlock_t stats_lock;
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
atomic_t acl_pr_ref_count;
struct se_dev_entry **device_list;
@@ -624,10 +620,39 @@ struct se_dev_attrib {
u32 unmap_granularity;
u32 unmap_granularity_alignment;
u32 max_write_same_len;
+ u32 max_bytes_per_io;
struct se_device *da_dev;
struct config_group da_group;
};
+struct se_port_stat_grps {
+ struct config_group stat_group;
+ struct config_group scsi_port_group;
+ struct config_group scsi_tgt_port_group;
+ struct config_group scsi_transport_group;
+};
+
+struct se_lun {
+#define SE_LUN_LINK_MAGIC 0xffff7771
+ u32 lun_link_magic;
+ /* See transport_lun_status_table */
+ enum transport_lun_status_table lun_status;
+ u32 lun_access;
+ u32 lun_flags;
+ u32 unpacked_lun;
+ atomic_t lun_acl_count;
+ spinlock_t lun_acl_lock;
+ spinlock_t lun_sep_lock;
+ struct completion lun_shutdown_comp;
+ struct list_head lun_acl_list;
+ struct se_device *lun_se_dev;
+ struct se_port *lun_sep;
+ struct config_group lun_group;
+ struct se_port_stat_grps port_stat_grps;
+ struct completion lun_ref_comp;
+ struct percpu_ref lun_ref;
+};
+
struct se_dev_stat_grps {
struct config_group stat_group;
struct config_group scsi_dev_group;
@@ -656,11 +681,10 @@ struct se_device {
/* Pointer to transport specific device structure */
u32 dev_index;
u64 creation_time;
- u32 num_resets;
- u64 num_cmds;
- u64 read_bytes;
- u64 write_bytes;
- spinlock_t stats_lock;
+ atomic_long_t num_resets;
+ atomic_long_t num_cmds;
+ atomic_long_t read_bytes;
+ atomic_long_t write_bytes;
/* Active commands on this virtual SE device */
atomic_t simple_cmds;
atomic_t dev_ordered_id;
@@ -711,6 +735,7 @@ struct se_device {
struct se_subsystem_api *transport;
/* Linked list for struct se_hba struct se_device list */
struct list_head dev_list;
+ struct se_lun xcopy_lun;
};
struct se_hba {
@@ -730,34 +755,6 @@ struct se_hba {
struct se_subsystem_api *transport;
};
-struct se_port_stat_grps {
- struct config_group stat_group;
- struct config_group scsi_port_group;
- struct config_group scsi_tgt_port_group;
- struct config_group scsi_transport_group;
-};
-
-struct se_lun {
-#define SE_LUN_LINK_MAGIC 0xffff7771
- u32 lun_link_magic;
- /* See transport_lun_status_table */
- enum transport_lun_status_table lun_status;
- u32 lun_access;
- u32 lun_flags;
- u32 unpacked_lun;
- atomic_t lun_acl_count;
- spinlock_t lun_acl_lock;
- spinlock_t lun_cmd_lock;
- spinlock_t lun_sep_lock;
- struct completion lun_shutdown_comp;
- struct list_head lun_cmd_list;
- struct list_head lun_acl_list;
- struct se_device *lun_se_dev;
- struct se_port *lun_sep;
- struct config_group lun_group;
- struct se_port_stat_grps port_stat_grps;
-};
-
struct scsi_port_stats {
u64 cmd_pdus;
u64 tx_data_octets;
diff --git a/include/target/target_core_configfs.h b/include/target/target_core_configfs.h
index 713c5004f4ae..e0801386e4dc 100644
--- a/include/target/target_core_configfs.h
+++ b/include/target/target_core_configfs.h
@@ -54,4 +54,3 @@ struct target_fabric_configfs {
struct target_fabric_configfs_template tf_cit_tmpl;
};
-#define TF_CIT_TMPL(tf) (&(tf)->tf_cit_tmpl)
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 882b650e32be..4cf4fda404a3 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -137,6 +137,8 @@ void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
void __target_execute_cmd(struct se_cmd *);
int transport_lookup_tmr_lun(struct se_cmd *, u32);
+struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
+ unsigned char *);
struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
unsigned char *);
void core_tpg_clear_object_luns(struct se_portal_group *);
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index f18b3b76e01e..4832d75dcbae 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -162,12 +162,14 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
{ EXTENT_FLAG_LOGGING, "LOGGING" }, \
{ EXTENT_FLAG_FILLING, "FILLING" })
-TRACE_EVENT(btrfs_get_extent,
+TRACE_EVENT_CONDITION(btrfs_get_extent,
TP_PROTO(struct btrfs_root *root, struct extent_map *map),
TP_ARGS(root, map),
+ TP_CONDITION(map),
+
TP_STRUCT__entry(
__field( u64, root_objectid )
__field( u64, start )
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 52594b20179e..5c38606613d8 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -90,6 +90,10 @@
#define TRACE_EVENT_FLAGS(name, value) \
__TRACE_EVENT_FLAGS(name, value)
+#undef TRACE_EVENT_PERF_PERM
+#define TRACE_EVENT_PERF_PERM(name, expr...) \
+ __TRACE_EVENT_PERF_PERM(name, expr)
+
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -140,6 +144,9 @@
#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(event, flag)
+#undef TRACE_EVENT_PERF_PERM
+#define TRACE_EVENT_PERF_PERM(event, expr...)
+
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
@@ -372,7 +379,8 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
__data_size += (len) * sizeof(type);
#undef __string
-#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)
+#define __string(item, src) __dynamic_array(char, item, \
+ strlen((src) ? (const char *)(src) : "(null)") + 1)
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -501,7 +509,7 @@ static inline notrace int ftrace_get_offsets_##call( \
#undef __assign_str
#define __assign_str(dst, src) \
- strcpy(__get_str(dst), src);
+ strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
#undef TP_fast_assign
#define TP_fast_assign(args...) args
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 46d41e8b0dcc..2f3f7ea8c77b 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -981,6 +981,8 @@ struct drm_radeon_cs {
#define RADEON_INFO_SI_TILE_MODE_ARRAY 0x16
/* query if CP DMA is supported on the compute ring */
#define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17
+/* CIK macrotile mode array */
+#define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY 0x18
struct drm_radeon_info {
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index bcb0912afe7a..f854ca4a1372 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -75,6 +75,7 @@
#define DRM_VMW_PARAM_FIFO_CAPS 4
#define DRM_VMW_PARAM_MAX_FB_SIZE 5
#define DRM_VMW_PARAM_FIFO_HW_VERSION 6
+#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
/**
* struct drm_vmw_getparam_arg
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index db0b825b4810..44b05a09f193 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -68,6 +68,9 @@
#define AUDIT_MAKE_EQUIV 1015 /* Append to watched tree */
#define AUDIT_TTY_GET 1016 /* Get TTY auditing status */
#define AUDIT_TTY_SET 1017 /* Set TTY auditing status */
+#define AUDIT_SET_FEATURE 1018 /* Turn an audit feature on or off */
+#define AUDIT_GET_FEATURE 1019 /* Get which features are enabled */
+#define AUDIT_FEATURE_CHANGE 1020 /* audit log listing feature changes */
#define AUDIT_FIRST_USER_MSG 1100 /* Userspace messages mostly uninteresting to kernel */
#define AUDIT_USER_AVC 1107 /* We filter this differently */
@@ -357,6 +360,12 @@ enum {
#define AUDIT_PERM_READ 4
#define AUDIT_PERM_ATTR 8
+/* MAX_AUDIT_MESSAGE_LENGTH is set in audit:lib/libaudit.h as:
+ * 8970 // PATH_MAX*2+CONTEXT_SIZE*2+11+256+1
+ * max header+body+trailer: 44 + 29 + 32 + 262 + 7 + pad
+ */
+#define AUDIT_MESSAGE_TEXT_MAX 8560
+
struct audit_status {
__u32 mask; /* Bit mask for valid entries */
__u32 enabled; /* 1 = enabled, 0 = disabled */
@@ -368,11 +377,28 @@ struct audit_status {
__u32 backlog; /* messages waiting in queue */
};
+struct audit_features {
+#define AUDIT_FEATURE_VERSION 1
+ __u32 vers;
+ __u32 mask; /* which bits we are dealing with */
+ __u32 features; /* which feature to enable/disable */
+ __u32 lock; /* which features to lock */
+};
+
+#define AUDIT_FEATURE_ONLY_UNSET_LOGINUID 0
+#define AUDIT_FEATURE_LOGINUID_IMMUTABLE 1
+#define AUDIT_LAST_FEATURE AUDIT_FEATURE_LOGINUID_IMMUTABLE
+
+#define audit_feature_valid(x) ((x) >= 0 && (x) <= AUDIT_LAST_FEATURE)
+#define AUDIT_FEATURE_TO_MASK(x) (1 << ((x) & 31)) /* mask for __u32 */
+
struct audit_tty_status {
__u32 enabled; /* 1 = enabled, 0 = disabled */
__u32 log_passwd; /* 1 = enabled, 0 = disabled */
};
+#define AUDIT_UID_UNSET (unsigned int)-1
+
/* audit_rule_data supports filter rules with both integer and string
* fields. It corresponds with AUDIT_ADD_RULE, AUDIT_DEL_RULE and
* AUDIT_LIST_RULES requests.
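Once this audit.h hunk is applied, a feature toggle is built by setting the same bit in mask (which bits the request touches) and, when enabling, in features. A hedged helper sketch (set_feature() is a hypothetical name):

#include <linux/audit.h>

static void set_feature(struct audit_features *af, int feature, int on)
{
	af->vers = AUDIT_FEATURE_VERSION;
	af->mask |= AUDIT_FEATURE_TO_MASK(feature);
	if (on)
		af->features |= AUDIT_FEATURE_TO_MASK(feature);
	else
		af->features &= ~AUDIT_FEATURE_TO_MASK(feature);
}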
diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
index 2c267bcbb85c..bc81fb2e1f0e 100644
--- a/include/uapi/linux/eventpoll.h
+++ b/include/uapi/linux/eventpoll.h
@@ -61,5 +61,16 @@ struct epoll_event {
__u64 data;
} EPOLL_PACKED;
-
+#ifdef CONFIG_PM_SLEEP
+static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
+{
+ if ((epev->events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
+ epev->events &= ~EPOLLWAKEUP;
+}
+#else
+static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
+{
+ epev->events &= ~EPOLLWAKEUP;
+}
+#endif
#endif /* _UAPI_LINUX_EVENTPOLL_H */
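The new ep_take_care_of_epollwakeup() helper is meant to run on a copied-in event before it is honored: it strips EPOLLWAKEUP from callers lacking CAP_BLOCK_SUSPEND, and unconditionally when CONFIG_PM_SLEEP is off. A sketch of the expected call site (function name hypothetical):

static int example_epoll_ctl_path(struct epoll_event *epds)
{
	ep_take_care_of_epollwakeup(epds);
	/* ... continue with EPOLL_CTL_ADD/EPOLL_CTL_MOD handling ... */
	return 0;
}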
diff --git a/include/uapi/linux/genetlink.h b/include/uapi/linux/genetlink.h
index 1af72d8228e0..c3363ba1ae05 100644
--- a/include/uapi/linux/genetlink.h
+++ b/include/uapi/linux/genetlink.h
@@ -28,6 +28,7 @@ struct genlmsghdr {
#define GENL_ID_GENERATE 0
#define GENL_ID_CTRL NLMSG_MIN_TYPE
#define GENL_ID_VFS_DQUOT (NLMSG_MIN_TYPE + 1)
+#define GENL_ID_PMCRAID (NLMSG_MIN_TYPE + 2)
/**************************************************************************
* Controller
diff --git a/include/uapi/linux/hash_info.h b/include/uapi/linux/hash_info.h
new file mode 100644
index 000000000000..ca18c45f8304
--- /dev/null
+++ b/include/uapi/linux/hash_info.h
@@ -0,0 +1,37 @@
+/*
+ * Hash Info: Hash algorithms information
+ *
+ * Copyright (c) 2013 Dmitry Kasatkin <d.kasatkin@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _UAPI_LINUX_HASH_INFO_H
+#define _UAPI_LINUX_HASH_INFO_H
+
+enum hash_algo {
+ HASH_ALGO_MD4,
+ HASH_ALGO_MD5,
+ HASH_ALGO_SHA1,
+ HASH_ALGO_RIPE_MD_160,
+ HASH_ALGO_SHA256,
+ HASH_ALGO_SHA384,
+ HASH_ALGO_SHA512,
+ HASH_ALGO_SHA224,
+ HASH_ALGO_RIPE_MD_128,
+ HASH_ALGO_RIPE_MD_256,
+ HASH_ALGO_RIPE_MD_320,
+ HASH_ALGO_WP_256,
+ HASH_ALGO_WP_384,
+ HASH_ALGO_WP_512,
+ HASH_ALGO_TGR_128,
+ HASH_ALGO_TGR_160,
+ HASH_ALGO_TGR_192,
+ HASH_ALGO__LAST
+};
+
+#endif /* _UAPI_LINUX_HASH_INFO_H */
diff --git a/include/uapi/linux/if_addr.h b/include/uapi/linux/if_addr.h
index 23357ab81a77..cfed10be7529 100644
--- a/include/uapi/linux/if_addr.h
+++ b/include/uapi/linux/if_addr.h
@@ -18,6 +18,9 @@ struct ifaddrmsg {
* It makes no difference for normally configured broadcast interfaces,
* but for point-to-point IFA_ADDRESS is DESTINATION address,
* local address is supplied in IFA_LOCAL attribute.
+ *
+ * IFA_FLAGS is a u32 attribute that extends the u8 field ifa_flags.
+ * If present, the value from struct ifaddrmsg will be ignored.
*/
enum {
IFA_UNSPEC,
@@ -28,6 +31,7 @@ enum {
IFA_ANYCAST,
IFA_CACHEINFO,
IFA_MULTICAST,
+ IFA_FLAGS,
__IFA_MAX,
};
@@ -44,6 +48,7 @@ enum {
#define IFA_F_DEPRECATED 0x20
#define IFA_F_TENTATIVE 0x40
#define IFA_F_PERMANENT 0x80
+#define IFA_F_MANAGETEMPADDR 0x100
struct ifa_cacheinfo {
__u32 ifa_prefered;
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index b78566f59aba..3e6bd3c7445d 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -331,11 +331,43 @@ enum {
IFLA_BOND_UNSPEC,
IFLA_BOND_MODE,
IFLA_BOND_ACTIVE_SLAVE,
+ IFLA_BOND_MIIMON,
+ IFLA_BOND_UPDELAY,
+ IFLA_BOND_DOWNDELAY,
+ IFLA_BOND_USE_CARRIER,
+ IFLA_BOND_ARP_INTERVAL,
+ IFLA_BOND_ARP_IP_TARGET,
+ IFLA_BOND_ARP_VALIDATE,
+ IFLA_BOND_ARP_ALL_TARGETS,
+ IFLA_BOND_PRIMARY,
+ IFLA_BOND_PRIMARY_RESELECT,
+ IFLA_BOND_FAIL_OVER_MAC,
+ IFLA_BOND_XMIT_HASH_POLICY,
+ IFLA_BOND_RESEND_IGMP,
+ IFLA_BOND_NUM_PEER_NOTIF,
+ IFLA_BOND_ALL_SLAVES_ACTIVE,
+ IFLA_BOND_MIN_LINKS,
+ IFLA_BOND_LP_INTERVAL,
+ IFLA_BOND_PACKETS_PER_SLAVE,
+ IFLA_BOND_AD_LACP_RATE,
+ IFLA_BOND_AD_SELECT,
+ IFLA_BOND_AD_INFO,
__IFLA_BOND_MAX,
};
#define IFLA_BOND_MAX (__IFLA_BOND_MAX - 1)
+enum {
+ IFLA_BOND_AD_INFO_AGGREGATOR,
+ IFLA_BOND_AD_INFO_NUM_PORTS,
+ IFLA_BOND_AD_INFO_ACTOR_KEY,
+ IFLA_BOND_AD_INFO_PARTNER_KEY,
+ IFLA_BOND_AD_INFO_PARTNER_MAC,
+ __IFLA_BOND_AD_INFO_MAX,
+};
+
+#define IFLA_BOND_AD_INFO_MAX (__IFLA_BOND_AD_INFO_MAX - 1)
+
/* SR-IOV virtual function management section */
enum {
@@ -488,7 +520,9 @@ enum {
IFLA_HSR_UNSPEC,
IFLA_HSR_SLAVE1,
IFLA_HSR_SLAVE2,
- IFLA_HSR_MULTICAST_SPEC,
+ IFLA_HSR_MULTICAST_SPEC, /* Last byte of supervision addr */
+ IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */
+ IFLA_HSR_SEQ_NR,
__IFLA_HSR_MAX,
};
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index dbf06667394b..1988a02842cc 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -26,8 +26,10 @@ struct sockaddr_ll {
#define PACKET_MULTICAST 2 /* To group */
#define PACKET_OTHERHOST 3 /* To someone else */
#define PACKET_OUTGOING 4 /* Outgoing of any type */
-/* These ones are invisible by user level */
#define PACKET_LOOPBACK 5 /* MC/BRD frame looped back */
+#define PACKET_USER 6 /* To user space */
+#define PACKET_KERNEL 7 /* To kernel space */
+/* Unused, PACKET_FASTROUTE and PACKET_LOOPBACK are invisible to user space */
#define PACKET_FASTROUTE 6 /* Fastrouted frame */
/* Packet socket options */
@@ -51,6 +53,7 @@ struct sockaddr_ll {
#define PACKET_TIMESTAMP 17
#define PACKET_FANOUT 18
#define PACKET_TX_HAS_OFF 19
+#define PACKET_QDISC_BYPASS 20
#define PACKET_FANOUT_HASH 0
#define PACKET_FANOUT_LB 1
@@ -83,17 +86,18 @@ struct tpacket_auxdata {
__u16 tp_mac;
__u16 tp_net;
__u16 tp_vlan_tci;
- __u16 tp_padding;
+ __u16 tp_vlan_tpid;
};
/* Rx ring - header status */
-#define TP_STATUS_KERNEL 0
-#define TP_STATUS_USER (1 << 0)
-#define TP_STATUS_COPY (1 << 1)
-#define TP_STATUS_LOSING (1 << 2)
-#define TP_STATUS_CSUMNOTREADY (1 << 3)
-#define TP_STATUS_VLAN_VALID (1 << 4) /* auxdata has valid tp_vlan_tci */
-#define TP_STATUS_BLK_TMO (1 << 5)
+#define TP_STATUS_KERNEL 0
+#define TP_STATUS_USER (1 << 0)
+#define TP_STATUS_COPY (1 << 1)
+#define TP_STATUS_LOSING (1 << 2)
+#define TP_STATUS_CSUMNOTREADY (1 << 3)
+#define TP_STATUS_VLAN_VALID (1 << 4) /* auxdata has valid tp_vlan_tci */
+#define TP_STATUS_BLK_TMO (1 << 5)
+#define TP_STATUS_VLAN_TPID_VALID (1 << 6) /* auxdata has valid tp_vlan_tpid */
/* Tx ring - header status */
#define TP_STATUS_AVAILABLE 0
@@ -132,12 +136,15 @@ struct tpacket2_hdr {
__u32 tp_sec;
__u32 tp_nsec;
__u16 tp_vlan_tci;
- __u16 tp_padding;
+ __u16 tp_vlan_tpid;
+ __u8 tp_padding[4];
};
struct tpacket_hdr_variant1 {
__u32 tp_rxhash;
__u32 tp_vlan_tci;
+ __u16 tp_vlan_tpid;
+ __u16 tp_padding;
};
struct tpacket3_hdr {
@@ -153,6 +160,7 @@ struct tpacket3_hdr {
union {
struct tpacket_hdr_variant1 hv1;
};
+ __u8 tp_padding[8];
};
struct tpacket_bd_ts {
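The PACKET_QDISC_BYPASS socket option added above lets an AF_PACKET transmitter skip the qdisc layer entirely. Enabling it is a plain setsockopt on the packet socket; a userspace sketch:

#include <sys/socket.h>
#include <linux/if_packet.h>

static int enable_qdisc_bypass(int fd)
{
	int one = 1;

	return setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS,
			  &one, sizeof(one));
}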
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index 440d5c479145..f94f1d013bf2 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -188,6 +188,10 @@ enum {
#define IPV6_PMTUDISC_WANT 1
#define IPV6_PMTUDISC_DO 2
#define IPV6_PMTUDISC_PROBE 3
+/* same as IPV6_PMTUDISC_PROBE, provided for symmetry with IPv4
+ * also see comments on IP_PMTUDISC_INTERFACE
+ */
+#define IPV6_PMTUDISC_INTERFACE 4
/* Flowlabel */
#define IPV6_FLOWLABEL_MGR 32
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index a3726275876d..ecc88592ecbe 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -719,6 +719,8 @@ struct input_keymap_entry {
#define BTN_DPAD_LEFT 0x222
#define BTN_DPAD_RIGHT 0x223
+#define KEY_ALS_TOGGLE 0x230 /* Ambient light sensor */
+
#define BTN_TRIGGER_HAPPY 0x2c0
#define BTN_TRIGGER_HAPPY1 0x2c0
#define BTN_TRIGGER_HAPPY2 0x2c1
@@ -856,6 +858,7 @@ struct input_keymap_entry {
#define SW_FRONT_PROXIMITY 0x0b /* set = front proximity sensor active */
#define SW_ROTATE_LOCK 0x0c /* set = rotate locked/disabled */
#define SW_LINEIN_INSERT 0x0d /* set = inserted */
+#define SW_MUTE_DEVICE 0x0e /* set = device disabled */
#define SW_MAX 0x0f
#define SW_CNT (SW_MAX+1)
diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
index c9b7f4faf97a..840cb990abe2 100644
--- a/include/uapi/linux/keyctl.h
+++ b/include/uapi/linux/keyctl.h
@@ -56,5 +56,6 @@
#define KEYCTL_REJECT 19 /* reject a partially constructed key */
#define KEYCTL_INSTANTIATE_IOV 20 /* instantiate a partially constructed key */
#define KEYCTL_INVALIDATE 21 /* invalidate a key */
+#define KEYCTL_GET_PERSISTENT 22 /* get a user's persistent keyring */
#endif /* _LINUX_KEYCTL_H */
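KEYCTL_GET_PERSISTENT fetches (creating on first use) a user's persistent keyring and links it into a destination keyring. A raw-syscall sketch, assuming uid -1 means "the calling user" as with the libkeyutils keyctl_get_persistent() wrapper:

#include <sys/types.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/keyctl.h>

static long get_my_persistent_keyring(void)
{
	return syscall(__NR_keyctl, KEYCTL_GET_PERSISTENT, (uid_t)-1,
		       KEY_SPEC_SESSION_KEYRING);
}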
diff --git a/include/uapi/linux/mic_common.h b/include/uapi/linux/mic_common.h
index 17e7d95e4f53..6eb40244e019 100644
--- a/include/uapi/linux/mic_common.h
+++ b/include/uapi/linux/mic_common.h
@@ -23,12 +23,7 @@
#include <linux/virtio_ring.h>
-#ifndef __KERNEL__
-#define ALIGN(a, x) (((a) + (x) - 1) & ~((x) - 1))
-#define __aligned(x) __attribute__ ((aligned(x)))
-#endif
-
-#define mic_aligned_size(x) ALIGN(sizeof(x), 8)
+#define __mic_align(a, x) (((a) + (x) - 1) & ~((x) - 1))
/**
* struct mic_device_desc: Virtio device information shared between the
@@ -48,8 +43,8 @@ struct mic_device_desc {
__u8 feature_len;
__u8 config_len;
__u8 status;
- __u64 config[0];
-} __aligned(8);
+ __le64 config[0];
+} __attribute__ ((aligned(8)));
/**
* struct mic_device_ctrl: Per virtio device information in the device page
@@ -66,7 +61,7 @@ struct mic_device_desc {
* @h2c_vdev_db: The doorbell number to be used by host. Set by guest.
*/
struct mic_device_ctrl {
- __u64 vdev;
+ __le64 vdev;
__u8 config_change;
__u8 vdev_reset;
__u8 guest_ack;
@@ -74,7 +69,7 @@ struct mic_device_ctrl {
__u8 used_address_updated;
__s8 c2h_vdev_db;
__s8 h2c_vdev_db;
-} __aligned(8);
+} __attribute__ ((aligned(8)));
/**
* struct mic_bootparam: Virtio device independent information in device page
@@ -87,13 +82,13 @@ struct mic_device_ctrl {
* @shutdown_card: Set to 1 by the host when a card shutdown is initiated
*/
struct mic_bootparam {
- __u32 magic;
+ __le32 magic;
__s8 c2h_shutdown_db;
__s8 h2c_shutdown_db;
__s8 h2c_config_db;
__u8 shutdown_status;
__u8 shutdown_card;
-} __aligned(8);
+} __attribute__ ((aligned(8)));
/**
* struct mic_device_page: High level representation of the device page
@@ -116,10 +111,10 @@ struct mic_device_page {
* @num: The number of entries in the virtio_ring
*/
struct mic_vqconfig {
- __u64 address;
- __u64 used_address;
- __u16 num;
-} __aligned(8);
+ __le64 address;
+ __le64 used_address;
+ __le16 num;
+} __attribute__ ((aligned(8)));
/*
* The alignment to use between consumer and producer parts of vring.
@@ -154,7 +149,7 @@ struct mic_vqconfig {
*/
struct _mic_vring_info {
__u16 avail_idx;
- int magic;
+ __le32 magic;
};
/**
@@ -173,15 +168,13 @@ struct mic_vring {
int len;
};
-#define mic_aligned_desc_size(d) ALIGN(mic_desc_size(d), 8)
+#define mic_aligned_desc_size(d) __mic_align(mic_desc_size(d), 8)
#ifndef INTEL_MIC_CARD
static inline unsigned mic_desc_size(const struct mic_device_desc *desc)
{
- return mic_aligned_size(*desc)
- + desc->num_vq * mic_aligned_size(struct mic_vqconfig)
- + desc->feature_len * 2
- + desc->config_len;
+ return sizeof(*desc) + desc->num_vq * sizeof(struct mic_vqconfig)
+ + desc->feature_len * 2 + desc->config_len;
}
static inline struct mic_vqconfig *
@@ -201,8 +194,7 @@ static inline __u8 *mic_vq_configspace(const struct mic_device_desc *desc)
}
static inline unsigned mic_total_desc_size(struct mic_device_desc *desc)
{
- return mic_aligned_desc_size(desc) +
- mic_aligned_size(struct mic_device_ctrl);
+ return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
}
#endif
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index ae5df122e42f..f53879c0f590 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -26,17 +26,17 @@ enum {
};
/**
- * struct hwtstamp_config - %SIOCSHWTSTAMP parameter
+ * struct hwtstamp_config - %SIOCGHWTSTAMP and %SIOCSHWTSTAMP parameter
*
- * @flags: no flags defined right now, must be zero
+ * @flags: no flags defined right now, must be zero for %SIOCSHWTSTAMP
* @tx_type: one of HWTSTAMP_TX_*
- * @rx_type: one of one of HWTSTAMP_FILTER_*
+ * @rx_filter: one of HWTSTAMP_FILTER_*
*
- * %SIOCSHWTSTAMP expects a &struct ifreq with a ifr_data pointer to
- * this structure. dev_ifsioc() in the kernel takes care of the
- * translation between 32 bit userspace and 64 bit kernel. The
- * structure is intentionally chosen so that it has the same layout on
- * 32 and 64 bit systems, don't break this!
+ * %SIOCGHWTSTAMP and %SIOCSHWTSTAMP expect a &struct ifreq with an
+ * ifr_data pointer to this structure. For %SIOCSHWTSTAMP, if the
+ * driver or hardware does not support the requested @rx_filter value,
+ * the driver may use a more general filter mode. In this case
+ * @rx_filter will indicate the actual mode on return.
*/
struct hwtstamp_config {
int flags;
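With the reworked kernel-doc above, SIOCGHWTSTAMP reads the current configuration through the same ifreq plumbing SIOCSHWTSTAMP uses for writes, without disturbing it. A userspace sketch (the interface name is an assumption):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

static int query_hwtstamp(int fd, struct hwtstamp_config *cfg)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	memset(cfg, 0, sizeof(*cfg));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed NIC name */
	ifr.ifr_data = (void *)cfg;

	return ioctl(fd, SIOCGHWTSTAMP, &ifr);
}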
diff --git a/include/uapi/linux/netconf.h b/include/uapi/linux/netconf.h
index 64804a798b0c..669a1f0b1d97 100644
--- a/include/uapi/linux/netconf.h
+++ b/include/uapi/linux/netconf.h
@@ -14,6 +14,7 @@ enum {
NETCONFA_FORWARDING,
NETCONFA_RP_FILTER,
NETCONFA_MC_FORWARDING,
+ NETCONFA_PROXY_NEIGH,
__NETCONFA_MAX
};
#define NETCONFA_MAX (__NETCONFA_MAX - 1)
diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h
index f7dc0ebeeba5..ef1b1f88ca18 100644
--- a/include/uapi/linux/netfilter.h
+++ b/include/uapi/linux/netfilter.h
@@ -53,6 +53,7 @@ enum nf_inet_hooks {
enum {
NFPROTO_UNSPEC = 0,
+ NFPROTO_INET = 1,
NFPROTO_IPV4 = 2,
NFPROTO_ARP = 3,
NFPROTO_BRIDGE = 7,
diff --git a/include/uapi/linux/netfilter/Kbuild b/include/uapi/linux/netfilter/Kbuild
index 17c3af2c4bb9..2344f5a319fc 100644
--- a/include/uapi/linux/netfilter/Kbuild
+++ b/include/uapi/linux/netfilter/Kbuild
@@ -39,6 +39,7 @@ header-y += xt_TEE.h
header-y += xt_TPROXY.h
header-y += xt_addrtype.h
header-y += xt_bpf.h
+header-y += xt_cgroup.h
header-y += xt_cluster.h
header-y += xt_comment.h
header-y += xt_connbytes.h
@@ -54,6 +55,7 @@ header-y += xt_ecn.h
header-y += xt_esp.h
header-y += xt_hashlimit.h
header-y += xt_helper.h
+header-y += xt_ipcomp.h
header-y += xt_iprange.h
header-y += xt_ipvs.h
header-y += xt_length.h
diff --git a/include/uapi/linux/netfilter/nf_nat.h b/include/uapi/linux/netfilter/nf_nat.h
index bf0cc373ffb6..1ad3659102b6 100644
--- a/include/uapi/linux/netfilter/nf_nat.h
+++ b/include/uapi/linux/netfilter/nf_nat.h
@@ -4,10 +4,14 @@
#include <linux/netfilter.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
-#define NF_NAT_RANGE_MAP_IPS 1
-#define NF_NAT_RANGE_PROTO_SPECIFIED 2
-#define NF_NAT_RANGE_PROTO_RANDOM 4
-#define NF_NAT_RANGE_PERSISTENT 8
+#define NF_NAT_RANGE_MAP_IPS (1 << 0)
+#define NF_NAT_RANGE_PROTO_SPECIFIED (1 << 1)
+#define NF_NAT_RANGE_PROTO_RANDOM (1 << 2)
+#define NF_NAT_RANGE_PERSISTENT (1 << 3)
+#define NF_NAT_RANGE_PROTO_RANDOM_FULLY (1 << 4)
+
+#define NF_NAT_RANGE_PROTO_RANDOM_ALL \
+ (NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY)
struct nf_nat_ipv4_range {
unsigned int flags;
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index fbfd229a8e99..83c985a6170b 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -110,11 +110,13 @@ enum nft_table_flags {
*
* @NFTA_TABLE_NAME: name of the table (NLA_STRING)
* @NFTA_TABLE_FLAGS: bitmask of enum nft_table_flags (NLA_U32)
+ * @NFTA_TABLE_USE: number of chains in this table (NLA_U32)
*/
enum nft_table_attributes {
NFTA_TABLE_UNSPEC,
NFTA_TABLE_NAME,
NFTA_TABLE_FLAGS,
+ NFTA_TABLE_USE,
__NFTA_TABLE_MAX
};
#define NFTA_TABLE_MAX (__NFTA_TABLE_MAX - 1)
@@ -529,6 +531,8 @@ enum nft_exthdr_attributes {
* @NFT_META_NFTRACE: packet nftrace bit
* @NFT_META_RTCLASSID: realm value of packet's route (skb->dst->tclassid)
* @NFT_META_SECMARK: packet secmark (skb->secmark)
+ * @NFT_META_NFPROTO: netfilter protocol
+ * @NFT_META_L4PROTO: layer 4 protocol number
*/
enum nft_meta_keys {
NFT_META_LEN,
@@ -546,6 +550,8 @@ enum nft_meta_keys {
NFT_META_NFTRACE,
NFT_META_RTCLASSID,
NFT_META_SECMARK,
+ NFT_META_NFPROTO,
+ NFT_META_L4PROTO,
};
/**
@@ -553,11 +559,13 @@ enum nft_meta_keys {
*
* @NFTA_META_DREG: destination register (NLA_U32)
* @NFTA_META_KEY: meta data item to load (NLA_U32: nft_meta_keys)
+ * @NFTA_META_SREG: source register (NLA_U32)
*/
enum nft_meta_attributes {
NFTA_META_UNSPEC,
NFTA_META_DREG,
NFTA_META_KEY,
+ NFTA_META_SREG,
__NFTA_META_MAX
};
#define NFTA_META_MAX (__NFTA_META_MAX - 1)
@@ -601,12 +609,14 @@ enum nft_ct_keys {
* @NFTA_CT_DREG: destination register (NLA_U32)
* @NFTA_CT_KEY: conntrack data item to load (NLA_U32: nft_ct_keys)
* @NFTA_CT_DIRECTION: direction in case of directional keys (NLA_U8)
+ * @NFTA_CT_SREG: source register (NLA_U32)
*/
enum nft_ct_attributes {
NFTA_CT_UNSPEC,
NFTA_CT_DREG,
NFTA_CT_KEY,
NFTA_CT_DIRECTION,
+ NFTA_CT_SREG,
__NFTA_CT_MAX
};
#define NFTA_CT_MAX (__NFTA_CT_MAX - 1)
@@ -658,6 +668,26 @@ enum nft_log_attributes {
#define NFTA_LOG_MAX (__NFTA_LOG_MAX - 1)
/**
+ * enum nft_queue_attributes - nf_tables queue expression netlink attributes
+ *
+ * @NFTA_QUEUE_NUM: netlink queue to send messages to (NLA_U16)
+ * @NFTA_QUEUE_TOTAL: number of queues to load balance packets on (NLA_U16)
+ * @NFTA_QUEUE_FLAGS: various flags (NLA_U16)
+ */
+enum nft_queue_attributes {
+ NFTA_QUEUE_UNSPEC,
+ NFTA_QUEUE_NUM,
+ NFTA_QUEUE_TOTAL,
+ NFTA_QUEUE_FLAGS,
+ __NFTA_QUEUE_MAX
+};
+#define NFTA_QUEUE_MAX (__NFTA_QUEUE_MAX - 1)
+
+#define NFT_QUEUE_FLAG_BYPASS 0x01 /* for compatibility with v2 */
+#define NFT_QUEUE_FLAG_CPU_FANOUT 0x02 /* use current CPU (no hashing) */
+#define NFT_QUEUE_FLAG_MASK 0x03
+
+/**
* enum nft_reject_types - nf_tables reject expression reject types
*
* @NFT_REJECT_ICMP_UNREACH: reject using ICMP unreachable
diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h
index 0132bad79de7..8dd819e2b5fe 100644
--- a/include/uapi/linux/netfilter/nfnetlink_queue.h
+++ b/include/uapi/linux/netfilter/nfnetlink_queue.h
@@ -47,6 +47,8 @@ enum nfqnl_attr_type {
NFQA_CAP_LEN, /* __u32 length of captured packet */
NFQA_SKB_INFO, /* __u32 skb meta information */
NFQA_EXP, /* nf_conntrack_netlink.h */
+ NFQA_UID, /* __u32 sk uid */
+ NFQA_GID, /* __u32 sk gid */
__NFQA_MAX
};
@@ -99,7 +101,8 @@ enum nfqnl_attr_config {
#define NFQA_CFG_F_FAIL_OPEN (1 << 0)
#define NFQA_CFG_F_CONNTRACK (1 << 1)
#define NFQA_CFG_F_GSO (1 << 2)
-#define NFQA_CFG_F_MAX (1 << 3)
+#define NFQA_CFG_F_UID_GID (1 << 3)
+#define NFQA_CFG_F_MAX (1 << 4)
/* flags for NFQA_SKB_INFO */
/* packet appears to have wrong checksums, but they are ok */
diff --git a/include/uapi/linux/netfilter/xt_cgroup.h b/include/uapi/linux/netfilter/xt_cgroup.h
new file mode 100644
index 000000000000..43acb7e175f6
--- /dev/null
+++ b/include/uapi/linux/netfilter/xt_cgroup.h
@@ -0,0 +1,11 @@
+#ifndef _UAPI_XT_CGROUP_H
+#define _UAPI_XT_CGROUP_H
+
+#include <linux/types.h>
+
+struct xt_cgroup_info {
+ __u32 id;
+ __u32 invert;
+};
+
+#endif /* _UAPI_XT_CGROUP_H */
diff --git a/include/uapi/linux/netfilter/xt_ipcomp.h b/include/uapi/linux/netfilter/xt_ipcomp.h
new file mode 100644
index 000000000000..45c7e40eb8e1
--- /dev/null
+++ b/include/uapi/linux/netfilter/xt_ipcomp.h
@@ -0,0 +1,16 @@
+#ifndef _XT_IPCOMP_H
+#define _XT_IPCOMP_H
+
+#include <linux/types.h>
+
+struct xt_ipcomp {
+ __u32 spis[2]; /* Security Parameter Index */
+ __u8 invflags; /* Inverse flags */
+ __u8 hdrres; /* Test of the Reserved Field */
+};
+
+/* Values for "invflags" field in struct xt_ipcomp. */
+#define XT_IPCOMP_INV_SPI 0x01 /* Invert the sense of spi. */
+#define XT_IPCOMP_INV_MASK 0x01 /* All possible flags. */
+
+#endif /*_XT_IPCOMP_H*/
diff --git a/include/uapi/linux/netfilter/xt_osf.h b/include/uapi/linux/netfilter/xt_osf.h
index 18afa495f973..5d66caeba3ee 100644
--- a/include/uapi/linux/netfilter/xt_osf.h
+++ b/include/uapi/linux/netfilter/xt_osf.h
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _XT_OSF_H
diff --git a/include/uapi/linux/netlink_diag.h b/include/uapi/linux/netlink_diag.h
index 4e31db4eea41..f2159d30d1f5 100644
--- a/include/uapi/linux/netlink_diag.h
+++ b/include/uapi/linux/netlink_diag.h
@@ -33,6 +33,7 @@ struct netlink_diag_ring {
};
enum {
+ /* NETLINK_DIAG_NONE, standard nl API requires this attribute! */
NETLINK_DIAG_MEMINFO,
NETLINK_DIAG_GROUPS,
NETLINK_DIAG_RX_RING,
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index d120f9fe0017..970553cbbc8e 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -40,7 +40,15 @@ struct ovs_header {
#define OVS_DATAPATH_FAMILY "ovs_datapath"
#define OVS_DATAPATH_MCGROUP "ovs_datapath"
-#define OVS_DATAPATH_VERSION 0x1
+
+/* V2:
+ * - API users are expected to provide OVS_DP_ATTR_USER_FEATURES
+ * when creating the datapath.
+ */
+#define OVS_DATAPATH_VERSION 2
+
+/* First OVS datapath version to support features */
+#define OVS_DP_VER_FEATURES 2
enum ovs_datapath_cmd {
OVS_DP_CMD_UNSPEC,
@@ -75,6 +83,7 @@ enum ovs_datapath_attr {
OVS_DP_ATTR_UPCALL_PID, /* Netlink PID to receive upcalls */
OVS_DP_ATTR_STATS, /* struct ovs_dp_stats */
OVS_DP_ATTR_MEGAFLOW_STATS, /* struct ovs_dp_megaflow_stats */
+ OVS_DP_ATTR_USER_FEATURES, /* OVS_DP_F_* */
__OVS_DP_ATTR_MAX
};
@@ -106,6 +115,9 @@ struct ovs_vport_stats {
__u64 tx_dropped; /* no space available in linux */
};
+/* Allow last Netlink attribute to be unaligned */
+#define OVS_DP_F_UNALIGNED (1 << 0)
+
/* Fixed logical ports. */
#define OVSP_LOCAL ((__u32)0)
diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h
index b2cc0cd9c4d9..d08c63f3dd6f 100644
--- a/include/uapi/linux/packet_diag.h
+++ b/include/uapi/linux/packet_diag.h
@@ -29,6 +29,7 @@ struct packet_diag_msg {
};
enum {
+ /* PACKET_DIAG_NONE, standard nl API requires this attribute! */
PACKET_DIAG_INFO,
PACKET_DIAG_MCLIST,
PACKET_DIAG_RX_RING,
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 0890556f779e..c870c2a71d65 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -13,10 +13,10 @@
* PCI to PCI Bridge Specification
* PCI System Design Guide
*
- * For hypertransport information, please consult the following manuals
- * from http://www.hypertransport.org
+ * For HyperTransport information, please consult the following manuals
+ * from http://www.hypertransport.org
*
- * The Hypertransport I/O Link Specification
+ * The HyperTransport I/O Link Specification
*/
#ifndef LINUX_PCI_REGS_H
@@ -37,7 +37,7 @@
#define PCI_COMMAND_INVALIDATE 0x10 /* Use memory write and invalidate */
#define PCI_COMMAND_VGA_PALETTE 0x20 /* Enable palette snooping */
#define PCI_COMMAND_PARITY 0x40 /* Enable parity checking */
-#define PCI_COMMAND_WAIT 0x80 /* Enable address/data stepping */
+#define PCI_COMMAND_WAIT 0x80 /* Enable address/data stepping */
#define PCI_COMMAND_SERR 0x100 /* Enable SERR */
#define PCI_COMMAND_FAST_BACK 0x200 /* Enable back-to-back writes */
#define PCI_COMMAND_INTX_DISABLE 0x400 /* INTx Emulation Disable */
@@ -45,7 +45,7 @@
#define PCI_STATUS 0x06 /* 16 bits */
#define PCI_STATUS_INTERRUPT 0x08 /* Interrupt status */
#define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */
-#define PCI_STATUS_66MHZ 0x20 /* Support 66 Mhz PCI 2.1 bus */
+#define PCI_STATUS_66MHZ 0x20 /* Support 66 MHz PCI 2.1 bus */
#define PCI_STATUS_UDF 0x40 /* Support User Definable Features [obsolete] */
#define PCI_STATUS_FAST_BACK 0x80 /* Accept fast-back to back */
#define PCI_STATUS_PARITY 0x100 /* Detected parity error */
@@ -205,14 +205,14 @@
#define PCI_CAP_ID_CHSWP 0x06 /* CompactPCI HotSwap */
#define PCI_CAP_ID_PCIX 0x07 /* PCI-X */
#define PCI_CAP_ID_HT 0x08 /* HyperTransport */
-#define PCI_CAP_ID_VNDR 0x09 /* Vendor specific */
+#define PCI_CAP_ID_VNDR 0x09 /* Vendor-Specific */
#define PCI_CAP_ID_DBG 0x0A /* Debug port */
#define PCI_CAP_ID_CCRC 0x0B /* CompactPCI Central Resource Control */
-#define PCI_CAP_ID_SHPC 0x0C /* PCI Standard Hot-Plug Controller */
+#define PCI_CAP_ID_SHPC 0x0C /* PCI Standard Hot-Plug Controller */
#define PCI_CAP_ID_SSVID 0x0D /* Bridge subsystem vendor/device ID */
#define PCI_CAP_ID_AGP3 0x0E /* AGP Target PCI-PCI bridge */
#define PCI_CAP_ID_SECDEV 0x0F /* Secure Device */
-#define PCI_CAP_ID_EXP 0x10 /* PCI Express */
+#define PCI_CAP_ID_EXP 0x10 /* PCI Express */
#define PCI_CAP_ID_MSIX 0x11 /* MSI-X */
#define PCI_CAP_ID_SATA 0x12 /* SATA Data/Index Conf. */
#define PCI_CAP_ID_AF 0x13 /* PCI Advanced Features */
@@ -268,8 +268,8 @@
#define PCI_AGP_COMMAND_RQ_MASK 0xff000000 /* Master: Maximum number of requests */
#define PCI_AGP_COMMAND_SBA 0x0200 /* Sideband addressing enabled */
#define PCI_AGP_COMMAND_AGP 0x0100 /* Allow processing of AGP transactions */
-#define PCI_AGP_COMMAND_64BIT 0x0020 /* Allow processing of 64-bit addresses */
-#define PCI_AGP_COMMAND_FW 0x0010 /* Force FW transfers */
+#define PCI_AGP_COMMAND_64BIT 0x0020 /* Allow processing of 64-bit addresses */
+#define PCI_AGP_COMMAND_FW 0x0010 /* Force FW transfers */
#define PCI_AGP_COMMAND_RATE4 0x0004 /* Use 4x rate */
#define PCI_AGP_COMMAND_RATE2 0x0002 /* Use 2x rate */
#define PCI_AGP_COMMAND_RATE1 0x0001 /* Use 1x rate */
@@ -321,7 +321,7 @@
#define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */
#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */
-/* MSI-X entry's format */
+/* MSI-X Table entry format */
#define PCI_MSIX_ENTRY_SIZE 16
#define PCI_MSIX_ENTRY_LOWER_ADDR 0
#define PCI_MSIX_ENTRY_UPPER_ADDR 4
@@ -372,7 +372,7 @@
#define PCI_X_CMD_SPLIT_16 0x0060 /* Max 16 */
#define PCI_X_CMD_SPLIT_32 0x0070 /* Max 32 */
#define PCI_X_CMD_MAX_SPLIT 0x0070 /* Max Outstanding Split Transactions */
-#define PCI_X_CMD_VERSION(x) (((x) >> 12) & 3) /* Version */
+#define PCI_X_CMD_VERSION(x) (((x) >> 12) & 3) /* Version */
#define PCI_X_STATUS 4 /* PCI-X capabilities */
#define PCI_X_STATUS_DEVFN 0x000000ff /* A copy of devfn */
#define PCI_X_STATUS_BUS 0x0000ff00 /* A copy of bus nr */
@@ -407,8 +407,8 @@
/* PCI Bridge Subsystem ID registers */
-#define PCI_SSVID_VENDOR_ID 4 /* PCI-Bridge subsystem vendor id register */
-#define PCI_SSVID_DEVICE_ID 6 /* PCI-Bridge subsystem device id register */
+#define PCI_SSVID_VENDOR_ID 4 /* PCI Bridge subsystem vendor ID */
+#define PCI_SSVID_DEVICE_ID 6 /* PCI Bridge subsystem device ID */
/* PCI Express capability registers */
@@ -484,12 +484,17 @@
#define PCI_EXP_LNKCTL_CLKREQ_EN 0x0100 /* Enable clkreq */
#define PCI_EXP_LNKCTL_HAWD 0x0200 /* Hardware Autonomous Width Disable */
#define PCI_EXP_LNKCTL_LBMIE 0x0400 /* Link Bandwidth Management Interrupt Enable */
-#define PCI_EXP_LNKCTL_LABIE 0x0800 /* Lnk Autonomous Bandwidth Interrupt Enable */
+#define PCI_EXP_LNKCTL_LABIE 0x0800 /* Link Autonomous Bandwidth Interrupt Enable */
#define PCI_EXP_LNKSTA 18 /* Link Status */
#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */
#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 /* Current Link Speed 2.5GT/s */
#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 /* Current Link Speed 5.0GT/s */
-#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Nogotiated Link Width */
+#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 /* Current Link Speed 8.0GT/s */
+#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */
+#define PCI_EXP_LNKSTA_NLW_X1 0x0010 /* Current Link Width x1 */
+#define PCI_EXP_LNKSTA_NLW_X2 0x0020 /* Current Link Width x2 */
+#define PCI_EXP_LNKSTA_NLW_X4 0x0040 /* Current Link Width x4 */
+#define PCI_EXP_LNKSTA_NLW_X8 0x0080 /* Current Link Width x8 */
#define PCI_EXP_LNKSTA_NLW_SHIFT 4 /* start of NLW mask in link status */
#define PCI_EXP_LNKSTA_LT 0x0800 /* Link Training */
#define PCI_EXP_LNKSTA_SLC 0x1000 /* Slot Clock Configuration */
@@ -593,7 +598,7 @@
#define PCI_EXT_CAP_ID_MFVC 0x08 /* Multi-Function VC Capability */
#define PCI_EXT_CAP_ID_VC9 0x09 /* same as _VC */
#define PCI_EXT_CAP_ID_RCRB 0x0A /* Root Complex RB? */
-#define PCI_EXT_CAP_ID_VNDR 0x0B /* Vendor Specific */
+#define PCI_EXT_CAP_ID_VNDR 0x0B /* Vendor-Specific */
#define PCI_EXT_CAP_ID_CAC 0x0C /* Config Access - obsolete */
#define PCI_EXT_CAP_ID_ACS 0x0D /* Access Control Services */
#define PCI_EXT_CAP_ID_ARI 0x0E /* Alternate Routing ID */
@@ -602,12 +607,12 @@
#define PCI_EXT_CAP_ID_MRIOV 0x11 /* Multi Root I/O Virtualization */
#define PCI_EXT_CAP_ID_MCAST 0x12 /* Multicast */
#define PCI_EXT_CAP_ID_PRI 0x13 /* Page Request Interface */
-#define PCI_EXT_CAP_ID_AMD_XXX 0x14 /* reserved for AMD */
-#define PCI_EXT_CAP_ID_REBAR 0x15 /* resizable BAR */
-#define PCI_EXT_CAP_ID_DPA 0x16 /* dynamic power alloc */
-#define PCI_EXT_CAP_ID_TPH 0x17 /* TPH request */
-#define PCI_EXT_CAP_ID_LTR 0x18 /* latency tolerance reporting */
-#define PCI_EXT_CAP_ID_SECPCI 0x19 /* Secondary PCIe */
+#define PCI_EXT_CAP_ID_AMD_XXX 0x14 /* Reserved for AMD */
+#define PCI_EXT_CAP_ID_REBAR 0x15 /* Resizable BAR */
+#define PCI_EXT_CAP_ID_DPA 0x16 /* Dynamic Power Allocation */
+#define PCI_EXT_CAP_ID_TPH 0x17 /* TPH Requester */
+#define PCI_EXT_CAP_ID_LTR 0x18 /* Latency Tolerance Reporting */
+#define PCI_EXT_CAP_ID_SECPCI 0x19 /* Secondary PCIe Capability */
#define PCI_EXT_CAP_ID_PMUX 0x1A /* Protocol Multiplexing */
#define PCI_EXT_CAP_ID_PASID 0x1B /* Process Address Space ID */
#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PASID
@@ -667,9 +672,9 @@
#define PCI_ERR_ROOT_COR_RCV 0x00000001 /* ERR_COR Received */
/* Multi ERR_COR Received */
#define PCI_ERR_ROOT_MULTI_COR_RCV 0x00000002
-/* ERR_FATAL/NONFATAL Recevied */
+/* ERR_FATAL/NONFATAL Received */
#define PCI_ERR_ROOT_UNCOR_RCV 0x00000004
-/* Multi ERR_FATAL/NONFATAL Recevied */
+/* Multi ERR_FATAL/NONFATAL Received */
#define PCI_ERR_ROOT_MULTI_UNCOR_RCV 0x00000008
#define PCI_ERR_ROOT_FIRST_FATAL 0x00000010 /* First Fatal */
#define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */
@@ -678,7 +683,7 @@
/* Virtual Channel */
#define PCI_VC_PORT_REG1 4
-#define PCI_VC_REG1_EVCC 0x7 /* extended vc count */
+#define PCI_VC_REG1_EVCC 0x7 /* extended VC count */
#define PCI_VC_PORT_REG2 8
#define PCI_VC_REG2_32_PHASE 0x2
#define PCI_VC_REG2_64_PHASE 0x4
@@ -711,7 +716,7 @@
#define PCI_VNDR_HEADER_LEN(x) (((x) >> 20) & 0xfff)
/*
- * Hypertransport sub capability types
+ * HyperTransport sub capability types
*
* Unfortunately there are both 3 bit and 5 bit capability types defined
 * in the HT spec; catering for that is a little messy. You probably don't
@@ -739,8 +744,8 @@
#define HT_CAPTYPE_DIRECT_ROUTE 0xB0 /* Direct routing configuration */
#define HT_CAPTYPE_VCSET 0xB8 /* Virtual Channel configuration */
#define HT_CAPTYPE_ERROR_RETRY 0xC0 /* Retry on error configuration */
-#define HT_CAPTYPE_GEN3 0xD0 /* Generation 3 hypertransport configuration */
-#define HT_CAPTYPE_PM 0xE0 /* Hypertransport powermanagement configuration */
+#define HT_CAPTYPE_GEN3 0xD0 /* Generation 3 HyperTransport configuration */
+#define HT_CAPTYPE_PM 0xE0 /* HyperTransport power management configuration */
#define HT_CAP_SIZEOF_LONG 28 /* slave & primary */
#define HT_CAP_SIZEOF_SHORT 24 /* host & secondary */
@@ -777,14 +782,14 @@
#define PCI_PRI_ALLOC_REQ 0x0c /* PRI max reqs allowed */
#define PCI_EXT_CAP_PRI_SIZEOF 16
-/* PASID capability */
+/* Process Address Space ID */
#define PCI_PASID_CAP 0x04 /* PASID feature register */
#define PCI_PASID_CAP_EXEC 0x02 /* Exec permissions Supported */
-#define PCI_PASID_CAP_PRIV 0x04 /* Priviledge Mode Supported */
+#define PCI_PASID_CAP_PRIV 0x04 /* Privilege Mode Supported */
#define PCI_PASID_CTRL 0x06 /* PASID control register */
#define PCI_PASID_CTRL_ENABLE 0x01 /* Enable bit */
#define PCI_PASID_CTRL_EXEC 0x02 /* Exec permissions Enable */
-#define PCI_PASID_CTRL_PRIV 0x04 /* Priviledge Mode Enable */
+#define PCI_PASID_CTRL_PRIV 0x04 /* Privilege Mode Enable */
#define PCI_EXT_CAP_PASID_SIZEOF 8
/* Single Root I/O Virtualization */
@@ -839,22 +844,22 @@
#define PCI_ACS_CTRL 0x06 /* ACS Control Register */
#define PCI_ACS_EGRESS_CTL_V 0x08 /* ACS Egress Control Vector */
-#define PCI_VSEC_HDR 4 /* extended cap - vendor specific */
+#define PCI_VSEC_HDR 4 /* extended cap - vendor-specific */
#define PCI_VSEC_HDR_LEN_SHIFT 20 /* shift for length field */
-/* sata capability */
+/* SATA capability */
#define PCI_SATA_REGS 4 /* SATA REGs specifier */
#define PCI_SATA_REGS_MASK 0xF /* location - BAR#/inline */
#define PCI_SATA_REGS_INLINE 0xF /* REGS in config space */
#define PCI_SATA_SIZEOF_SHORT 8
#define PCI_SATA_SIZEOF_LONG 16
-/* resizable BARs */
+/* Resizable BARs */
#define PCI_REBAR_CTRL 8 /* control register */
#define PCI_REBAR_CTRL_NBAR_MASK (7 << 5) /* mask for # bars */
#define PCI_REBAR_CTRL_NBAR_SHIFT 5 /* shift for # bars */
-/* dynamic power allocation */
+/* Dynamic Power Allocation */
#define PCI_DPA_CAP 4 /* capability register */
#define PCI_DPA_CAP_SUBSTATE_MASK 0x1F /* # substates - 1 */
#define PCI_DPA_BASE_SIZEOF 16 /* size with 0 substates */
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index e1802d6153ae..959d454f76a1 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -679,6 +679,7 @@ enum perf_event_type {
*
* { u64 weight; } && PERF_SAMPLE_WEIGHT
* { u64 data_src; } && PERF_SAMPLE_DATA_SRC
+ * { u64 transaction; } && PERF_SAMPLE_TRANSACTION
* };
*/
PERF_RECORD_SAMPLE = 9,
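
With PERF_SAMPLE_TRANSACTION set, each PERF_RECORD_SAMPLE gains a trailing
u64 describing the memory-transaction state at sample time. A minimal
userspace sketch of requesting it, assuming a kernel built with this change;
the helper name, event choice and period are illustrative, and error
handling is elided:

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static int open_sampling_event(void)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES;
            attr.sample_period = 100000;
            /* each PERF_RECORD_SAMPLE now ends with a u64 transaction word */
            attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TRANSACTION;

            /* profile the calling task on any CPU; no group, no flags */
            return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }
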
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index a806687ad98f..d62316baae94 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -173,6 +173,8 @@ enum {
TCA_TBF_PTAB,
TCA_TBF_RATE64,
TCA_TBF_PRATE64,
+ TCA_TBF_BURST,
+ TCA_TBF_PBURST,
__TCA_TBF_MAX,
};
@@ -523,6 +525,7 @@ enum {
TCA_NETEM_LOSS,
TCA_NETEM_RATE,
TCA_NETEM_ECN,
+ TCA_NETEM_RATE64,
__TCA_NETEM_MAX,
};
@@ -790,4 +793,54 @@ struct tc_fq_qd_stats {
__u32 throttled_flows;
__u32 pad;
};
+
+/* Heavy-Hitter Filter */
+
+enum {
+ TCA_HHF_UNSPEC,
+ TCA_HHF_BACKLOG_LIMIT,
+ TCA_HHF_QUANTUM,
+ TCA_HHF_HH_FLOWS_LIMIT,
+ TCA_HHF_RESET_TIMEOUT,
+ TCA_HHF_ADMIT_BYTES,
+ TCA_HHF_EVICT_TIMEOUT,
+ TCA_HHF_NON_HH_WEIGHT,
+ __TCA_HHF_MAX
+};
+
+#define TCA_HHF_MAX (__TCA_HHF_MAX - 1)
+
+struct tc_hhf_xstats {
+ __u32 drop_overlimit; /* number of times max qdisc packet limit
+ * was hit
+ */
+ __u32 hh_overlimit; /* number of times the max heavy-hitters limit was hit */
+ __u32 hh_tot_count; /* number of captured heavy-hitters so far */
+ __u32 hh_cur_count; /* number of current heavy-hitters */
+};
+
+/* PIE */
+enum {
+ TCA_PIE_UNSPEC,
+ TCA_PIE_TARGET,
+ TCA_PIE_LIMIT,
+ TCA_PIE_TUPDATE,
+ TCA_PIE_ALPHA,
+ TCA_PIE_BETA,
+ TCA_PIE_ECN,
+ TCA_PIE_BYTEMODE,
+ __TCA_PIE_MAX
+};
+#define TCA_PIE_MAX (__TCA_PIE_MAX - 1)
+
+struct tc_pie_xstats {
+ __u32 prob; /* current probability */
+ __u32 delay; /* current delay in ms */
+ __u32 avg_dq_rate; /* current average dq_rate in bits/pie_time */
+ __u32 packets_in; /* total number of packets enqueued */
+ __u32 dropped; /* packets dropped due to pie_action */
+ __u32 overlimit; /* dropped due to lack of space in queue */
+ __u32 maxq; /* maximum queue size */
+ __u32 ecn_mark; /* packets marked with ecn*/
+};
#endif
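
The HHF and PIE qdiscs export these structures through the standard
TCA_XSTATS attribute. Purely as an illustration of the new fields (the
helper name is made up and the netlink plumbing is omitted), a tc-style
tool might render the PIE counters like so:

    #include <stdio.h>
    #include <linux/pkt_sched.h>

    static void print_pie_xstats(const struct tc_pie_xstats *st)
    {
            printf("prob %u delay %ums avg_dq_rate %u\n",
                   st->prob, st->delay, st->avg_dq_rate);
            printf("pkts %u dropped %u overlimit %u maxq %u ecn_mark %u\n",
                   st->packets_in, st->dropped, st->overlimit,
                   st->maxq, st->ecn_mark);
    }
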
diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
index fe1a5406d4d9..f7cf7f351144 100644
--- a/include/uapi/linux/raid/md_p.h
+++ b/include/uapi/linux/raid/md_p.h
@@ -16,6 +16,7 @@
#define _MD_P_H
#include <linux/types.h>
+#include <asm/byteorder.h>
/*
* RAID superblock.
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index ca451e99b28b..266022a2be4a 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -22,9 +22,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 1bdb4a39d1e1..bbaba22f2d1b 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -258,6 +258,7 @@ enum
LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */
LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */
LINUX_MIB_BUSYPOLLRXPACKETS, /* BusyPollRxPackets */
+ LINUX_MIB_TCPAUTOCORKING, /* TCPAutoCorking */
__LINUX_MIB_MAX
};
diff --git a/include/uapi/linux/sockios.h b/include/uapi/linux/sockios.h
index 7997a506ad41..e888b1aed69f 100644
--- a/include/uapi/linux/sockios.h
+++ b/include/uapi/linux/sockios.h
@@ -125,7 +125,8 @@
#define SIOCBRDELIF 0x89a3 /* remove interface from bridge */
/* hardware time stamping: parameters in linux/net_tstamp.h */
-#define SIOCSHWTSTAMP 0x89b0
+#define SIOCSHWTSTAMP 0x89b0 /* set and get config */
+#define SIOCGHWTSTAMP 0x89b1 /* get config */
/* Device private ioctl calls */
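
Until now SIOCSHWTSTAMP was the only way to learn a device's hardware
timestamping state, and it overwrites that state as a side effect. A hedged
sketch of the new read-only query, assuming a driver that implements it;
the helper name and interface name are illustrative:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/sockios.h>
    #include <linux/net_tstamp.h>

    static int query_hwtstamp(int sock, const char *ifname)
    {
            struct hwtstamp_config cfg;
            struct ifreq ifr;

            memset(&cfg, 0, sizeof(cfg));
            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&cfg;

            /* unlike SIOCSHWTSTAMP, this leaves the configuration alone */
            if (ioctl(sock, SIOCGHWTSTAMP, &ifr) < 0)
                    return -1;

            printf("tx_type %d rx_filter %d\n", cfg.tx_type, cfg.rx_filter);
            return 0;
    }

Called as query_hwtstamp(sock, "eth0") on any open datagram socket, it
reports the current settings without disturbing a running PTP daemon.
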
diff --git a/include/uapi/linux/unix_diag.h b/include/uapi/linux/unix_diag.h
index b9e2a6a7446f..1eb0b8dd1830 100644
--- a/include/uapi/linux/unix_diag.h
+++ b/include/uapi/linux/unix_diag.h
@@ -31,6 +31,7 @@ struct unix_diag_msg {
};
enum {
+ /* UNIX_DIAG_NONE, standard nl API requires this attribute! */
UNIX_DIAG_NAME,
UNIX_DIAG_VFS,
UNIX_DIAG_PEER,
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index d630163b9a2e..5759810e1c1b 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -30,7 +30,7 @@
#include <sound/compress_params.h>
-#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 1)
+#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 2)
/**
* struct snd_compressed_buffer: compressed buffer
* @fragment_size: size of buffer fragment in bytes
@@ -67,8 +67,8 @@ struct snd_compr_params {
struct snd_compr_tstamp {
__u32 byte_offset;
__u32 copied_total;
- snd_pcm_uframes_t pcm_frames;
- snd_pcm_uframes_t pcm_io_frames;
+ __u32 pcm_frames;
+ __u32 pcm_io_frames;
__u32 sampling_rate;
};
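
snd_pcm_uframes_t is a typedef of unsigned long, so the old timestamp
struct changed size between 32-bit userspace and a 64-bit kernel; the
fixed-width fields (and the protocol version bump above) make the ioctl
payload identical on every ABI. An illustrative compile-time check,
assuming a C11 toolchain and the five fields shown above:

    #include <linux/types.h>
    #include <sound/compress_offload.h>

    /* all five members are __u32 now, so the payload is 20 bytes
     * regardless of the userspace/kernel word size */
    _Static_assert(sizeof(struct snd_compr_tstamp) == 5 * sizeof(__u32),
                   "snd_compr_tstamp layout must match on all ABIs");
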
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index 65e12099ef89..ae665ac59c36 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -146,7 +146,7 @@ struct blkif_request_segment_aligned {
struct blkif_request_rw {
uint8_t nr_segments; /* number of segments */
blkif_vdev_t handle; /* only for read/write requests */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */
#endif
uint64_t id; /* private guest value, echoed in resp */
@@ -163,7 +163,7 @@ struct blkif_request_discard {
uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. */
#define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */
blkif_vdev_t _pad1; /* only for read/write requests */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/
#endif
uint64_t id; /* private guest value, echoed in resp */
@@ -175,7 +175,7 @@ struct blkif_request_discard {
struct blkif_request_other {
uint8_t _pad1;
blkif_vdev_t _pad2; /* only for read/write requests */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/
#endif
uint64_t id; /* private guest value, echoed in resp */
@@ -184,7 +184,7 @@ struct blkif_request_other {
struct blkif_request_indirect {
uint8_t indirect_op;
uint16_t nr_segments;
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */
#endif
uint64_t id;
@@ -192,7 +192,7 @@ struct blkif_request_indirect {
blkif_vdev_t handle;
uint16_t _pad2;
grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
uint32_t _pad3; /* make it 64 byte aligned */
#else
uint64_t _pad3; /* make it 64 byte aligned */
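
Keying the padding on !CONFIG_X86_32 rather than CONFIG_X86_64 makes every
64-bit architecture, notably ARM64 Xen guests, produce the same packed ring
layout the backend expects. A kernel-side sketch of pinning down the
invariant the comments state; the helper name is illustrative:

    #include <linux/bug.h>
    #include <linux/stddef.h>
    #include <xen/interface/io/blkif.h>

    static inline void blkif_abi_checks(void)
    {
            /* frontend and backend must agree that the id sits at byte 8 */
    #ifndef CONFIG_X86_32
            BUILD_BUG_ON(offsetof(struct blkif_request, u.rw.id) != 8);
            BUILD_BUG_ON(offsetof(struct blkif_request, u.indirect.id) != 8);
    #endif
    }
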
diff --git a/init/Kconfig b/init/Kconfig
index 3fc8a2f2fac4..4e5d96ab2034 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -301,20 +301,6 @@ config AUDIT_TREE
depends on AUDITSYSCALL
select FSNOTIFY
-config AUDIT_LOGINUID_IMMUTABLE
- bool "Make audit loginuid immutable"
- depends on AUDIT
- help
- The config option toggles if a task setting its loginuid requires
- CAP_SYS_AUDITCONTROL or if that task should require no special permissions
- but should instead only allow setting its loginuid if it was never
- previously set. On systems which use systemd or a similar central
- process to restart login services this should be set to true. On older
- systems in which an admin would typically have to directly stop and
- start processes this should be set to false. Setting this to true allows
- one to drop potentially dangerous capabilites from the login tasks,
- but may not be backwards compatible with older init systems.
-
source "kernel/irq/Kconfig"
source "kernel/time/Kconfig"
@@ -823,6 +809,12 @@ config GENERIC_SCHED_CLOCK
config ARCH_SUPPORTS_NUMA_BALANCING
bool
+#
+# For architectures that know their GCC __int128 support is sound
+#
+config ARCH_SUPPORTS_INT128
+ bool
+
# For architectures that (ab)use NUMA to represent different memory regions
# all cpu-local but of different latencies, such as SuperH.
#
@@ -1669,6 +1661,18 @@ config BASE_SMALL
default 0 if BASE_FULL
default 1 if !BASE_FULL
+config SYSTEM_TRUSTED_KEYRING
+ bool "Provide system-wide ring of trusted keys"
+ depends on KEYS
+ help
+ Provide a system keyring to which trusted keys can be added. Keys in
+ the keyring are considered to be trusted. Keys may be added at will
+ by the kernel from compiled-in data and from hardware key stores, but
+ userspace may only add extra keys if those keys can be verified by
+ keys already in the keyring.
+
+ Keys in this keyring are used by module signature checking.
+
menuconfig MODULES
bool "Enable loadable module support"
option modules
@@ -1742,6 +1746,7 @@ config MODULE_SRCVERSION_ALL
config MODULE_SIG
bool "Module signature verification"
depends on MODULES
+ select SYSTEM_TRUSTED_KEYRING
select KEYS
select CRYPTO
select ASYMMETRIC_KEY_TYPE
diff --git a/init/main.c b/init/main.c
index 01573fdfa186..febc511e078a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -476,7 +476,7 @@ static void __init mm_init(void)
mem_init();
kmem_cache_init();
percpu_init_late();
- pgtable_init();
+ pgtable_cache_init();
vmalloc_init();
}
diff --git a/ipc/shm.c b/ipc/shm.c
index d69739610fd4..7a51443a51d6 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -208,15 +208,18 @@ static void shm_open(struct vm_area_struct *vma)
*/
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
+ struct file *shm_file;
+
+ shm_file = shp->shm_file;
+ shp->shm_file = NULL;
ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
shm_rmid(ns, shp);
shm_unlock(shp);
- if (!is_file_hugepages(shp->shm_file))
- shmem_lock(shp->shm_file, 0, shp->mlock_user);
+ if (!is_file_hugepages(shm_file))
+ shmem_lock(shm_file, 0, shp->mlock_user);
else if (shp->mlock_user)
- user_shm_unlock(file_inode(shp->shm_file)->i_size,
- shp->mlock_user);
- fput (shp->shm_file);
+ user_shm_unlock(file_inode(shm_file)->i_size, shp->mlock_user);
+ fput(shm_file);
ipc_rcu_putref(shp, shm_rcu_free);
}
@@ -974,15 +977,25 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
ipc_lock_object(&shp->shm_perm);
if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
kuid_t euid = current_euid();
- err = -EPERM;
if (!uid_eq(euid, shp->shm_perm.uid) &&
- !uid_eq(euid, shp->shm_perm.cuid))
+ !uid_eq(euid, shp->shm_perm.cuid)) {
+ err = -EPERM;
goto out_unlock0;
- if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
+ }
+ if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
+ err = -EPERM;
goto out_unlock0;
+ }
}
shm_file = shp->shm_file;
+
+ /* check if shm_destroy() is tearing down shp */
+ if (shm_file == NULL) {
+ err = -EIDRM;
+ goto out_unlock0;
+ }
+
if (is_file_hugepages(shm_file))
goto out_unlock0;
@@ -1101,6 +1114,14 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
goto out_unlock;
ipc_lock_object(&shp->shm_perm);
+
+ /* check if shm_destroy() is tearing down shp */
+ if (shp->shm_file == NULL) {
+ ipc_unlock_object(&shp->shm_perm);
+ err = -EIDRM;
+ goto out_unlock;
+ }
+
path = shp->shm_file->f_path;
path_get(&path);
shp->shm_nattch++;
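
shm_destroy() now detaches shp->shm_file under the IPC object lock, so
shmctl() and shmat() can treat a NULL shm_file as "segment being torn down"
and return -EIDRM instead of racing with the final fput(). An illustrative
user-space analogue of the pattern (not the kernel code itself; the names
are made up):

    #include <errno.h>
    #include <pthread.h>
    #include <stddef.h>

    struct seg {
            pthread_mutex_t lock;
            void *file;             /* NULL once destruction has started */
    };

    static void *seg_destroy(struct seg *s)
    {
            void *file;

            pthread_mutex_lock(&s->lock);
            file = s->file;
            s->file = NULL;         /* mark teardown for concurrent users */
            pthread_mutex_unlock(&s->lock);
            return file;            /* release outside the lock */
    }

    static int seg_use(struct seg *s)
    {
            int err = 0;

            pthread_mutex_lock(&s->lock);
            if (s->file == NULL)    /* destroy already started */
                    err = -EIDRM;
            /* ... otherwise s->file is safe to use here ... */
            pthread_mutex_unlock(&s->lock);
            return err;
    }
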
diff --git a/kernel/.gitignore b/kernel/.gitignore
index b3097bde4e9c..790d83c7d160 100644
--- a/kernel/.gitignore
+++ b/kernel/.gitignore
@@ -5,3 +5,4 @@ config_data.h
config_data.gz
timeconst.h
hz.bc
+x509_certificate_list
diff --git a/kernel/Makefile b/kernel/Makefile
index 09a9c94f42bd..bc010ee272b6 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -41,8 +41,9 @@ ifneq ($(CONFIG_SMP),y)
obj-y += up.o
endif
obj-$(CONFIG_UID16) += uid16.o
+obj-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += system_keyring.o system_certificates.o
obj-$(CONFIG_MODULES) += module.o
-obj-$(CONFIG_MODULE_SIG) += module_signing.o modsign_pubkey.o modsign_certificate.o
+obj-$(CONFIG_MODULE_SIG) += module_signing.o
obj-$(CONFIG_KALLSYMS) += kallsyms.o
obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o
obj-$(CONFIG_KEXEC) += kexec.o
@@ -122,19 +123,53 @@ targets += timeconst.h
$(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
$(call if_changed,bc)
-ifeq ($(CONFIG_MODULE_SIG),y)
+###############################################################################
+#
+# Roll all the X.509 certificates that we can find together and pull them into
+# the kernel so that they get loaded into the system trusted keyring during
+# boot.
#
-# Pull the signing certificate and any extra certificates into the kernel
+# We look in the source root and the build root for all files whose name ends
+# in ".x509". Unfortunately, this will generate duplicate filenames, so we
+# have make canonicalise the pathnames and then sort them to discard the
+# duplicates.
#
+###############################################################################
+ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y)
+X509_CERTIFICATES-y := $(wildcard *.x509) $(wildcard $(srctree)/*.x509)
+X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += $(objtree)/signing_key.x509
+X509_CERTIFICATES-raw := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \
+ $(or $(realpath $(CERT)),$(CERT))))
+X509_CERTIFICATES := $(subst $(realpath $(objtree))/,,$(X509_CERTIFICATES-raw))
+
+ifeq ($(X509_CERTIFICATES),)
+$(warning *** No X.509 certificates found ***)
+endif
+
+ifneq ($(wildcard $(obj)/.x509.list),)
+ifneq ($(shell cat $(obj)/.x509.list),$(X509_CERTIFICATES))
+$(info X.509 certificate list changed)
+$(shell rm $(obj)/.x509.list)
+endif
+endif
+
+kernel/system_certificates.o: $(obj)/x509_certificate_list
-quiet_cmd_touch = TOUCH $@
- cmd_touch = touch $@
+quiet_cmd_x509certs = CERTS $@
+ cmd_x509certs = cat $(X509_CERTIFICATES) /dev/null >$@ $(foreach X509,$(X509_CERTIFICATES),; echo " - Including cert $(X509)")
-extra_certificates:
- $(call cmd,touch)
+targets += $(obj)/x509_certificate_list
+$(obj)/x509_certificate_list: $(X509_CERTIFICATES) $(obj)/.x509.list
+ $(call if_changed,x509certs)
-kernel/modsign_certificate.o: signing_key.x509 extra_certificates
+targets += $(obj)/.x509.list
+$(obj)/.x509.list:
+ @echo $(X509_CERTIFICATES) >$@
+endif
+
+clean-files := x509_certificate_list .x509.list
+ifeq ($(CONFIG_MODULE_SIG),y)
###############################################################################
#
# If module signing is requested, say by allyesconfig, but a key has not been
diff --git a/kernel/audit.c b/kernel/audit.c
index 7b0e23a740ce..906ae5a0233a 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -60,7 +60,6 @@
#ifdef CONFIG_SECURITY
#include <linux/security.h>
#endif
-#include <net/netlink.h>
#include <linux/freezer.h>
#include <linux/tty.h>
#include <linux/pid_namespace.h>
@@ -140,6 +139,17 @@ static struct task_struct *kauditd_task;
static DECLARE_WAIT_QUEUE_HEAD(kauditd_wait);
static DECLARE_WAIT_QUEUE_HEAD(audit_backlog_wait);
+static struct audit_features af = {.vers = AUDIT_FEATURE_VERSION,
+ .mask = -1,
+ .features = 0,
+ .lock = 0,};
+
+static char *audit_feature_names[2] = {
+ "only_unset_loginuid",
+ "loginuid_immutable",
+};
+
+
/* Serialize requests from userspace. */
DEFINE_MUTEX(audit_cmd_mutex);
@@ -584,6 +594,8 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
return -EOPNOTSUPP;
case AUDIT_GET:
case AUDIT_SET:
+ case AUDIT_GET_FEATURE:
+ case AUDIT_SET_FEATURE:
case AUDIT_LIST_RULES:
case AUDIT_ADD_RULE:
case AUDIT_DEL_RULE:
@@ -613,7 +625,7 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type)
int rc = 0;
uid_t uid = from_kuid(&init_user_ns, current_uid());
- if (!audit_enabled) {
+ if (!audit_enabled && msg_type != AUDIT_USER_AVC) {
*ab = NULL;
return rc;
}
@@ -628,6 +640,94 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type)
return rc;
}
+int is_audit_feature_set(int i)
+{
+ return af.features & AUDIT_FEATURE_TO_MASK(i);
+}
+
+
+static int audit_get_feature(struct sk_buff *skb)
+{
+ u32 seq;
+
+ seq = nlmsg_hdr(skb)->nlmsg_seq;
+
+ audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
+ &af, sizeof(af));
+
+ return 0;
+}
+
+static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature,
+ u32 old_lock, u32 new_lock, int res)
+{
+ struct audit_buffer *ab;
+
+ ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE);
+ audit_log_format(ab, "feature=%s old=%d new=%d old_lock=%d new_lock=%d res=%d",
+ audit_feature_names[which], !!old_feature, !!new_feature,
+ !!old_lock, !!new_lock, res);
+ audit_log_end(ab);
+}
+
+static int audit_set_feature(struct sk_buff *skb)
+{
+ struct audit_features *uaf;
+ int i;
+
+ BUILD_BUG_ON(AUDIT_LAST_FEATURE + 1 > sizeof(audit_feature_names)/sizeof(audit_feature_names[0]));
+ uaf = nlmsg_data(nlmsg_hdr(skb));
+
+ /* if there is ever a version 2 we should handle that here */
+
+ for (i = 0; i <= AUDIT_LAST_FEATURE; i++) {
+ u32 feature = AUDIT_FEATURE_TO_MASK(i);
+ u32 old_feature, new_feature, old_lock, new_lock;
+
+ /* if we are not changing this feature, move along */
+ if (!(feature & uaf->mask))
+ continue;
+
+ old_feature = af.features & feature;
+ new_feature = uaf->features & feature;
+ new_lock = (uaf->lock | af.lock) & feature;
+ old_lock = af.lock & feature;
+
+ /* are we changing a locked feature? */
+ if ((af.lock & feature) && (new_feature != old_feature)) {
+ audit_log_feature_change(i, old_feature, new_feature,
+ old_lock, new_lock, 0);
+ return -EPERM;
+ }
+ }
+ /* nothing invalid, do the changes */
+ for (i = 0; i <= AUDIT_LAST_FEATURE; i++) {
+ u32 feature = AUDIT_FEATURE_TO_MASK(i);
+ u32 old_feature, new_feature, old_lock, new_lock;
+
+ /* if we are not changing this feature, move along */
+ if (!(feature & uaf->mask))
+ continue;
+
+ old_feature = af.features & feature;
+ new_feature = uaf->features & feature;
+ old_lock = af.lock & feature;
+ new_lock = (uaf->lock | af.lock) & feature;
+
+ if (new_feature != old_feature)
+ audit_log_feature_change(i, old_feature, new_feature,
+ old_lock, new_lock, 1);
+
+ if (new_feature)
+ af.features |= feature;
+ else
+ af.features &= ~feature;
+ af.lock |= new_lock;
+ }
+
+ return 0;
+}
+
static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
u32 seq;
@@ -659,6 +759,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
switch (msg_type) {
case AUDIT_GET:
+ memset(&status_set, 0, sizeof(status_set));
status_set.enabled = audit_enabled;
status_set.failure = audit_failure;
status_set.pid = audit_pid;
@@ -670,7 +771,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
&status_set, sizeof(status_set));
break;
case AUDIT_SET:
- if (nlh->nlmsg_len < sizeof(struct audit_status))
+ if (nlmsg_len(nlh) < sizeof(struct audit_status))
return -EINVAL;
status_get = (struct audit_status *)data;
if (status_get->mask & AUDIT_STATUS_ENABLED) {
@@ -699,6 +800,16 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT)
err = audit_set_backlog_limit(status_get->backlog_limit);
break;
+ case AUDIT_GET_FEATURE:
+ err = audit_get_feature(skb);
+ if (err)
+ return err;
+ break;
+ case AUDIT_SET_FEATURE:
+ err = audit_set_feature(skb);
+ if (err)
+ return err;
+ break;
case AUDIT_USER:
case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
@@ -715,7 +826,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
}
audit_log_common_recv_msg(&ab, msg_type);
if (msg_type != AUDIT_USER_TTY)
- audit_log_format(ab, " msg='%.1024s'",
+ audit_log_format(ab, " msg='%.*s'",
+ AUDIT_MESSAGE_TEXT_MAX,
(char *)data);
else {
int size;
@@ -818,7 +930,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
struct task_struct *tsk = current;
spin_lock(&tsk->sighand->siglock);
- s.enabled = tsk->signal->audit_tty != 0;
+ s.enabled = tsk->signal->audit_tty;
s.log_passwd = tsk->signal->audit_tty_log_passwd;
spin_unlock(&tsk->sighand->siglock);
@@ -832,7 +944,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
memset(&s, 0, sizeof(s));
/* guard against past and future API changes */
- memcpy(&s, data, min(sizeof(s), (size_t)nlh->nlmsg_len));
+ memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh)));
if ((s.enabled != 0 && s.enabled != 1) ||
(s.log_passwd != 0 && s.log_passwd != 1))
return -EINVAL;
@@ -1067,13 +1179,6 @@ static void wait_for_auditd(unsigned long sleep_time)
remove_wait_queue(&audit_backlog_wait, &wait);
}
-/* Obtain an audit buffer. This routine does locking to obtain the
- * audit buffer, but then no locking is required for calls to
- * audit_log_*format. If the tsk is a task that is currently in a
- * syscall, then the syscall is marked as auditable and an audit record
- * will be written at syscall exit. If there is no associated task, tsk
- * should be NULL. */
-
/**
* audit_log_start - obtain an audit buffer
* @ctx: audit_context (may be NULL)
@@ -1389,7 +1494,7 @@ void audit_log_session_info(struct audit_buffer *ab)
u32 sessionid = audit_get_sessionid(current);
uid_t auid = from_kuid(&init_user_ns, audit_get_loginuid(current));
- audit_log_format(ab, " auid=%u ses=%u\n", auid, sessionid);
+ audit_log_format(ab, " auid=%u ses=%u", auid, sessionid);
}
void audit_log_key(struct audit_buffer *ab, char *key)
@@ -1536,6 +1641,26 @@ void audit_log_name(struct audit_context *context, struct audit_names *n,
}
}
+ /* log the audit_names record type */
+ audit_log_format(ab, " nametype=");
+ switch (n->type) {
+ case AUDIT_TYPE_NORMAL:
+ audit_log_format(ab, "NORMAL");
+ break;
+ case AUDIT_TYPE_PARENT:
+ audit_log_format(ab, "PARENT");
+ break;
+ case AUDIT_TYPE_CHILD_DELETE:
+ audit_log_format(ab, "DELETE");
+ break;
+ case AUDIT_TYPE_CHILD_CREATE:
+ audit_log_format(ab, "CREATE");
+ break;
+ default:
+ audit_log_format(ab, "UNKNOWN");
+ break;
+ }
+
audit_log_fcaps(ab, n);
audit_log_end(ab);
}
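
Taken together, these hunks replace the removed AUDIT_LOGINUID_IMMUTABLE
build option (see the init/Kconfig hunk above) with run-time feature bits
that can be set and then locked against further change. A hedged sketch of
how a privileged init-like daemon might lock loginuid_immutable over the
audit netlink socket, assuming the uapi definitions added by this series;
socket setup and ACK handling are elided, and the helper name is made up:

    #include <string.h>
    #include <sys/socket.h>
    #include <linux/types.h>
    #include <linux/audit.h>
    #include <linux/netlink.h>

    static int lock_loginuid_immutable(int audit_nlsock)
    {
            struct {
                    struct nlmsghdr nlh;
                    struct audit_features af;
            } req;
            __u32 bit = AUDIT_FEATURE_TO_MASK(AUDIT_FEATURE_LOGINUID_IMMUTABLE);

            memset(&req, 0, sizeof(req));
            req.nlh.nlmsg_len = sizeof(req);
            req.nlh.nlmsg_type = AUDIT_SET_FEATURE;
            req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
            req.af.vers = AUDIT_FEATURE_VERSION;
            req.af.mask = bit;      /* only this feature is being changed */
            req.af.features = bit;  /* enable it ... */
            req.af.lock = bit;      /* ... and make the setting permanent */

            return send(audit_nlsock, &req, sizeof(req), 0);
    }
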
diff --git a/kernel/audit.h b/kernel/audit.h
index 123c9b7c3979..b779642b29af 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -197,6 +197,9 @@ struct audit_context {
int fd;
int flags;
} mmap;
+ struct {
+ int argc;
+ } execve;
};
int fds[2];
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index f7aee8be7fb2..51f3fd4c1ed3 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -343,6 +343,7 @@ static int audit_field_valid(struct audit_entry *entry, struct audit_field *f)
case AUDIT_DEVMINOR:
case AUDIT_EXIT:
case AUDIT_SUCCESS:
+ case AUDIT_INODE:
/* bit ops are only useful on syscall args */
if (f->op == Audit_bitmask || f->op == Audit_bittest)
return -EINVAL;
@@ -423,7 +424,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
f->lsm_rule = NULL;
/* Support legacy tests for a valid loginuid */
- if ((f->type == AUDIT_LOGINUID) && (f->val == ~0U)) {
+ if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) {
f->type = AUDIT_LOGINUID_SET;
f->val = 0;
}
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 9845cb32b60a..90594c9f7552 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -95,13 +95,6 @@ struct audit_aux_data {
/* Number of target pids per aux struct. */
#define AUDIT_AUX_PIDS 16
-struct audit_aux_data_execve {
- struct audit_aux_data d;
- int argc;
- int envc;
- struct mm_struct *mm;
-};
-
struct audit_aux_data_pids {
struct audit_aux_data d;
pid_t target_pid[AUDIT_AUX_PIDS];
@@ -121,12 +114,6 @@ struct audit_aux_data_bprm_fcaps {
struct audit_cap_data new_pcap;
};
-struct audit_aux_data_capset {
- struct audit_aux_data d;
- pid_t pid;
- struct audit_cap_data cap;
-};
-
struct audit_tree_refs {
struct audit_tree_refs *next;
struct audit_chunk *c[31];
@@ -566,7 +553,7 @@ static int audit_filter_rules(struct task_struct *tsk,
break;
case AUDIT_INODE:
if (name)
- result = (name->ino == f->val);
+ result = audit_comparator(name->ino, f->op, f->val);
else if (ctx) {
list_for_each_entry(n, &ctx->names_list, list) {
if (audit_comparator(n->ino, f->op, f->val)) {
@@ -943,8 +930,10 @@ int audit_alloc(struct task_struct *tsk)
return 0; /* Return if not auditing. */
state = audit_filter_task(tsk, &key);
- if (state == AUDIT_DISABLED)
+ if (state == AUDIT_DISABLED) {
+ clear_tsk_thread_flag(tsk, TIF_SYSCALL_AUDIT);
return 0;
+ }
if (!(context = audit_alloc_context(state))) {
kfree(key);
@@ -1149,20 +1138,16 @@ static int audit_log_single_execve_arg(struct audit_context *context,
}
static void audit_log_execve_info(struct audit_context *context,
- struct audit_buffer **ab,
- struct audit_aux_data_execve *axi)
+ struct audit_buffer **ab)
{
int i, len;
size_t len_sent = 0;
const char __user *p;
char *buf;
- if (axi->mm != current->mm)
- return; /* execve failed, no additional info */
-
- p = (const char __user *)axi->mm->arg_start;
+ p = (const char __user *)current->mm->arg_start;
- audit_log_format(*ab, "argc=%d", axi->argc);
+ audit_log_format(*ab, "argc=%d", context->execve.argc);
/*
* we need some kernel buffer to hold the userspace args. Just
@@ -1176,7 +1161,7 @@ static void audit_log_execve_info(struct audit_context *context,
return;
}
- for (i = 0; i < axi->argc; i++) {
+ for (i = 0; i < context->execve.argc; i++) {
len = audit_log_single_execve_arg(context, ab, i,
&len_sent, p, buf);
if (len <= 0)
@@ -1279,6 +1264,9 @@ static void show_special(struct audit_context *context, int *call_panic)
audit_log_format(ab, "fd=%d flags=0x%x", context->mmap.fd,
context->mmap.flags);
break; }
+ case AUDIT_EXECVE: {
+ audit_log_execve_info(context, &ab);
+ break; }
}
audit_log_end(ab);
}
@@ -1325,11 +1313,6 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
switch (aux->type) {
- case AUDIT_EXECVE: {
- struct audit_aux_data_execve *axi = (void *)aux;
- audit_log_execve_info(context, &ab, axi);
- break; }
-
case AUDIT_BPRM_FCAPS: {
struct audit_aux_data_bprm_fcaps *axs = (void *)aux;
audit_log_format(ab, "fver=%x", axs->fcap_ver);
@@ -1964,6 +1947,43 @@ int auditsc_get_stamp(struct audit_context *ctx,
/* global counter which is incremented every time something logs in */
static atomic_t session_id = ATOMIC_INIT(0);
+static int audit_set_loginuid_perm(kuid_t loginuid)
+{
+ /* if we are unset, we don't need privs */
+ if (!audit_loginuid_set(current))
+ return 0;
+ /* AUDIT_FEATURE_LOGINUID_IMMUTABLE means never ever allow a change */
+ if (is_audit_feature_set(AUDIT_FEATURE_LOGINUID_IMMUTABLE))
+ return -EPERM;
+ /* it is set, you need permission */
+ if (!capable(CAP_AUDIT_CONTROL))
+ return -EPERM;
+ /* reject if this is not an unset and we don't allow that */
+ if (is_audit_feature_set(AUDIT_FEATURE_ONLY_UNSET_LOGINUID) && uid_valid(loginuid))
+ return -EPERM;
+ return 0;
+}
+
+static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid,
+ unsigned int oldsessionid, unsigned int sessionid,
+ int rc)
+{
+ struct audit_buffer *ab;
+ uid_t uid, ologinuid, nloginuid;
+
+ uid = from_kuid(&init_user_ns, task_uid(current));
+ ologinuid = from_kuid(&init_user_ns, koldloginuid);
+ nloginuid = from_kuid(&init_user_ns, kloginuid);
+
+ ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
+ if (!ab)
+ return;
+ audit_log_format(ab, "pid=%d uid=%u old auid=%u new auid=%u old "
+ "ses=%u new ses=%u res=%d", current->pid, uid, ologinuid,
+ nloginuid, oldsessionid, sessionid, !rc);
+ audit_log_end(ab);
+}
+
/**
* audit_set_loginuid - set current task's audit_context loginuid
* @loginuid: loginuid value
@@ -1975,37 +1995,26 @@ static atomic_t session_id = ATOMIC_INIT(0);
int audit_set_loginuid(kuid_t loginuid)
{
struct task_struct *task = current;
- struct audit_context *context = task->audit_context;
- unsigned int sessionid;
+ unsigned int oldsessionid, sessionid = (unsigned int)-1;
+ kuid_t oldloginuid;
+ int rc;
-#ifdef CONFIG_AUDIT_LOGINUID_IMMUTABLE
- if (audit_loginuid_set(task))
- return -EPERM;
-#else /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
- if (!capable(CAP_AUDIT_CONTROL))
- return -EPERM;
-#endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
+ oldloginuid = audit_get_loginuid(current);
+ oldsessionid = audit_get_sessionid(current);
- sessionid = atomic_inc_return(&session_id);
- if (context && context->in_syscall) {
- struct audit_buffer *ab;
+ rc = audit_set_loginuid_perm(loginuid);
+ if (rc)
+ goto out;
+
+ /* are we setting or clearing? */
+ if (uid_valid(loginuid))
+ sessionid = atomic_inc_return(&session_id);
- ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
- if (ab) {
- audit_log_format(ab, "login pid=%d uid=%u "
- "old auid=%u new auid=%u"
- " old ses=%u new ses=%u",
- task->pid,
- from_kuid(&init_user_ns, task_uid(task)),
- from_kuid(&init_user_ns, task->loginuid),
- from_kuid(&init_user_ns, loginuid),
- task->sessionid, sessionid);
- audit_log_end(ab);
- }
- }
task->sessionid = sessionid;
task->loginuid = loginuid;
- return 0;
+out:
+ audit_log_set_loginuid(oldloginuid, loginuid, oldsessionid, sessionid, rc);
+ return rc;
}
/**
@@ -2126,22 +2135,12 @@ void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mo
context->ipc.has_perm = 1;
}
-int __audit_bprm(struct linux_binprm *bprm)
+void __audit_bprm(struct linux_binprm *bprm)
{
- struct audit_aux_data_execve *ax;
struct audit_context *context = current->audit_context;
- ax = kmalloc(sizeof(*ax), GFP_KERNEL);
- if (!ax)
- return -ENOMEM;
-
- ax->argc = bprm->argc;
- ax->envc = bprm->envc;
- ax->mm = bprm->mm;
- ax->d.type = AUDIT_EXECVE;
- ax->d.next = context->aux;
- context->aux = (void *)ax;
- return 0;
+ context->type = AUDIT_EXECVE;
+ context->execve.argc = bprm->argc;
}
diff --git a/kernel/bounds.c b/kernel/bounds.c
index 5253204afdca..9fd4246b04b8 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -22,6 +22,6 @@ void foo(void)
#ifdef CONFIG_SMP
DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
#endif
- DEFINE(BLOATED_SPINLOCKS, sizeof(spinlock_t) > sizeof(int));
+ DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
/* End of constants */
}
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e0839bcd48c8..bc1dcabe9217 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -90,6 +90,14 @@ static DEFINE_MUTEX(cgroup_mutex);
static DEFINE_MUTEX(cgroup_root_mutex);
/*
+ * cgroup destruction makes heavy use of work items and there can be a lot
+ * of concurrent destructions. Use a separate workqueue so that cgroup
+ * destruction work items don't end up filling up max_active of system_wq
+ * which may lead to deadlock.
+ */
+static struct workqueue_struct *cgroup_destroy_wq;
+
+/*
* Generate an array of cgroup subsystem pointers. At boot time, this is
* populated with the built in subsystems, and modular subsystems are
* registered after that. The mutable section of this array is protected by
@@ -191,6 +199,7 @@ static void cgroup_destroy_css_killed(struct cgroup *cgrp);
static int cgroup_destroy_locked(struct cgroup *cgrp);
static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
bool is_add);
+static int cgroup_file_release(struct inode *inode, struct file *file);
/**
* cgroup_css - obtain a cgroup's css for the specified subsystem
@@ -871,7 +880,7 @@ static void cgroup_free_rcu(struct rcu_head *head)
struct cgroup *cgrp = container_of(head, struct cgroup, rcu_head);
INIT_WORK(&cgrp->destroy_work, cgroup_free_fn);
- schedule_work(&cgrp->destroy_work);
+ queue_work(cgroup_destroy_wq, &cgrp->destroy_work);
}
static void cgroup_diput(struct dentry *dentry, struct inode *inode)
@@ -881,6 +890,16 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
struct cgroup *cgrp = dentry->d_fsdata;
BUG_ON(!(cgroup_is_dead(cgrp)));
+
+ /*
+ * XXX: cgrp->id is only used to look up css's. As cgroup
+ * and css's lifetimes will be decoupled, it should be made
+ * per-subsystem and moved to css->id so that lookups are
+ * successful until the target css is released.
+ */
+ idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
+ cgrp->id = -1;
+
call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
} else {
struct cfent *cfe = __d_cfe(dentry);
@@ -895,11 +914,6 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
iput(inode);
}
-static int cgroup_delete(const struct dentry *d)
-{
- return 1;
-}
-
static void remove_dir(struct dentry *d)
{
struct dentry *parent = dget(d->d_parent);
@@ -1486,7 +1500,7 @@ static int cgroup_get_rootdir(struct super_block *sb)
{
static const struct dentry_operations cgroup_dops = {
.d_iput = cgroup_diput,
- .d_delete = cgroup_delete,
+ .d_delete = always_delete_dentry,
};
struct inode *inode =
@@ -2426,7 +2440,7 @@ static const struct file_operations cgroup_seqfile_operations = {
.read = seq_read,
.write = cgroup_file_write,
.llseek = seq_lseek,
- .release = single_release,
+ .release = cgroup_file_release,
};
static int cgroup_file_open(struct inode *inode, struct file *file)
@@ -2487,6 +2501,8 @@ static int cgroup_file_release(struct inode *inode, struct file *file)
ret = cft->release(inode, file);
if (css->ss)
css_put(css);
+ if (file->f_op == &cgroup_seqfile_operations)
+ single_release(inode, file);
return ret;
}
@@ -4254,7 +4270,7 @@ static void css_free_rcu_fn(struct rcu_head *rcu_head)
* css_put(). dput() requires process context which we don't have.
*/
INIT_WORK(&css->destroy_work, css_free_work_fn);
- schedule_work(&css->destroy_work);
+ queue_work(cgroup_destroy_wq, &css->destroy_work);
}
static void css_release(struct percpu_ref *ref)
@@ -4262,6 +4278,7 @@ static void css_release(struct percpu_ref *ref)
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
+ rcu_assign_pointer(css->cgroup->subsys[css->ss->subsys_id], NULL);
call_rcu(&css->rcu_head, css_free_rcu_fn);
}
@@ -4420,14 +4437,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
root->number_of_cgroups++;
- /* each css holds a ref to the cgroup's dentry and the parent css */
- for_each_root_subsys(root, ss) {
- struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
-
- dget(dentry);
- css_get(css->parent);
- }
-
/* hold a ref to the parent's dentry */
dget(parent->dentry);
@@ -4439,6 +4448,13 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
if (err)
goto err_destroy;
+ /* each css holds a ref to the cgroup's dentry and parent css */
+ dget(dentry);
+ css_get(css->parent);
+
+ /* mark it consumed for error path */
+ css_ar[ss->subsys_id] = NULL;
+
if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
parent->parent) {
pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
@@ -4485,6 +4501,14 @@ err_free_cgrp:
return err;
err_destroy:
+ for_each_root_subsys(root, ss) {
+ struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
+
+ if (css) {
+ percpu_ref_cancel_init(&css->refcnt);
+ ss->css_free(css);
+ }
+ }
cgroup_destroy_locked(cgrp);
mutex_unlock(&cgroup_mutex);
mutex_unlock(&dentry->d_inode->i_mutex);
@@ -4544,7 +4568,7 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
container_of(ref, struct cgroup_subsys_state, refcnt);
INIT_WORK(&css->destroy_work, css_killed_work_fn);
- schedule_work(&css->destroy_work);
+ queue_work(cgroup_destroy_wq, &css->destroy_work);
}
/**
@@ -4646,8 +4670,12 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
* will be invoked to perform the rest of destruction once the
* percpu refs of all css's are confirmed to be killed.
*/
- for_each_root_subsys(cgrp->root, ss)
- kill_css(cgroup_css(cgrp, ss));
+ for_each_root_subsys(cgrp->root, ss) {
+ struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
+
+ if (css)
+ kill_css(css);
+ }
/*
* Mark @cgrp dead. This prevents further task migration and child
@@ -4716,14 +4744,6 @@ static void cgroup_destroy_css_killed(struct cgroup *cgrp)
/* delete this cgroup from parent->children */
list_del_rcu(&cgrp->sibling);
- /*
- * We should remove the cgroup object from idr before its grace
- * period starts, so we won't be looking up a cgroup while the
- * cgroup is being freed.
- */
- idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
- cgrp->id = -1;
-
dput(d);
set_bit(CGRP_RELEASABLE, &parent->flags);
@@ -5068,6 +5088,22 @@ out:
return err;
}
+static int __init cgroup_wq_init(void)
+{
+ /*
+ * There isn't much point in executing the destruction path in
+ * parallel. A good chunk of it is serialized with cgroup_mutex anyway.
+ * Use 1 for @max_active.
+ *
+ * We would prefer to do this in cgroup_init() above, but that
+ * is called before init_workqueues(), so leave this until after.
+ */
+ cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
+ BUG_ON(!cgroup_destroy_wq);
+ return 0;
+}
+core_initcall(cgroup_wq_init);
+
/*
* proc_cgroup_show()
* - Print task's cgroup paths into seq_file, one line for each hierarchy
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 6bf981e13c43..4772034b4b17 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1033,8 +1033,10 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
need_loop = task_has_mempolicy(tsk) ||
!nodes_intersects(*newmems, tsk->mems_allowed);
- if (need_loop)
+ if (need_loop) {
+ local_irq_disable();
write_seqcount_begin(&tsk->mems_allowed_seq);
+ }
nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
@@ -1042,8 +1044,10 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
tsk->mems_allowed = *newmems;
- if (need_loop)
+ if (need_loop) {
write_seqcount_end(&tsk->mems_allowed_seq);
+ local_irq_enable();
+ }
task_unlock(tsk);
}
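
The write side of mems_allowed_seq must now run with interrupts disabled:
if an interrupt fired between write_seqcount_begin() and
write_seqcount_end() and its handler entered a read-side retry loop on the
same CPU, that reader would spin on an odd sequence count forever. For
reference, a minimal sketch of the read side being protected, modelled on
the mems_allowed readers rather than copied from them (the helper name is
illustrative):

    #include <linux/sched.h>
    #include <linux/nodemask.h>

    static nodemask_t read_mems_allowed(struct task_struct *tsk)
    {
            nodemask_t mems;
            unsigned int seq;

            do {
                    seq = read_seqcount_begin(&tsk->mems_allowed_seq);
                    mems = tsk->mems_allowed;
            } while (read_seqcount_retry(&tsk->mems_allowed_seq, seq));

            return mems;
    }
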
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d724e7757cd1..f5744010a8d2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1396,6 +1396,8 @@ event_sched_out(struct perf_event *event,
if (event->state != PERF_EVENT_STATE_ACTIVE)
return;
+ perf_pmu_disable(event->pmu);
+
event->state = PERF_EVENT_STATE_INACTIVE;
if (event->pending_disable) {
event->pending_disable = 0;
@@ -1412,6 +1414,8 @@ event_sched_out(struct perf_event *event,
ctx->nr_freq--;
if (event->attr.exclusive || !cpuctx->active_oncpu)
cpuctx->exclusive = 0;
+
+ perf_pmu_enable(event->pmu);
}
static void
@@ -1652,6 +1656,7 @@ event_sched_in(struct perf_event *event,
struct perf_event_context *ctx)
{
u64 tstamp = perf_event_time(event);
+ int ret = 0;
if (event->state <= PERF_EVENT_STATE_OFF)
return 0;
@@ -1674,10 +1679,13 @@ event_sched_in(struct perf_event *event,
*/
smp_wmb();
+ perf_pmu_disable(event->pmu);
+
if (event->pmu->add(event, PERF_EF_START)) {
event->state = PERF_EVENT_STATE_INACTIVE;
event->oncpu = -1;
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto out;
}
event->tstamp_running += tstamp - event->tstamp_stopped;
@@ -1693,7 +1701,10 @@ event_sched_in(struct perf_event *event,
if (event->attr.exclusive)
cpuctx->exclusive = 1;
- return 0;
+out:
+ perf_pmu_enable(event->pmu);
+
+ return ret;
}
static int
@@ -2743,6 +2754,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
if (!event_filter_match(event))
continue;
+ perf_pmu_disable(event->pmu);
+
hwc = &event->hw;
if (hwc->interrupts == MAX_INTERRUPTS) {
@@ -2752,7 +2765,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
}
if (!event->attr.freq || !event->attr.sample_freq)
- continue;
+ goto next;
/*
* stop the event and update event->count
@@ -2774,6 +2787,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
perf_adjust_period(event, period, delta, false);
event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
+ next:
+ perf_pmu_enable(event->pmu);
}
perf_pmu_enable(ctx->pmu);
@@ -5680,11 +5695,6 @@ static void swevent_hlist_put(struct perf_event *event)
{
int cpu;
- if (event->cpu != -1) {
- swevent_hlist_put_cpu(event, event->cpu);
- return;
- }
-
for_each_possible_cpu(cpu)
swevent_hlist_put_cpu(event, cpu);
}
@@ -5718,9 +5728,6 @@ static int swevent_hlist_get(struct perf_event *event)
int err;
int cpu, failed_cpu;
- if (event->cpu != -1)
- return swevent_hlist_get_cpu(event, event->cpu);
-
get_online_cpus();
for_each_possible_cpu(cpu) {
err = swevent_hlist_get_cpu(event, cpu);
diff --git a/kernel/extable.c b/kernel/extable.c
index 832cb28105bb..763faf037ec1 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -61,7 +61,7 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr)
static inline int init_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_sinittext &&
- addr <= (unsigned long)_einittext)
+ addr < (unsigned long)_einittext)
return 1;
return 0;
}
@@ -69,7 +69,7 @@ static inline int init_kernel_text(unsigned long addr)
int core_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_stext &&
- addr <= (unsigned long)_etext)
+ addr < (unsigned long)_etext)
return 1;
if (system_state == SYSTEM_BOOTING &&
diff --git a/kernel/fork.c b/kernel/fork.c
index 728d5be9548c..5721f0e3f2da 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -537,6 +537,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
spin_lock_init(&mm->page_table_lock);
mm_init_aio(mm);
mm_init_owner(mm, p);
+ clear_tlb_flush_pending(mm);
if (likely(!mm_alloc_pgd(mm))) {
mm->def_flags = 0;
diff --git a/kernel/freezer.c b/kernel/freezer.c
index b462fa197517..aa6a8aadb911 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -19,6 +19,12 @@ EXPORT_SYMBOL(system_freezing_cnt);
bool pm_freezing;
bool pm_nosig_freezing;
+/*
+ * Temporary export for the deadlock workaround in ata_scsi_hotplug().
+ * Remove once the hack becomes unnecessary.
+ */
+EXPORT_SYMBOL_GPL(pm_freezing);
+
/* protects freezing and frozen transitions */
static DEFINE_SPINLOCK(freezer_lock);
diff --git a/kernel/futex.c b/kernel/futex.c
index 80ba086f021d..f6ff0191ecf7 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -251,6 +251,9 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
return -EINVAL;
address -= key->both.offset;
+ if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
+ return -EFAULT;
+
/*
* PROCESS_PRIVATE futexes are fast.
* As the mm cannot disappear under us and the 'key' only needs
@@ -259,8 +262,6 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
* but access_ok() should be faster than find_vma()
*/
if (!fshared) {
- if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
- return -EFAULT;
key->private.mm = mm;
key->private.address = address;
get_futex_key_refs(key);
@@ -288,7 +289,7 @@ again:
put_page(page);
/* serialize against __split_huge_page_splitting() */
local_irq_disable();
- if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) {
+ if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) {
page_head = compound_head(page);
/*
* page_head is valid pointer but we must pin
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index cb228bf21760..abcd6ca86cb7 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -50,7 +50,7 @@ static void resume_irqs(bool want_early)
bool is_early = desc->action &&
desc->action->flags & IRQF_EARLY_RESUME;
- if (is_early != want_early)
+ if (!is_early && want_early)
continue;
raw_spin_lock_irqsave(&desc->lock, flags);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 490afc03627e..9c970167e402 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -47,6 +47,9 @@ u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
+/* Flag to indicate we are going to kexec a new kernel */
+bool kexec_in_progress = false;
+
/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
.name = "Crash kernel",
@@ -1675,7 +1678,9 @@ int kernel_kexec(void)
} else
#endif
{
+ kexec_in_progress = true;
kernel_restart_prepare(NULL);
+ migrate_to_reboot_cpu();
printk(KERN_EMERG "Starting new kernel\n");
machine_shutdown();
}
diff --git a/kernel/modsign_certificate.S b/kernel/modsign_certificate.S
deleted file mode 100644
index 4a9a86d12c8b..000000000000
--- a/kernel/modsign_certificate.S
+++ /dev/null
@@ -1,12 +0,0 @@
-#include <linux/export.h>
-
-#define GLOBAL(name) \
- .globl VMLINUX_SYMBOL(name); \
- VMLINUX_SYMBOL(name):
-
- .section ".init.data","aw"
-
-GLOBAL(modsign_certificate_list)
- .incbin "signing_key.x509"
- .incbin "extra_certificates"
-GLOBAL(modsign_certificate_list_end)
diff --git a/kernel/modsign_pubkey.c b/kernel/modsign_pubkey.c
deleted file mode 100644
index 7cbd4507a7e6..000000000000
--- a/kernel/modsign_pubkey.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/* Public keys for module signature verification
- *
- * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/cred.h>
-#include <linux/err.h>
-#include <keys/asymmetric-type.h>
-#include "module-internal.h"
-
-struct key *modsign_keyring;
-
-extern __initconst const u8 modsign_certificate_list[];
-extern __initconst const u8 modsign_certificate_list_end[];
-
-/*
- * We need to make sure ccache doesn't cache the .o file as it doesn't notice
- * if modsign.pub changes.
- */
-static __initconst const char annoy_ccache[] = __TIME__ "foo";
-
-/*
- * Load the compiled-in keys
- */
-static __init int module_verify_init(void)
-{
- pr_notice("Initialise module verification\n");
-
- modsign_keyring = keyring_alloc(".module_sign",
- KUIDT_INIT(0), KGIDT_INIT(0),
- current_cred(),
- ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
- KEY_USR_VIEW | KEY_USR_READ),
- KEY_ALLOC_NOT_IN_QUOTA, NULL);
- if (IS_ERR(modsign_keyring))
- panic("Can't allocate module signing keyring\n");
-
- return 0;
-}
-
-/*
- * Must be initialised before we try and load the keys into the keyring.
- */
-device_initcall(module_verify_init);
-
-/*
- * Load the compiled-in keys
- */
-static __init int load_module_signing_keys(void)
-{
- key_ref_t key;
- const u8 *p, *end;
- size_t plen;
-
- pr_notice("Loading module verification certificates\n");
-
- end = modsign_certificate_list_end;
- p = modsign_certificate_list;
- while (p < end) {
- /* Each cert begins with an ASN.1 SEQUENCE tag and must be more
- * than 256 bytes in size.
- */
- if (end - p < 4)
- goto dodgy_cert;
- if (p[0] != 0x30 &&
- p[1] != 0x82)
- goto dodgy_cert;
- plen = (p[2] << 8) | p[3];
- plen += 4;
- if (plen > end - p)
- goto dodgy_cert;
-
- key = key_create_or_update(make_key_ref(modsign_keyring, 1),
- "asymmetric",
- NULL,
- p,
- plen,
- (KEY_POS_ALL & ~KEY_POS_SETATTR) |
- KEY_USR_VIEW,
- KEY_ALLOC_NOT_IN_QUOTA);
- if (IS_ERR(key))
- pr_err("MODSIGN: Problem loading in-kernel X.509 certificate (%ld)\n",
- PTR_ERR(key));
- else
- pr_notice("MODSIGN: Loaded cert '%s'\n",
- key_ref_to_ptr(key)->description);
- p += plen;
- }
-
- return 0;
-
-dodgy_cert:
- pr_err("MODSIGN: Problem parsing in-kernel X.509 certificate list\n");
- return 0;
-}
-late_initcall(load_module_signing_keys);
diff --git a/kernel/module-internal.h b/kernel/module-internal.h
index 24f9247b7d02..915e123a430f 100644
--- a/kernel/module-internal.h
+++ b/kernel/module-internal.h
@@ -9,6 +9,4 @@
* 2 of the Licence, or (at your option) any later version.
*/
-extern struct key *modsign_keyring;
-
extern int mod_verify_sig(const void *mod, unsigned long *_modlen);
diff --git a/kernel/module_signing.c b/kernel/module_signing.c
index f2970bddc5ea..be5b8fac4bd0 100644
--- a/kernel/module_signing.c
+++ b/kernel/module_signing.c
@@ -14,6 +14,7 @@
#include <crypto/public_key.h>
#include <crypto/hash.h>
#include <keys/asymmetric-type.h>
+#include <keys/system_keyring.h>
#include "module-internal.h"
/*
@@ -28,7 +29,7 @@
*/
struct module_signature {
u8 algo; /* Public-key crypto algorithm [enum pkey_algo] */
- u8 hash; /* Digest algorithm [enum pkey_hash_algo] */
+ u8 hash; /* Digest algorithm [enum hash_algo] */
u8 id_type; /* Key identifier type [enum pkey_id_type] */
u8 signer_len; /* Length of signer's name */
u8 key_id_len; /* Length of key identifier */
@@ -39,7 +40,7 @@ struct module_signature {
/*
* Digest the module contents.
*/
-static struct public_key_signature *mod_make_digest(enum pkey_hash_algo hash,
+static struct public_key_signature *mod_make_digest(enum hash_algo hash,
const void *mod,
unsigned long modlen)
{
@@ -54,7 +55,7 @@ static struct public_key_signature *mod_make_digest(enum pkey_hash_algo hash,
/* Allocate the hashing algorithm we're going to need and find out how
* big the hash operational data will be.
*/
- tfm = crypto_alloc_shash(pkey_hash_algo[hash], 0, 0);
+ tfm = crypto_alloc_shash(hash_algo_name[hash], 0, 0);
if (IS_ERR(tfm))
return (PTR_ERR(tfm) == -ENOENT) ? ERR_PTR(-ENOPKG) : ERR_CAST(tfm);
@@ -157,7 +158,7 @@ static struct key *request_asymmetric_key(const char *signer, size_t signer_len,
pr_debug("Look up: \"%s\"\n", id);
- key = keyring_search(make_key_ref(modsign_keyring, 1),
+ key = keyring_search(make_key_ref(system_trusted_keyring, 1),
&key_type_asymmetric, id);
if (IS_ERR(key))
pr_warn("Request for unknown module key '%s' err %ld\n",
@@ -217,7 +218,7 @@ int mod_verify_sig(const void *mod, unsigned long *_modlen)
return -ENOPKG;
if (ms.hash >= PKEY_HASH__LAST ||
- !pkey_hash_algo[ms.hash])
+ !hash_algo_name[ms.hash])
return -ENOPKG;
key = request_asymmetric_key(sig, ms.signer_len,
diff --git a/kernel/padata.c b/kernel/padata.c
index 07af2c95dcfe..2abd25d79cc8 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -46,6 +46,7 @@ static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
static int padata_cpu_hash(struct parallel_data *pd)
{
+ unsigned int seq_nr;
int cpu_index;
/*
@@ -53,10 +54,8 @@ static int padata_cpu_hash(struct parallel_data *pd)
* seq_nr mod. number of cpus in use.
*/
- spin_lock(&pd->seq_lock);
- cpu_index = pd->seq_nr % cpumask_weight(pd->cpumask.pcpu);
- pd->seq_nr++;
- spin_unlock(&pd->seq_lock);
+ seq_nr = atomic_inc_return(&pd->seq_nr);
+ cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
return padata_index_to_cpu(pd, cpu_index);
}
@@ -429,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
padata_init_pqueues(pd);
padata_init_squeues(pd);
setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
- pd->seq_nr = 0;
+ atomic_set(&pd->seq_nr, -1);
atomic_set(&pd->reorder_objects, 0);
atomic_set(&pd->refcnt, 0);
pd->pinst = pinst;
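
Replacing the seq_lock/seq_nr pair with one atomic shrinks the serialized
section to a single increment and drops a lock that was taken for every
parallel object. An illustrative user-space analogue of the resulting
round-robin, with C11 atomics standing in for the kernel's atomic_t (the
helper name is made up):

    #include <stdatomic.h>

    static atomic_uint seq_nr;  /* the patch starts at -1 so the first
                                 * atomic_inc_return() yields 0; here the
                                 * pre-increment value is used instead */

    static unsigned int next_cpu_index(unsigned int nr_cpus)
    {
            return atomic_fetch_add(&seq_nr, 1) % nr_cpus;
    }
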
diff --git a/kernel/power/console.c b/kernel/power/console.c
index 463aa6736751..eacb8bd8cab4 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -81,6 +81,7 @@ void pm_vt_switch_unregister(struct device *dev)
list_for_each_entry(tmp, &pm_vt_switch_list, head) {
if (tmp->dev == dev) {
list_del(&tmp->head);
+ kfree(tmp);
break;
}
}
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 10c22cae83a0..b38109e204af 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -792,7 +792,8 @@ void free_basic_memory_bitmaps(void)
{
struct memory_bitmap *bm1, *bm2;
- BUG_ON(!(forbidden_pages_map && free_pages_map));
+ if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
+ return;
bm1 = forbidden_pages_map;
bm2 = free_pages_map;
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 24850270c802..98d357584cd6 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -70,6 +70,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
data->swap = swsusp_resume_device ?
swap_type_of(swsusp_resume_device, 0, NULL) : -1;
data->mode = O_RDONLY;
+ data->free_bitmaps = false;
error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
if (error)
pm_notifier_call_chain(PM_POST_HIBERNATION);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 6abb03dff5c0..08a765232432 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1632,7 +1632,7 @@ module_param(rcu_idle_gp_delay, int, 0644);
static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
module_param(rcu_idle_lazy_gp_delay, int, 0644);
-extern int tick_nohz_enabled;
+extern int tick_nohz_active;
/*
* Try to advance callbacks for all flavors of RCU on the current CPU, but
@@ -1729,7 +1729,7 @@ static void rcu_prepare_for_idle(int cpu)
int tne;
/* Handle nohz enablement switches conservatively. */
- tne = ACCESS_ONCE(tick_nohz_enabled);
+ tne = ACCESS_ONCE(tick_nohz_active);
if (tne != rdtp->tick_nohz_enabled_snap) {
if (rcu_cpu_has_callbacks(cpu, NULL))
invoke_rcu_core(); /* force nohz to see update. */
diff --git a/kernel/reboot.c b/kernel/reboot.c
index f813b3474646..662c83fc16b7 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -104,7 +104,7 @@ int unregister_reboot_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(unregister_reboot_notifier);
-static void migrate_to_reboot_cpu(void)
+void migrate_to_reboot_cpu(void)
{
/* The boot cpu is always logical cpu 0 */
int cpu = reboot_cpu;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c1808606ee5f..a88f4a485c5e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2660,6 +2660,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
} while (need_resched());
}
EXPORT_SYMBOL(preempt_schedule);
+#endif /* CONFIG_PREEMPT */
/*
* this is the entry point to schedule() from kernel preemption
@@ -2693,8 +2694,6 @@ asmlinkage void __sched preempt_schedule_irq(void)
exception_exit(prev_state);
}
-#endif /* CONFIG_PREEMPT */
-
int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
void *key)
{
@@ -4762,7 +4761,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
cpumask_clear_cpu(rq->cpu, old_rd->span);
/*
- * If we dont want to free the old_rt yet then
+ * If we don't want to free the old_rd yet then
* set old_rd to NULL to skip the freeing later
* in this function:
*/
@@ -4903,6 +4902,7 @@ DEFINE_PER_CPU(struct sched_domain *, sd_asym);
static void update_top_cache_domain(int cpu)
{
struct sched_domain *sd;
+ struct sched_domain *busy_sd = NULL;
int id = cpu;
int size = 1;
@@ -4910,8 +4910,9 @@ static void update_top_cache_domain(int cpu)
if (sd) {
id = cpumask_first(sched_domain_span(sd));
size = cpumask_weight(sched_domain_span(sd));
- rcu_assign_pointer(per_cpu(sd_busy, cpu), sd->parent);
+ busy_sd = sd->parent; /* sd_busy */
}
+ rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
per_cpu(sd_llc_size, cpu) = size;
@@ -5112,6 +5113,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
* die on a /0 trap.
*/
sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
+ sg->sgp->power_orig = sg->sgp->power;
/*
* Make sure the first group of this domain contains the
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e8b652ebe027..c7395d97e4cb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -178,59 +178,61 @@ void sched_init_granularity(void)
update_sysctl();
}
-#if BITS_PER_LONG == 32
-# define WMULT_CONST (~0UL)
-#else
-# define WMULT_CONST (1UL << 32)
-#endif
-
+#define WMULT_CONST (~0U)
#define WMULT_SHIFT 32
-/*
- * Shift right and round:
- */
-#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
+static void __update_inv_weight(struct load_weight *lw)
+{
+ unsigned long w;
+
+ if (likely(lw->inv_weight))
+ return;
+
+ w = scale_load_down(lw->weight);
+
+ if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
+ lw->inv_weight = 1;
+ else if (unlikely(!w))
+ lw->inv_weight = WMULT_CONST;
+ else
+ lw->inv_weight = WMULT_CONST / w;
+}
/*
- * delta *= weight / lw
+ * delta_exec * weight / lw.weight
+ * OR
+ * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
+ *
+ * Either weight := NICE_0_LOAD and lw \e prio_to_wmult[], in which case
+ * we're guaranteed shift stays positive because inv_weight is guaranteed to
+ * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
+ *
+ * Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
+ * weight/lw.weight <= 1, and therefore our shift will also be positive.
*/
-static unsigned long
-calc_delta_mine(unsigned long delta_exec, unsigned long weight,
- struct load_weight *lw)
+static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
{
- u64 tmp;
+ u64 fact = scale_load_down(weight);
+ int shift = WMULT_SHIFT;
- /*
- * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
- * entities since MIN_SHARES = 2. Treat weight as 1 if less than
- * 2^SCHED_LOAD_RESOLUTION.
- */
- if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
- tmp = (u64)delta_exec * scale_load_down(weight);
- else
- tmp = (u64)delta_exec;
+ __update_inv_weight(lw);
- if (!lw->inv_weight) {
- unsigned long w = scale_load_down(lw->weight);
-
- if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
- lw->inv_weight = 1;
- else if (unlikely(!w))
- lw->inv_weight = WMULT_CONST;
- else
- lw->inv_weight = WMULT_CONST / w;
+ if (unlikely(fact >> 32)) {
+ while (fact >> 32) {
+ fact >>= 1;
+ shift--;
+ }
}
- /*
- * Check whether we'd overflow the 64-bit multiplication:
- */
- if (unlikely(tmp > WMULT_CONST))
- tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
- WMULT_SHIFT/2);
- else
- tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
+ /* hint to use a 32x32->64 mul */
+ fact = (u64)(u32)fact * lw->inv_weight;
- return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
+ while (fact >> 32) {
+ fact >>= 1;
+ shift--;
+ }
+
+ return mul_u64_u32_shr(delta_exec, fact, shift);
}
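
A quick sanity check of the new fixed-point scheme with illustrative numbers: take weight = NICE_0_LOAD = 1024 and lw->weight = 3072. __update_inv_weight() stores inv_weight = (2^32 - 1) / 3072 = 1398101, and fact becomes 1024 * 1398101 = 1431655424, which still fits in 32 bits, so shift stays at 32. mul_u64_u32_shr(delta_exec, 1431655424, 32) then yields delta_exec * 0.33333..., i.e. delta_exec * 1024 / 3072, exactly the delta_exec * weight / lw.weight the comment promises, computed without a 64-bit division.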
@@ -443,7 +445,7 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
#endif /* CONFIG_FAIR_GROUP_SCHED */
static __always_inline
-void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
/**************************************************************
* Scheduling class tree data structure manipulation methods:
@@ -612,11 +614,10 @@ int sched_proc_update_handler(struct ctl_table *table, int write,
/*
* delta /= w
*/
-static inline unsigned long
-calc_delta_fair(unsigned long delta, struct sched_entity *se)
+static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
{
if (unlikely(se->load.weight != NICE_0_LOAD))
- delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
+ delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
return delta;
}
@@ -665,7 +666,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
update_load_add(&lw, se->load.weight);
load = &lw;
}
- slice = calc_delta_mine(slice, se->load.weight, load);
+ slice = __calc_delta(slice, se->load.weight, load);
}
return slice;
}
@@ -703,47 +704,32 @@ void init_task_runnable_average(struct task_struct *p)
#endif
/*
- * Update the current task's runtime statistics. Skip current tasks that
- * are not in our scheduling class.
+ * Update the current task's runtime statistics.
*/
-static inline void
-__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
- unsigned long delta_exec)
-{
- unsigned long delta_exec_weighted;
-
- schedstat_set(curr->statistics.exec_max,
- max((u64)delta_exec, curr->statistics.exec_max));
-
- curr->sum_exec_runtime += delta_exec;
- schedstat_add(cfs_rq, exec_clock, delta_exec);
- delta_exec_weighted = calc_delta_fair(delta_exec, curr);
-
- curr->vruntime += delta_exec_weighted;
- update_min_vruntime(cfs_rq);
-}
-
static void update_curr(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq->curr;
u64 now = rq_clock_task(rq_of(cfs_rq));
- unsigned long delta_exec;
+ u64 delta_exec;
if (unlikely(!curr))
return;
- /*
- * Get the amount of time the current task was running
- * since the last time we changed load (this cannot
- * overflow on 32 bits):
- */
- delta_exec = (unsigned long)(now - curr->exec_start);
- if (!delta_exec)
+ delta_exec = now - curr->exec_start;
+ if (unlikely((s64)delta_exec <= 0))
return;
- __update_curr(cfs_rq, curr, delta_exec);
curr->exec_start = now;
+ schedstat_set(curr->statistics.exec_max,
+ max(delta_exec, curr->statistics.exec_max));
+
+ curr->sum_exec_runtime += delta_exec;
+ schedstat_add(cfs_rq, exec_clock, delta_exec);
+
+ curr->vruntime += calc_delta_fair(delta_exec, curr);
+ update_min_vruntime(cfs_rq);
+
if (entity_is_task(curr)) {
struct task_struct *curtask = task_of(curr);
@@ -1752,6 +1738,13 @@ void task_numa_work(struct callback_head *work)
(vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
continue;
+ /*
+ * Skip inaccessible VMAs to avoid any confusion between
+ * PROT_NONE and NUMA hinting ptes
+ */
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+ continue;
+
do {
start = max(start, vma->vm_start);
end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
@@ -3015,8 +3008,7 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
}
}
-static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
- unsigned long delta_exec)
+static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
{
/* dock delta_exec before expiring quota (as it could span periods) */
cfs_rq->runtime_remaining -= delta_exec;
@@ -3034,7 +3026,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
}
static __always_inline
-void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
{
if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
return;
@@ -3574,8 +3566,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
return rq_clock_task(rq_of(cfs_rq));
}
-static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
- unsigned long delta_exec) {}
+static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
@@ -5379,10 +5370,31 @@ void update_group_power(struct sched_domain *sd, int cpu)
*/
for_each_cpu(cpu, sched_group_cpus(sdg)) {
- struct sched_group *sg = cpu_rq(cpu)->sd->groups;
+ struct sched_group_power *sgp;
+ struct rq *rq = cpu_rq(cpu);
+
+ /*
+ * build_sched_domains() -> init_sched_groups_power()
+ * gets here before we've attached the domains to the
+ * runqueues.
+ *
+ * Use power_of(), which is set irrespective of domains
+ * in update_cpu_power().
+ *
+ * This avoids power/power_orig from being 0 and
+ * causing divide-by-zero issues on boot.
+ *
+ * Runtime updates will correct power_orig.
+ */
+ if (unlikely(!rq->sd)) {
+ power_orig += power_of(cpu);
+ power += power_of(cpu);
+ continue;
+ }
- power_orig += sg->sgp->power_orig;
- power += sg->sgp->power;
+ sgp = rq->sd->groups->sgp;
+ power_orig += sgp->power_orig;
+ power += sgp->power;
}
} else {
/*
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 7d57275fc396..1c4065575fa2 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -901,6 +901,13 @@ inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
+#ifdef CONFIG_RT_GROUP_SCHED
+ /*
+ * Change rq's cpupri only if rt_rq is the top queue.
+ */
+ if (&rq->rt != rt_rq)
+ return;
+#endif
if (rq->online && prio < prev_prio)
cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}
@@ -910,6 +917,13 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
+#ifdef CONFIG_RT_GROUP_SCHED
+ /*
+ * Change rq's cpupri only if rt_rq is the top queue.
+ */
+ if (&rq->rt != rt_rq)
+ return;
+#endif
if (rq->online && rt_rq->highest_prio.curr != prev_prio)
cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}
diff --git a/kernel/system_certificates.S b/kernel/system_certificates.S
new file mode 100644
index 000000000000..3e9868d47535
--- /dev/null
+++ b/kernel/system_certificates.S
@@ -0,0 +1,20 @@
+#include <linux/export.h>
+#include <linux/init.h>
+
+ __INITRODATA
+
+ .align 8
+ .globl VMLINUX_SYMBOL(system_certificate_list)
+VMLINUX_SYMBOL(system_certificate_list):
+__cert_list_start:
+ .incbin "kernel/x509_certificate_list"
+__cert_list_end:
+
+ .align 8
+ .globl VMLINUX_SYMBOL(system_certificate_list_size)
+VMLINUX_SYMBOL(system_certificate_list_size):
+#ifdef CONFIG_64BIT
+ .quad __cert_list_end - __cert_list_start
+#else
+ .long __cert_list_end - __cert_list_start
+#endif
diff --git a/kernel/system_keyring.c b/kernel/system_keyring.c
new file mode 100644
index 000000000000..52ebc70263f4
--- /dev/null
+++ b/kernel/system_keyring.c
@@ -0,0 +1,105 @@
+/* System trusted keyring for trusted public keys
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cred.h>
+#include <linux/err.h>
+#include <keys/asymmetric-type.h>
+#include <keys/system_keyring.h>
+#include "module-internal.h"
+
+struct key *system_trusted_keyring;
+EXPORT_SYMBOL_GPL(system_trusted_keyring);
+
+extern __initconst const u8 system_certificate_list[];
+extern __initconst const unsigned long system_certificate_list_size;
+
+/*
+ * Load the compiled-in keys
+ */
+static __init int system_trusted_keyring_init(void)
+{
+ pr_notice("Initialise system trusted keyring\n");
+
+ system_trusted_keyring =
+ keyring_alloc(".system_keyring",
+ KUIDT_INIT(0), KGIDT_INIT(0), current_cred(),
+ ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH),
+ KEY_ALLOC_NOT_IN_QUOTA, NULL);
+ if (IS_ERR(system_trusted_keyring))
+ panic("Can't allocate system trusted keyring\n");
+
+ set_bit(KEY_FLAG_TRUSTED_ONLY, &system_trusted_keyring->flags);
+ return 0;
+}
+
+/*
+ * Must be initialised before we try and load the keys into the keyring.
+ */
+device_initcall(system_trusted_keyring_init);
+
+/*
+ * Load the compiled-in list of X.509 certificates.
+ */
+static __init int load_system_certificate_list(void)
+{
+ key_ref_t key;
+ const u8 *p, *end;
+ size_t plen;
+
+ pr_notice("Loading compiled-in X.509 certificates\n");
+
+ p = system_certificate_list;
+ end = p + system_certificate_list_size;
+ while (p < end) {
+ /* Each cert begins with an ASN.1 SEQUENCE tag and must be more
+ * than 256 bytes in size.
+ */
+ if (end - p < 4)
+ goto dodgy_cert;
+ if (p[0] != 0x30 ||
+ p[1] != 0x82)
+ goto dodgy_cert;
+ plen = (p[2] << 8) | p[3];
+ plen += 4;
+ if (plen > end - p)
+ goto dodgy_cert;
+
+ key = key_create_or_update(make_key_ref(system_trusted_keyring, 1),
+ "asymmetric",
+ NULL,
+ p,
+ plen,
+ ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ),
+ KEY_ALLOC_NOT_IN_QUOTA |
+ KEY_ALLOC_TRUSTED);
+ if (IS_ERR(key)) {
+ pr_err("Problem loading in-kernel X.509 certificate (%ld)\n",
+ PTR_ERR(key));
+ } else {
+ pr_notice("Loaded X.509 cert '%s'\n",
+ key_ref_to_ptr(key)->description);
+ key_ref_put(key);
+ }
+ p += plen;
+ }
+
+ return 0;
+
+dodgy_cert:
+ pr_err("Problem parsing in-kernel X.509 certificate list\n");
+ return 0;
+}
+late_initcall(load_system_certificate_list);
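
The length walk above relies only on the outer DER framing, so it can be exercised in user space against any concatenated certificate blob. A hedged sketch (function name and inputs are illustrative; the tag check uses the corrected either-byte-wrong logic):

/* Walk a blob of concatenated DER certificates the way
 * load_system_certificate_list() does. */
static int walk_der_list(const unsigned char *blob, size_t len)
{
	const unsigned char *p = blob, *end = blob + len;

	while (p < end) {
		size_t plen;

		/* SEQUENCE tag (0x30) with a 2-byte length (0x82) */
		if (end - p < 4 || p[0] != 0x30 || p[1] != 0x82)
			return -1;
		plen = ((size_t)p[2] << 8) | p[3];
		plen += 4;			/* tag + length bytes */
		if (plen > (size_t)(end - p))
			return -1;
		/* [p, p + plen) is one certificate */
		p += plen;
	}
	return 0;
}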
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 64522ecdfe0e..162b03ab0ad2 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -33,6 +33,21 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
*/
ktime_t tick_next_period;
ktime_t tick_period;
+
+/*
+ * tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
+ * which is responsible for calling do_timer(), i.e. the timekeeping stuff. This
+ * variable has two functions:
+ *
+ * 1) Prevent a thundering herd issue of a gazillion of CPUs trying to grab the
+ * timekeeping lock all at once. Only the CPU which is assigned to do the
+ * update is handling it.
+ *
+ * 2) Hand off the duty in the NOHZ idle case by setting the value to
+ * TICK_DO_TIMER_NONE, i.e. a non-existent CPU. So the next CPU which looks
+ * at it will take over and keep the timekeeping alive. The handover
+ * procedure also covers CPU hotplug.
+ */
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
/*
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3612fc77f834..ea20f7d1ac2c 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -361,8 +361,8 @@ void __init tick_nohz_init(void)
/*
* NO HZ enabled ?
*/
-int tick_nohz_enabled __read_mostly = 1;
-
+static int tick_nohz_enabled __read_mostly = 1;
+int tick_nohz_active __read_mostly;
/*
* Enable / Disable tickless mode
*/
@@ -465,7 +465,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
ktime_t now, idle;
- if (!tick_nohz_enabled)
+ if (!tick_nohz_active)
return -1;
now = ktime_get();
@@ -506,7 +506,7 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
ktime_t now, iowait;
- if (!tick_nohz_enabled)
+ if (!tick_nohz_active)
return -1;
now = ktime_get();
@@ -711,8 +711,10 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
return false;
}
- if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
+ if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
+ ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ };
return false;
+ }
if (need_resched())
return false;
@@ -799,11 +801,6 @@ void tick_nohz_idle_enter(void)
local_irq_disable();
ts = &__get_cpu_var(tick_cpu_sched);
- /*
- * set ts->inidle unconditionally. even if the system did not
- * switch to nohz mode the cpu frequency governers rely on the
- * update of the idle time accounting in tick_nohz_start_idle().
- */
ts->inidle = 1;
__tick_nohz_idle_enter(ts);
@@ -973,7 +970,7 @@ static void tick_nohz_switch_to_nohz(void)
struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
ktime_t next;
- if (!tick_nohz_enabled)
+ if (!tick_nohz_active)
return;
local_irq_disable();
@@ -981,7 +978,7 @@ static void tick_nohz_switch_to_nohz(void)
local_irq_enable();
return;
}
-
+ tick_nohz_active = 1;
ts->nohz_mode = NOHZ_MODE_LOWRES;
/*
@@ -1139,8 +1136,10 @@ void tick_setup_sched_timer(void)
}
#ifdef CONFIG_NO_HZ_COMMON
- if (tick_nohz_enabled)
+ if (tick_nohz_enabled) {
ts->nohz_mode = NOHZ_MODE_HIGHRES;
+ tick_nohz_active = 1;
+ }
#endif
}
#endif /* HIGH_RES_TIMERS */
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 3abf53418b67..87b4f00284c9 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1347,7 +1347,7 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
tk->xtime_nsec -= remainder;
tk->xtime_nsec += 1ULL << tk->shift;
tk->ntp_error += remainder << tk->ntp_error_shift;
-
+ tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift;
}
#else
#define old_vsyscall_fixup(tk)
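
The added compensation line closes a small accounting hole: the fixup inflates xtime_nsec by a full (1 << shift) nanoseconds while only crediting the leftover remainder to ntp_error, so subtracting (1ULL << shift) << ntp_error_shift records the net rounding push as negative error and lets the NTP steering loop smooth it back out instead of treating it as genuine drift.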
diff --git a/kernel/timer.c b/kernel/timer.c
index 6582b82fa966..accfd241b9e5 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1518,9 +1518,8 @@ static int init_timers_cpu(int cpu)
/*
* The APs use this path later in boot
*/
- base = kmalloc_node(sizeof(*base),
- GFP_KERNEL | __GFP_ZERO,
- cpu_to_node(cpu));
+ base = kzalloc_node(sizeof(*base), GFP_KERNEL,
+ cpu_to_node(cpu));
if (!base)
return -ENOMEM;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 22fa55696760..72a0f81dc5a8 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -367,9 +367,6 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list,
static int __register_ftrace_function(struct ftrace_ops *ops)
{
- if (unlikely(ftrace_disabled))
- return -ENODEV;
-
if (FTRACE_WARN_ON(ops == &global_ops))
return -EINVAL;
@@ -428,9 +425,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
int ret;
- if (ftrace_disabled)
- return -ENODEV;
-
if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
return -EBUSY;
@@ -781,7 +775,7 @@ static int ftrace_profile_init(void)
int cpu;
int ret = 0;
- for_each_online_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
ret = ftrace_profile_init_cpu(cpu);
if (ret)
break;
@@ -2088,10 +2082,15 @@ static void ftrace_startup_enable(int command)
static int ftrace_startup(struct ftrace_ops *ops, int command)
{
bool hash_enable = true;
+ int ret;
if (unlikely(ftrace_disabled))
return -ENODEV;
+ ret = __register_ftrace_function(ops);
+ if (ret)
+ return ret;
+
ftrace_start_up++;
command |= FTRACE_UPDATE_CALLS;
@@ -2113,12 +2112,17 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
return 0;
}
-static void ftrace_shutdown(struct ftrace_ops *ops, int command)
+static int ftrace_shutdown(struct ftrace_ops *ops, int command)
{
bool hash_disable = true;
+ int ret;
if (unlikely(ftrace_disabled))
- return;
+ return -ENODEV;
+
+ ret = __unregister_ftrace_function(ops);
+ if (ret)
+ return ret;
ftrace_start_up--;
/*
@@ -2153,9 +2157,10 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
}
if (!command || !ftrace_enabled)
- return;
+ return 0;
ftrace_run_update_code(command);
+ return 0;
}
static void ftrace_startup_sysctl(void)
@@ -3060,16 +3065,13 @@ static void __enable_ftrace_function_probe(void)
if (i == FTRACE_FUNC_HASHSIZE)
return;
- ret = __register_ftrace_function(&trace_probe_ops);
- if (!ret)
- ret = ftrace_startup(&trace_probe_ops, 0);
+ ret = ftrace_startup(&trace_probe_ops, 0);
ftrace_probe_registered = 1;
}
static void __disable_ftrace_function_probe(void)
{
- int ret;
int i;
if (!ftrace_probe_registered)
@@ -3082,9 +3084,7 @@ static void __disable_ftrace_function_probe(void)
}
/* no more funcs left */
- ret = __unregister_ftrace_function(&trace_probe_ops);
- if (!ret)
- ftrace_shutdown(&trace_probe_ops, 0);
+ ftrace_shutdown(&trace_probe_ops, 0);
ftrace_probe_registered = 0;
}
@@ -4366,12 +4366,15 @@ core_initcall(ftrace_nodyn_init);
static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
-# define ftrace_startup(ops, command) \
- ({ \
- (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
- 0; \
+# define ftrace_startup(ops, command) \
+ ({ \
+ int ___ret = __register_ftrace_function(ops); \
+ if (!___ret) \
+ (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
+ ___ret; \
})
-# define ftrace_shutdown(ops, command) do { } while (0)
+# define ftrace_shutdown(ops, command) __unregister_ftrace_function(ops)
+
# define ftrace_startup_sysctl() do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
@@ -4780,9 +4783,7 @@ int register_ftrace_function(struct ftrace_ops *ops)
mutex_lock(&ftrace_lock);
- ret = __register_ftrace_function(ops);
- if (!ret)
- ret = ftrace_startup(ops, 0);
+ ret = ftrace_startup(ops, 0);
mutex_unlock(&ftrace_lock);
@@ -4801,9 +4802,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
int ret;
mutex_lock(&ftrace_lock);
- ret = __unregister_ftrace_function(ops);
- if (!ret)
- ftrace_shutdown(ops, 0);
+ ret = ftrace_shutdown(ops, 0);
mutex_unlock(&ftrace_lock);
return ret;
@@ -4997,6 +4996,13 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
return NOTIFY_DONE;
}
+/* Just a placeholder for function graph */
+static struct ftrace_ops fgraph_ops __read_mostly = {
+ .func = ftrace_stub,
+ .flags = FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_GLOBAL |
+ FTRACE_OPS_FL_RECURSION_SAFE,
+};
+
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
trace_func_graph_ent_t entryfunc)
{
@@ -5023,7 +5029,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
ftrace_graph_return = retfunc;
ftrace_graph_entry = entryfunc;
- ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
+ ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
out:
mutex_unlock(&ftrace_lock);
@@ -5040,7 +5046,7 @@ void unregister_ftrace_graph(void)
ftrace_graph_active--;
ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
ftrace_graph_entry = ftrace_graph_entry_stub;
- ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
+ ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
unregister_pm_notifier(&ftrace_suspend_notifier);
unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
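
Because ftrace_startup()/ftrace_shutdown() now perform the (un)registration themselves, a caller only ever touches the public API. A hedged sketch of a minimal consumer (the callback body is illustrative):

static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct pt_regs *regs)
{
	/* invoked on every traced function entry */
}

static struct ftrace_ops my_ops = {
	.func	= my_callback,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static int __init my_init(void)
{
	/* registration now implies startup, so a failure inside
	 * ftrace_startup() propagates to the caller */
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);	/* implies shutdown */
}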
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 78e27e3b52ac..e854f420e033 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -24,6 +24,12 @@ static int total_ref_count;
static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
struct perf_event *p_event)
{
+ if (tp_event->perf_perm) {
+ int ret = tp_event->perf_perm(tp_event, p_event);
+ if (ret)
+ return ret;
+ }
+
/* The ftrace function trace is allowed only for root. */
if (ftrace_event_is_function(tp_event) &&
perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
@@ -173,7 +179,7 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event,
int perf_trace_init(struct perf_event *p_event)
{
struct ftrace_event_call *tp_event;
- int event_id = p_event->attr.config;
+ u64 event_id = p_event->attr.config;
int ret = -EINVAL;
mutex_lock(&event_mutex);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f919a2e21bf3..a11800ae96de 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2314,6 +2314,9 @@ int event_trace_del_tracer(struct trace_array *tr)
/* Disable any running events */
__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
+ /* Access to events are within rcu_read_lock_sched() */
+ synchronize_sched();
+
down_write(&trace_event_sem);
__trace_remove_event_dirs(tr);
debugfs_remove_recursive(tr->event_dir);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index e4b6d11bdf78..ea90eb5f6f17 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -431,11 +431,6 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file,
if (!tr->sys_refcount_enter)
unregister_trace_sys_enter(ftrace_syscall_enter, tr);
mutex_unlock(&syscall_trace_lock);
- /*
- * Callers expect the event to be completely disabled on
- * return, so wait for current handlers to finish.
- */
- synchronize_sched();
}
static int reg_event_syscall_exit(struct ftrace_event_file *file,
@@ -474,11 +469,6 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
if (!tr->sys_refcount_exit)
unregister_trace_sys_exit(ftrace_syscall_exit, tr);
mutex_unlock(&syscall_trace_lock);
- /*
- * Callers expect the event to be completely disabled on
- * return, so wait for current handlers to finish.
- */
- synchronize_sched();
}
static int __init init_syscall_trace(struct ftrace_event_call *call)
diff --git a/kernel/user.c b/kernel/user.c
index 5bbb91988e69..c006131beb77 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -51,6 +51,10 @@ struct user_namespace init_user_ns = {
.owner = GLOBAL_ROOT_UID,
.group = GLOBAL_ROOT_GID,
.proc_inum = PROC_USER_INIT_INO,
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+ .persistent_keyring_register_sem =
+ __RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
+#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 13fb1134ba58..240fb62cf394 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -101,6 +101,9 @@ int create_user_ns(struct cred *new)
set_cred_user_ns(new, ns);
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+ init_rwsem(&ns->persistent_keyring_register_sem);
+#endif
return 0;
}
@@ -130,6 +133,9 @@ void free_user_ns(struct user_namespace *ns)
do {
parent = ns->parent;
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+ key_put(ns->persistent_keyring_register);
+#endif
proc_free_inum(ns->proc_inum);
kmem_cache_free(user_ns_cachep, ns);
ns = parent;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 987293d03ebc..b010eac595d2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -305,6 +305,9 @@ static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
/* I: attributes used when instantiating standard unbound pools on demand */
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
+/* I: attributes used when instantiating ordered pools on demand */
+static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
+
struct workqueue_struct *system_wq __read_mostly;
EXPORT_SYMBOL(system_wq);
struct workqueue_struct *system_highpri_wq __read_mostly;
@@ -518,14 +521,21 @@ static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
-/* allocate ID and assign it to @pool */
+/**
+ * worker_pool_assign_id - allocate ID and assign it to @pool
+ * @pool: the pool pointer of interest
+ *
+ * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
+ * successfully, -errno on failure.
+ */
static int worker_pool_assign_id(struct worker_pool *pool)
{
int ret;
lockdep_assert_held(&wq_pool_mutex);
- ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
+ ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
+ GFP_KERNEL);
if (ret >= 0) {
pool->id = ret;
return 0;
@@ -1320,7 +1330,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
debug_work_activate(work);
- /* if dying, only works from the same workqueue are allowed */
+ /* if draining, only works from the same workqueue are allowed */
if (unlikely(wq->flags & __WQ_DRAINING) &&
WARN_ON_ONCE(!is_chained_work(wq)))
return;
@@ -1736,16 +1746,17 @@ static struct worker *create_worker(struct worker_pool *pool)
if (IS_ERR(worker->task))
goto fail;
+ set_user_nice(worker->task, pool->attrs->nice);
+
+ /* prevent userland from meddling with cpumask of workqueue workers */
+ worker->task->flags |= PF_NO_SETAFFINITY;
+
/*
* set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
* online CPUs. It'll be re-applied when any of the CPUs come up.
*/
- set_user_nice(worker->task, pool->attrs->nice);
set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
- /* prevent userland from meddling with cpumask of workqueue workers */
- worker->task->flags |= PF_NO_SETAFFINITY;
-
/*
* The caller is responsible for ensuring %POOL_DISASSOCIATED
* remains stable across this function. See the comments above the
@@ -2840,19 +2851,6 @@ already_gone:
return false;
}
-static bool __flush_work(struct work_struct *work)
-{
- struct wq_barrier barr;
-
- if (start_flush_work(work, &barr)) {
- wait_for_completion(&barr.done);
- destroy_work_on_stack(&barr.work);
- return true;
- } else {
- return false;
- }
-}
-
/**
* flush_work - wait for a work to finish executing the last queueing instance
* @work: the work to flush
@@ -2866,10 +2864,18 @@ static bool __flush_work(struct work_struct *work)
*/
bool flush_work(struct work_struct *work)
{
+ struct wq_barrier barr;
+
lock_map_acquire(&work->lockdep_map);
lock_map_release(&work->lockdep_map);
- return __flush_work(work);
+ if (start_flush_work(work, &barr)) {
+ wait_for_completion(&barr.done);
+ destroy_work_on_stack(&barr.work);
+ return true;
+ } else {
+ return false;
+ }
}
EXPORT_SYMBOL_GPL(flush_work);
@@ -4106,7 +4112,7 @@ out_unlock:
static int alloc_and_link_pwqs(struct workqueue_struct *wq)
{
bool highpri = wq->flags & WQ_HIGHPRI;
- int cpu;
+ int cpu, ret;
if (!(wq->flags & WQ_UNBOUND)) {
wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
@@ -4126,6 +4132,13 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
mutex_unlock(&wq->mutex);
}
return 0;
+ } else if (wq->flags & __WQ_ORDERED) {
+ ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
+ /* there should only be a single pwq for the ordering guarantee */
+ WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
+ wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
+ "ordering guarantee broken for workqueue %s\n", wq->name);
+ return ret;
} else {
return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
}
@@ -4814,14 +4827,7 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
schedule_work_on(cpu, &wfc.work);
-
- /*
- * The work item is on-stack and can't lead to deadlock through
- * flushing. Use __flush_work() to avoid spurious lockdep warnings
- * when work_on_cpu()s are nested.
- */
- __flush_work(&wfc.work);
-
+ flush_work(&wfc.work);
return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
@@ -5009,10 +5015,6 @@ static int __init init_workqueues(void)
int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
int i, cpu;
- /* make sure we have enough bits for OFFQ pool ID */
- BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
- WORK_CPU_END * NR_STD_WORKER_POOLS);
-
WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
@@ -5051,13 +5053,23 @@ static int __init init_workqueues(void)
}
}
- /* create default unbound wq attrs */
+ /* create default unbound and ordered wq attrs */
for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
struct workqueue_attrs *attrs;
BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
attrs->nice = std_nice[i];
unbound_std_wq_attrs[i] = attrs;
+
+ /*
+ * An ordered wq should have only one pwq as ordering is
+ * guaranteed by max_active which is enforced by pwqs.
+ * Turn off NUMA so that dfl_pwq is used for all nodes.
+ */
+ BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
+ attrs->nice = std_nice[i];
+ attrs->no_numa = true;
+ ordered_wq_attrs[i] = attrs;
}
system_wq = alloc_workqueue("events", 0, 0);
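
With ordered_wq_attrs[] in place, any workqueue created with __WQ_ORDERED is backed by exactly one pool_workqueue, so its items execute strictly one at a time in queueing order. A hedged usage sketch (work items are assumed to have been set up with INIT_WORK()):

struct workqueue_struct *wq;

wq = alloc_ordered_workqueue("my_ordered_wq", 0);	/* sets __WQ_ORDERED */
if (!wq)
	return -ENOMEM;

queue_work(wq, &work_a);	/* runs first */
queue_work(wq, &work_b);	/* starts only after work_a completes */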
diff --git a/lib/Kconfig b/lib/Kconfig
index 06dc74200a51..991c98bc4a3f 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -322,6 +322,20 @@ config TEXTSEARCH_FSM
config BTREE
boolean
+config ASSOCIATIVE_ARRAY
+ bool
+ help
+ Generic associative array. Can be searched and iterated over whilst
+ it is being modified. It is also reasonably quick to search and
+ modify. The algorithms are non-recursive, and the trees are highly
+ capacious.
+
+ See:
+
+ Documentation/assoc_array.txt
+
+ for more information.
+
config HAS_IOMEM
boolean
depends on !NO_IOMEM
diff --git a/lib/Makefile b/lib/Makefile
index d480a8c92385..d0f79c547d97 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -13,7 +13,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
- earlycpio.o percpu-refcount.o percpu_ida.o
+ earlycpio.o
obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
lib-$(CONFIG_MMU) += ioremap.o
@@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
- percpu_ida.o
+ percpu-refcount.o percpu_ida.o hash.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
obj-y += kstrtox.o
@@ -47,6 +47,7 @@ CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_BTREE) += btree.o
+obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
obj-$(CONFIG_DEBUG_LIST) += list_debug.o
obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
new file mode 100644
index 000000000000..1b6a44f1ec3e
--- /dev/null
+++ b/lib/assoc_array.c
@@ -0,0 +1,1746 @@
+/* Generic associative array implementation.
+ *
+ * See Documentation/assoc_array.txt for information.
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+//#define DEBUG
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/assoc_array_priv.h>
+
+/*
+ * Iterate over an associative array. The caller must hold the RCU read lock
+ * or better.
+ */
+static int assoc_array_subtree_iterate(const struct assoc_array_ptr *root,
+ const struct assoc_array_ptr *stop,
+ int (*iterator)(const void *leaf,
+ void *iterator_data),
+ void *iterator_data)
+{
+ const struct assoc_array_shortcut *shortcut;
+ const struct assoc_array_node *node;
+ const struct assoc_array_ptr *cursor, *ptr, *parent;
+ unsigned long has_meta;
+ int slot, ret;
+
+ cursor = root;
+
+begin_node:
+ if (assoc_array_ptr_is_shortcut(cursor)) {
+ /* Descend through a shortcut */
+ shortcut = assoc_array_ptr_to_shortcut(cursor);
+ smp_read_barrier_depends();
+ cursor = ACCESS_ONCE(shortcut->next_node);
+ }
+
+ node = assoc_array_ptr_to_node(cursor);
+ smp_read_barrier_depends();
+ slot = 0;
+
+ /* We perform two passes of each node.
+ *
+ * The first pass does all the leaves in this node. This means we
+ * don't miss any leaves if the node is split up by insertion whilst
+ * we're iterating over the branches rooted here (we may, however, see
+ * some leaves twice).
+ */
+ has_meta = 0;
+ for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ ptr = ACCESS_ONCE(node->slots[slot]);
+ has_meta |= (unsigned long)ptr;
+ if (ptr && assoc_array_ptr_is_leaf(ptr)) {
+ /* We need a barrier between the read of the pointer
+ * and dereferencing the pointer - but only if we are
+ * actually going to dereference it.
+ */
+ smp_read_barrier_depends();
+
+ /* Invoke the callback */
+ ret = iterator(assoc_array_ptr_to_leaf(ptr),
+ iterator_data);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* The second pass attends to all the metadata pointers. If we follow
+ * one of these we may find that we don't come back here, but rather go
+ * back to a replacement node with the leaves in a different layout.
+ *
+ * We are guaranteed to make progress, however, as the slot number for
+ * a particular portion of the key space cannot change - and we
+ * continue at the back pointer + 1.
+ */
+ if (!(has_meta & ASSOC_ARRAY_PTR_META_TYPE))
+ goto finished_node;
+ slot = 0;
+
+continue_node:
+ node = assoc_array_ptr_to_node(cursor);
+ smp_read_barrier_depends();
+
+ for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ ptr = ACCESS_ONCE(node->slots[slot]);
+ if (assoc_array_ptr_is_meta(ptr)) {
+ cursor = ptr;
+ goto begin_node;
+ }
+ }
+
+finished_node:
+ /* Move up to the parent (may need to skip back over a shortcut) */
+ parent = ACCESS_ONCE(node->back_pointer);
+ slot = node->parent_slot;
+ if (parent == stop)
+ return 0;
+
+ if (assoc_array_ptr_is_shortcut(parent)) {
+ shortcut = assoc_array_ptr_to_shortcut(parent);
+ smp_read_barrier_depends();
+ cursor = parent;
+ parent = ACCESS_ONCE(shortcut->back_pointer);
+ slot = shortcut->parent_slot;
+ if (parent == stop)
+ return 0;
+ }
+
+ /* Ascend to next slot in parent node */
+ cursor = parent;
+ slot++;
+ goto continue_node;
+}
+
+/**
+ * assoc_array_iterate - Pass all objects in the array to a callback
+ * @array: The array to iterate over.
+ * @iterator: The callback function.
+ * @iterator_data: Private data for the callback function.
+ *
+ * Iterate over all the objects in an associative array. Each one will be
+ * presented to the iterator function.
+ *
+ * If the array is being modified concurrently with the iteration then it is
+ * possible that some objects in the array will be passed to the iterator
+ * callback more than once - though every object should be passed at least
+ * once. If this is undesirable then the caller must lock against modification
+ * for the duration of this function.
+ *
+ * The function will return 0 if no objects were in the array or else it will
+ * return the result of the last iterator function called. Iteration stops
+ * immediately if any call to the iteration function results in a non-zero
+ * return.
+ *
+ * The caller should hold the RCU read lock or better if concurrent
+ * modification is possible.
+ */
+int assoc_array_iterate(const struct assoc_array *array,
+ int (*iterator)(const void *object,
+ void *iterator_data),
+ void *iterator_data)
+{
+ struct assoc_array_ptr *root = ACCESS_ONCE(array->root);
+
+ if (!root)
+ return 0;
+ return assoc_array_subtree_iterate(root, NULL, iterator, iterator_data);
+}
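
A hedged example of driving this iterator: count the objects while holding the RCU read lock (names are illustrative; a non-zero return from the callback would stop the walk early):

static int count_one(const void *object, void *iterator_data)
{
	(*(size_t *)iterator_data)++;
	return 0;
}

static size_t count_objects(const struct assoc_array *array)
{
	size_t n = 0;

	rcu_read_lock();
	assoc_array_iterate(array, count_one, &n);
	rcu_read_unlock();
	return n;
}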
+
+enum assoc_array_walk_status {
+ assoc_array_walk_tree_empty,
+ assoc_array_walk_found_terminal_node,
+ assoc_array_walk_found_wrong_shortcut,
+};
+
+struct assoc_array_walk_result {
+ struct {
+ struct assoc_array_node *node; /* Node in which leaf might be found */
+ int level;
+ int slot;
+ } terminal_node;
+ struct {
+ struct assoc_array_shortcut *shortcut;
+ int level;
+ int sc_level;
+ unsigned long sc_segments;
+ unsigned long dissimilarity;
+ } wrong_shortcut;
+};
+
+/*
+ * Navigate through the internal tree looking for the closest node to the key.
+ */
+static enum assoc_array_walk_status
+assoc_array_walk(const struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key,
+ struct assoc_array_walk_result *result)
+{
+ struct assoc_array_shortcut *shortcut;
+ struct assoc_array_node *node;
+ struct assoc_array_ptr *cursor, *ptr;
+ unsigned long sc_segments, dissimilarity;
+ unsigned long segments;
+ int level, sc_level, next_sc_level;
+ int slot;
+
+ pr_devel("-->%s()\n", __func__);
+
+ cursor = ACCESS_ONCE(array->root);
+ if (!cursor)
+ return assoc_array_walk_tree_empty;
+
+ level = 0;
+
+ /* Use segments from the key for the new leaf to navigate through the
+ * internal tree, skipping through nodes and shortcuts that are on
+ * route to the destination. Eventually we'll come to a slot that is
+ * either empty or contains a leaf at which point we've found a node in
+ * which the leaf we're looking for might be found or into which it
+ * should be inserted.
+ */
+jumped:
+ segments = ops->get_key_chunk(index_key, level);
+ pr_devel("segments[%d]: %lx\n", level, segments);
+
+ if (assoc_array_ptr_is_shortcut(cursor))
+ goto follow_shortcut;
+
+consider_node:
+ node = assoc_array_ptr_to_node(cursor);
+ smp_read_barrier_depends();
+
+ slot = segments >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
+ slot &= ASSOC_ARRAY_FAN_MASK;
+ ptr = ACCESS_ONCE(node->slots[slot]);
+
+ pr_devel("consider slot %x [ix=%d type=%lu]\n",
+ slot, level, (unsigned long)ptr & 3);
+
+ if (!assoc_array_ptr_is_meta(ptr)) {
+ /* The node doesn't have a node/shortcut pointer in the slot
+ * corresponding to the index key that we have to follow.
+ */
+ result->terminal_node.node = node;
+ result->terminal_node.level = level;
+ result->terminal_node.slot = slot;
+ pr_devel("<--%s() = terminal_node\n", __func__);
+ return assoc_array_walk_found_terminal_node;
+ }
+
+ if (assoc_array_ptr_is_node(ptr)) {
+ /* There is a pointer to a node in the slot corresponding to
+ * this index key segment, so we need to follow it.
+ */
+ cursor = ptr;
+ level += ASSOC_ARRAY_LEVEL_STEP;
+ if ((level & ASSOC_ARRAY_KEY_CHUNK_MASK) != 0)
+ goto consider_node;
+ goto jumped;
+ }
+
+ /* There is a shortcut in the slot corresponding to the index key
+ * segment. We follow the shortcut if its partial index key matches
+ * this leaf's. Otherwise we need to split the shortcut.
+ */
+ cursor = ptr;
+follow_shortcut:
+ shortcut = assoc_array_ptr_to_shortcut(cursor);
+ smp_read_barrier_depends();
+ pr_devel("shortcut to %d\n", shortcut->skip_to_level);
+ sc_level = level + ASSOC_ARRAY_LEVEL_STEP;
+ BUG_ON(sc_level > shortcut->skip_to_level);
+
+ do {
+ /* Check the leaf against the shortcut's index key a word at a
+ * time, trimming the final word (the shortcut stores the index
+ * key completely from the root to the shortcut's target).
+ */
+ if ((sc_level & ASSOC_ARRAY_KEY_CHUNK_MASK) == 0)
+ segments = ops->get_key_chunk(index_key, sc_level);
+
+ sc_segments = shortcut->index_key[sc_level >> ASSOC_ARRAY_KEY_CHUNK_SHIFT];
+ dissimilarity = segments ^ sc_segments;
+
+ if (round_up(sc_level, ASSOC_ARRAY_KEY_CHUNK_SIZE) > shortcut->skip_to_level) {
+ /* Trim segments that are beyond the shortcut */
+ int shift = shortcut->skip_to_level & ASSOC_ARRAY_KEY_CHUNK_MASK;
+ dissimilarity &= ~(ULONG_MAX << shift);
+ next_sc_level = shortcut->skip_to_level;
+ } else {
+ next_sc_level = sc_level + ASSOC_ARRAY_KEY_CHUNK_SIZE;
+ next_sc_level = round_down(next_sc_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
+ }
+
+ if (dissimilarity != 0) {
+ /* This shortcut points elsewhere */
+ result->wrong_shortcut.shortcut = shortcut;
+ result->wrong_shortcut.level = level;
+ result->wrong_shortcut.sc_level = sc_level;
+ result->wrong_shortcut.sc_segments = sc_segments;
+ result->wrong_shortcut.dissimilarity = dissimilarity;
+ return assoc_array_walk_found_wrong_shortcut;
+ }
+
+ sc_level = next_sc_level;
+ } while (sc_level < shortcut->skip_to_level);
+
+ /* The shortcut matches the leaf's index to this point. */
+ cursor = ACCESS_ONCE(shortcut->next_node);
+ if (((level ^ sc_level) & ~ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) {
+ level = sc_level;
+ goto jumped;
+ } else {
+ level = sc_level;
+ goto consider_node;
+ }
+}
+
+/**
+ * assoc_array_find - Find an object by index key
+ * @array: The associative array to search.
+ * @ops: The operations to use.
+ * @index_key: The key to the object.
+ *
+ * Find an object in an associative array by walking through the internal tree
+ * to the node that should contain the object and then searching the leaves
+ * there. NULL is returned if the requested object was not found in the array.
+ *
+ * The caller must hold the RCU read lock or better.
+ */
+void *assoc_array_find(const struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key)
+{
+ struct assoc_array_walk_result result;
+ const struct assoc_array_node *node;
+ const struct assoc_array_ptr *ptr;
+ const void *leaf;
+ int slot;
+
+ if (assoc_array_walk(array, ops, index_key, &result) !=
+ assoc_array_walk_found_terminal_node)
+ return NULL;
+
+ node = result.terminal_node.node;
+ smp_read_barrier_depends();
+
+ /* If the target key is available to us, it has to be pointed to by
+ * the terminal node.
+ */
+ for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ ptr = ACCESS_ONCE(node->slots[slot]);
+ if (ptr && assoc_array_ptr_is_leaf(ptr)) {
+ /* We need a barrier between the read of the pointer
+ * and dereferencing the pointer - but only if we are
+ * actually going to dereference it.
+ */
+ leaf = assoc_array_ptr_to_leaf(ptr);
+ smp_read_barrier_depends();
+ if (ops->compare_object(leaf, index_key))
+ return (void *)leaf;
+ }
+ }
+
+ return NULL;
+}
+
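
A hedged usage sketch for the lookup (my_array and my_ops are assumed to be a caller-populated assoc_array and assoc_array_ops; the index key here is taken to be a plain unsigned long):

unsigned long key = 42;
void *obj;

rcu_read_lock();
obj = assoc_array_find(&my_array, &my_ops, &key);
rcu_read_unlock();

/* obj is NULL if no object with a matching index key was found */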
+/*
+ * Destructively iterate over an associative array. The caller must prevent
+ * other simultaneous accesses.
+ */
+static void assoc_array_destroy_subtree(struct assoc_array_ptr *root,
+ const struct assoc_array_ops *ops)
+{
+ struct assoc_array_shortcut *shortcut;
+ struct assoc_array_node *node;
+ struct assoc_array_ptr *cursor, *parent = NULL;
+ int slot = -1;
+
+ pr_devel("-->%s()\n", __func__);
+
+ cursor = root;
+ if (!cursor) {
+ pr_devel("empty\n");
+ return;
+ }
+
+move_to_meta:
+ if (assoc_array_ptr_is_shortcut(cursor)) {
+ /* Descend through a shortcut */
+ pr_devel("[%d] shortcut\n", slot);
+ BUG_ON(!assoc_array_ptr_is_shortcut(cursor));
+ shortcut = assoc_array_ptr_to_shortcut(cursor);
+ BUG_ON(shortcut->back_pointer != parent);
+ BUG_ON(slot != -1 && shortcut->parent_slot != slot);
+ parent = cursor;
+ cursor = shortcut->next_node;
+ slot = -1;
+ BUG_ON(!assoc_array_ptr_is_node(cursor));
+ }
+
+ pr_devel("[%d] node\n", slot);
+ node = assoc_array_ptr_to_node(cursor);
+ BUG_ON(node->back_pointer != parent);
+ BUG_ON(slot != -1 && node->parent_slot != slot);
+ slot = 0;
+
+continue_node:
+ pr_devel("Node %p [back=%p]\n", node, node->back_pointer);
+ for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ struct assoc_array_ptr *ptr = node->slots[slot];
+ if (!ptr)
+ continue;
+ if (assoc_array_ptr_is_meta(ptr)) {
+ parent = cursor;
+ cursor = ptr;
+ goto move_to_meta;
+ }
+
+ if (ops) {
+ pr_devel("[%d] free leaf\n", slot);
+ ops->free_object(assoc_array_ptr_to_leaf(ptr));
+ }
+ }
+
+ parent = node->back_pointer;
+ slot = node->parent_slot;
+ pr_devel("free node\n");
+ kfree(node);
+ if (!parent)
+ return; /* Done */
+
+ /* Move back up to the parent (may need to free a shortcut on
+ * the way up) */
+ if (assoc_array_ptr_is_shortcut(parent)) {
+ shortcut = assoc_array_ptr_to_shortcut(parent);
+ BUG_ON(shortcut->next_node != cursor);
+ cursor = parent;
+ parent = shortcut->back_pointer;
+ slot = shortcut->parent_slot;
+ pr_devel("free shortcut\n");
+ kfree(shortcut);
+ if (!parent)
+ return;
+
+ BUG_ON(!assoc_array_ptr_is_node(parent));
+ }
+
+ /* Ascend to next slot in parent node */
+ pr_devel("ascend to %p[%d]\n", parent, slot);
+ cursor = parent;
+ node = assoc_array_ptr_to_node(cursor);
+ slot++;
+ goto continue_node;
+}
+
+/**
+ * assoc_array_destroy - Destroy an associative array
+ * @array: The array to destroy.
+ * @ops: The operations to use.
+ *
+ * Discard all metadata and free all objects in an associative array. The
+ * array will be empty and ready to use again upon completion. This function
+ * cannot fail.
+ *
+ * The caller must prevent all other accesses whilst this takes place as no
+ * attempt is made to adjust pointers gracefully to permit RCU readlock-holding
+ * accesses to continue. On the other hand, no memory allocation is required.
+ */
+void assoc_array_destroy(struct assoc_array *array,
+ const struct assoc_array_ops *ops)
+{
+ assoc_array_destroy_subtree(array->root, ops);
+ array->root = NULL;
+}
+
+/*
+ * Handle insertion into an empty tree.
+ */
+static bool assoc_array_insert_in_empty_tree(struct assoc_array_edit *edit)
+{
+ struct assoc_array_node *new_n0;
+
+ pr_devel("-->%s()\n", __func__);
+
+ new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
+ if (!new_n0)
+ return false;
+
+ edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
+ edit->leaf_p = &new_n0->slots[0];
+ edit->adjust_count_on = new_n0;
+ edit->set[0].ptr = &edit->array->root;
+ edit->set[0].to = assoc_array_node_to_ptr(new_n0);
+
+ pr_devel("<--%s() = ok [no root]\n", __func__);
+ return true;
+}
+
+/*
+ * Handle insertion into a terminal node.
+ */
+static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
+ const struct assoc_array_ops *ops,
+ const void *index_key,
+ struct assoc_array_walk_result *result)
+{
+ struct assoc_array_shortcut *shortcut, *new_s0;
+ struct assoc_array_node *node, *new_n0, *new_n1, *side;
+ struct assoc_array_ptr *ptr;
+ unsigned long dissimilarity, base_seg, blank;
+ size_t keylen;
+ bool have_meta;
+ int level, diff;
+ int slot, next_slot, free_slot, i, j;
+
+ node = result->terminal_node.node;
+ level = result->terminal_node.level;
+ edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = result->terminal_node.slot;
+
+ pr_devel("-->%s()\n", __func__);
+
+ /* We arrived at a node which doesn't have an onward node or shortcut
+ * pointer that we have to follow. This means that (a) the leaf we
+ * want must go here (either by insertion or replacement) or (b) we
+ * need to split this node and insert in one of the fragments.
+ */
+ free_slot = -1;
+
+ /* Firstly, we have to check the leaves in this node to see if there's
+ * a matching one we should replace in place.
+ */
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ ptr = node->slots[i];
+ if (!ptr) {
+ free_slot = i;
+ continue;
+ }
+ if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) {
+ pr_devel("replace in slot %d\n", i);
+ edit->leaf_p = &node->slots[i];
+ edit->dead_leaf = node->slots[i];
+ pr_devel("<--%s() = ok [replace]\n", __func__);
+ return true;
+ }
+ }
+
+ /* If there is a free slot in this node then we can just insert the
+ * leaf here.
+ */
+ if (free_slot >= 0) {
+ pr_devel("insert in free slot %d\n", free_slot);
+ edit->leaf_p = &node->slots[free_slot];
+ edit->adjust_count_on = node;
+ pr_devel("<--%s() = ok [insert]\n", __func__);
+ return true;
+ }
+
+ /* The node has no spare slots - so we're either going to have to split
+ * it or insert another node before it.
+ *
+ * Whatever, we're going to need at least two new nodes - so allocate
+ * those now. We may also need a new shortcut, but we deal with that
+ * when we need it.
+ */
+ new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
+ if (!new_n0)
+ return false;
+ edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
+ new_n1 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
+ if (!new_n1)
+ return false;
+ edit->new_meta[1] = assoc_array_node_to_ptr(new_n1);
+
+ /* We need to find out how similar the leaves are. */
+ pr_devel("no spare slots\n");
+ have_meta = false;
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ ptr = node->slots[i];
+ if (assoc_array_ptr_is_meta(ptr)) {
+ edit->segment_cache[i] = 0xff;
+ have_meta = true;
+ continue;
+ }
+ base_seg = ops->get_object_key_chunk(
+ assoc_array_ptr_to_leaf(ptr), level);
+ base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
+ edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
+ }
+
+ if (have_meta) {
+ pr_devel("have meta\n");
+ goto split_node;
+ }
+
+ /* The node contains only leaves */
+ dissimilarity = 0;
+ base_seg = edit->segment_cache[0];
+ for (i = 1; i < ASSOC_ARRAY_FAN_OUT; i++)
+ dissimilarity |= edit->segment_cache[i] ^ base_seg;
+
+ pr_devel("only leaves; dissimilarity=%lx\n", dissimilarity);
+
+ if ((dissimilarity & ASSOC_ARRAY_FAN_MASK) == 0) {
+ /* The old leaves all cluster in the same slot. We will need
+ * to insert a shortcut if the new node wants to cluster with them.
+ */
+ if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
+ goto all_leaves_cluster_together;
+
+ /* Otherwise we can just insert a new node ahead of the old
+ * one.
+ */
+ goto present_leaves_cluster_but_not_new_leaf;
+ }
+
+split_node:
+ pr_devel("split node\n");
+
+ /* We need to split the current node; we know that the node doesn't
+ * simply contain a full set of leaves that cluster together (it
+ * contains meta pointers and/or non-clustering leaves).
+ *
+ * We need to expel at least two leaves out of a set consisting of the
+ * leaves in the node and the new leaf.
+ *
+ * We need a new node (n0) to replace the current one and a new node to
+ * take the expelled nodes (n1).
+ */
+ edit->set[0].to = assoc_array_node_to_ptr(new_n0);
+ new_n0->back_pointer = node->back_pointer;
+ new_n0->parent_slot = node->parent_slot;
+ new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
+ new_n1->parent_slot = -1; /* Need to calculate this */
+
+do_split_node:
+ pr_devel("do_split_node\n");
+
+ new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
+ new_n1->nr_leaves_on_branch = 0;
+
+ /* Begin by finding two matching leaves. There have to be at least two
+ * that match - even if there are meta pointers - because any leaf that
+ * would match a slot with a meta pointer in it must be somewhere
+ * behind that meta pointer and cannot be here. Further, given N
+ * remaining leaf slots, we now have N+1 leaves to go in them.
+ */
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ slot = edit->segment_cache[i];
+ if (slot != 0xff)
+ for (j = i + 1; j < ASSOC_ARRAY_FAN_OUT + 1; j++)
+ if (edit->segment_cache[j] == slot)
+ goto found_slot_for_multiple_occupancy;
+ }
+found_slot_for_multiple_occupancy:
+ pr_devel("same slot: %x %x [%02x]\n", i, j, slot);
+ BUG_ON(i >= ASSOC_ARRAY_FAN_OUT);
+ BUG_ON(j >= ASSOC_ARRAY_FAN_OUT + 1);
+ BUG_ON(slot >= ASSOC_ARRAY_FAN_OUT);
+
+ new_n1->parent_slot = slot;
+
+ /* Metadata pointers cannot change slot */
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
+ if (assoc_array_ptr_is_meta(node->slots[i]))
+ new_n0->slots[i] = node->slots[i];
+ else
+ new_n0->slots[i] = NULL;
+ BUG_ON(new_n0->slots[slot] != NULL);
+ new_n0->slots[slot] = assoc_array_node_to_ptr(new_n1);
+
+ /* Filter the leaf pointers between the new nodes */
+ free_slot = -1;
+ next_slot = 0;
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ if (assoc_array_ptr_is_meta(node->slots[i]))
+ continue;
+ if (edit->segment_cache[i] == slot) {
+ new_n1->slots[next_slot++] = node->slots[i];
+ new_n1->nr_leaves_on_branch++;
+ } else {
+ do {
+ free_slot++;
+ } while (new_n0->slots[free_slot] != NULL);
+ new_n0->slots[free_slot] = node->slots[i];
+ }
+ }
+
+ pr_devel("filtered: f=%x n=%x\n", free_slot, next_slot);
+
+ if (edit->segment_cache[ASSOC_ARRAY_FAN_OUT] != slot) {
+ do {
+ free_slot++;
+ } while (new_n0->slots[free_slot] != NULL);
+ edit->leaf_p = &new_n0->slots[free_slot];
+ edit->adjust_count_on = new_n0;
+ } else {
+ edit->leaf_p = &new_n1->slots[next_slot++];
+ edit->adjust_count_on = new_n1;
+ }
+
+ BUG_ON(next_slot <= 1);
+
+ edit->set_backpointers_to = assoc_array_node_to_ptr(new_n0);
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ if (edit->segment_cache[i] == 0xff) {
+ ptr = node->slots[i];
+ BUG_ON(assoc_array_ptr_is_leaf(ptr));
+ if (assoc_array_ptr_is_node(ptr)) {
+ side = assoc_array_ptr_to_node(ptr);
+ edit->set_backpointers[i] = &side->back_pointer;
+ } else {
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ edit->set_backpointers[i] = &shortcut->back_pointer;
+ }
+ }
+ }
+
+ ptr = node->back_pointer;
+ if (!ptr)
+ edit->set[0].ptr = &edit->array->root;
+ else if (assoc_array_ptr_is_node(ptr))
+ edit->set[0].ptr = &assoc_array_ptr_to_node(ptr)->slots[node->parent_slot];
+ else
+ edit->set[0].ptr = &assoc_array_ptr_to_shortcut(ptr)->next_node;
+ edit->excised_meta[0] = assoc_array_node_to_ptr(node);
+ pr_devel("<--%s() = ok [split node]\n", __func__);
+ return true;
+
+present_leaves_cluster_but_not_new_leaf:
+ /* All the old leaves cluster in the same slot, but the new leaf wants
+ * to go into a different slot, so we create a new node to hold the new
+ * leaf and a pointer to a new node holding all the old leaves.
+ */
+ pr_devel("present leaves cluster but not new leaf\n");
+
+ new_n0->back_pointer = node->back_pointer;
+ new_n0->parent_slot = node->parent_slot;
+ new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
+ new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
+ new_n1->parent_slot = edit->segment_cache[0];
+ new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch;
+ edit->adjust_count_on = new_n0;
+
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
+ new_n1->slots[i] = node->slots[i];
+
+ new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n1);
+ edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]];
+
+ edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot];
+ edit->set[0].to = assoc_array_node_to_ptr(new_n0);
+ edit->excised_meta[0] = assoc_array_node_to_ptr(node);
+ pr_devel("<--%s() = ok [insert node before]\n", __func__);
+ return true;
+
+all_leaves_cluster_together:
+ /* All the leaves, new and old, want to cluster together in this node
+ * in the same slot, so we have to replace this node with a shortcut to
+ * skip over the identical parts of the key and then place a pair of
+ * nodes, one inside the other, at the end of the shortcut and
+ * distribute the keys between them.
+ *
+ * Firstly we need to work out where the leaves start diverging as a
+ * bit position into their keys so that we know how big the shortcut
+ * needs to be.
+ *
+ * We only need to make a single pass of N of the N+1 leaves because if
+ * any keys differ between themselves at bit X then at least one of
+ * them must also differ with the base key at bit X or before.
+ */
+ pr_devel("all leaves cluster together\n");
+ diff = INT_MAX;
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ int x = ops->diff_objects(assoc_array_ptr_to_leaf(node->slots[i]),
+ index_key);
+ if (x < diff) {
+ BUG_ON(x < 0);
+ diff = x;
+ }
+ }
+ BUG_ON(diff == INT_MAX);
+ BUG_ON(diff < level + ASSOC_ARRAY_LEVEL_STEP);
+
+ keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
+ keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
+
+ new_s0 = kzalloc(sizeof(struct assoc_array_shortcut) +
+ keylen * sizeof(unsigned long), GFP_KERNEL);
+ if (!new_s0)
+ return false;
+ edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s0);
+
+ edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0);
+ new_s0->back_pointer = node->back_pointer;
+ new_s0->parent_slot = node->parent_slot;
+ new_s0->next_node = assoc_array_node_to_ptr(new_n0);
+ new_n0->back_pointer = assoc_array_shortcut_to_ptr(new_s0);
+ new_n0->parent_slot = 0;
+ new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
+ new_n1->parent_slot = -1; /* Need to calculate this */
+
+ new_s0->skip_to_level = level = diff & ~ASSOC_ARRAY_LEVEL_STEP_MASK;
+ pr_devel("skip_to_level = %d [diff %d]\n", level, diff);
+ BUG_ON(level <= 0);
+
+ for (i = 0; i < keylen; i++)
+ new_s0->index_key[i] =
+ ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE);
+
+ blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
+ pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
+ new_s0->index_key[keylen - 1] &= ~blank;
+
+ /* This now reduces to a node splitting exercise for which we'll need
+ * to regenerate the disparity table.
+ */
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ ptr = node->slots[i];
+ base_seg = ops->get_object_key_chunk(assoc_array_ptr_to_leaf(ptr),
+ level);
+ base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
+ edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
+ }
+
+ base_seg = ops->get_key_chunk(index_key, level);
+ base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
+ edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = base_seg & ASSOC_ARRAY_FAN_MASK;
+ goto do_split_node;
+}
+
+/*
+ * Handle insertion into the middle of a shortcut.
+ */
+static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
+ const struct assoc_array_ops *ops,
+ struct assoc_array_walk_result *result)
+{
+ struct assoc_array_shortcut *shortcut, *new_s0, *new_s1;
+ struct assoc_array_node *node, *new_n0, *side;
+ unsigned long sc_segments, dissimilarity, blank;
+ size_t keylen;
+ int level, sc_level, diff;
+ int sc_slot;
+
+ shortcut = result->wrong_shortcut.shortcut;
+ level = result->wrong_shortcut.level;
+ sc_level = result->wrong_shortcut.sc_level;
+ sc_segments = result->wrong_shortcut.sc_segments;
+ dissimilarity = result->wrong_shortcut.dissimilarity;
+
+ pr_devel("-->%s(ix=%d dis=%lx scix=%d)\n",
+ __func__, level, dissimilarity, sc_level);
+
+ /* We need to split a shortcut and insert a node between the two
+ * pieces. Zero-length pieces will be dispensed with entirely.
+ *
+ * First of all, we need to find out in which level the first
+ * difference was.
+ */
+ diff = __ffs(dissimilarity);
+ diff &= ~ASSOC_ARRAY_LEVEL_STEP_MASK;
+ diff += sc_level & ~ASSOC_ARRAY_KEY_CHUNK_MASK;
+ pr_devel("diff=%d\n", diff);
+
+ if (!shortcut->back_pointer) {
+ edit->set[0].ptr = &edit->array->root;
+ } else if (assoc_array_ptr_is_node(shortcut->back_pointer)) {
+ node = assoc_array_ptr_to_node(shortcut->back_pointer);
+ edit->set[0].ptr = &node->slots[shortcut->parent_slot];
+ } else {
+ BUG();
+ }
+
+ edit->excised_meta[0] = assoc_array_shortcut_to_ptr(shortcut);
+
+ /* Create a new node now since we're going to need it anyway */
+ new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
+ if (!new_n0)
+ return false;
+ edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
+ edit->adjust_count_on = new_n0;
+
+ /* Insert a new shortcut before the new node if this segment isn't of
+ * zero length - otherwise we just connect the new node directly to the
+ * parent.
+ */
+ level += ASSOC_ARRAY_LEVEL_STEP;
+ if (diff > level) {
+ pr_devel("pre-shortcut %d...%d\n", level, diff);
+ keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
+ keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
+
+ new_s0 = kzalloc(sizeof(struct assoc_array_shortcut) +
+ keylen * sizeof(unsigned long), GFP_KERNEL);
+ if (!new_s0)
+ return false;
+ edit->new_meta[1] = assoc_array_shortcut_to_ptr(new_s0);
+ edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0);
+ new_s0->back_pointer = shortcut->back_pointer;
+ new_s0->parent_slot = shortcut->parent_slot;
+ new_s0->next_node = assoc_array_node_to_ptr(new_n0);
+ new_s0->skip_to_level = diff;
+
+ new_n0->back_pointer = assoc_array_shortcut_to_ptr(new_s0);
+ new_n0->parent_slot = 0;
+
+ memcpy(new_s0->index_key, shortcut->index_key,
+ keylen * sizeof(unsigned long));
+
+ blank = ULONG_MAX << (diff & ASSOC_ARRAY_KEY_CHUNK_MASK);
+ pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, diff, blank);
+ new_s0->index_key[keylen - 1] &= ~blank;
+ } else {
+ pr_devel("no pre-shortcut\n");
+ edit->set[0].to = assoc_array_node_to_ptr(new_n0);
+ new_n0->back_pointer = shortcut->back_pointer;
+ new_n0->parent_slot = shortcut->parent_slot;
+ }
+
+ side = assoc_array_ptr_to_node(shortcut->next_node);
+ new_n0->nr_leaves_on_branch = side->nr_leaves_on_branch;
+
+ /* We need to know which slot in the new node is going to take a
+ * metadata pointer.
+ */
+ sc_slot = sc_segments >> (diff & ASSOC_ARRAY_KEY_CHUNK_MASK);
+ sc_slot &= ASSOC_ARRAY_FAN_MASK;
+
+ pr_devel("new slot %lx >> %d -> %d\n",
+ sc_segments, diff & ASSOC_ARRAY_KEY_CHUNK_MASK, sc_slot);
+
+ /* Determine whether we need to follow the new node with a replacement
+ * for the current shortcut. We could in theory reuse the current
+ * shortcut if its parent slot number doesn't change - but that's a
+ * 1-in-16 chance so not worth expending the code upon.
+ */
+ level = diff + ASSOC_ARRAY_LEVEL_STEP;
+ if (level < shortcut->skip_to_level) {
+ pr_devel("post-shortcut %d...%d\n", level, shortcut->skip_to_level);
+ keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
+ keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
+
+ new_s1 = kzalloc(sizeof(struct assoc_array_shortcut) +
+ keylen * sizeof(unsigned long), GFP_KERNEL);
+ if (!new_s1)
+ return false;
+ edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s1);
+
+ new_s1->back_pointer = assoc_array_node_to_ptr(new_n0);
+ new_s1->parent_slot = sc_slot;
+ new_s1->next_node = shortcut->next_node;
+ new_s1->skip_to_level = shortcut->skip_to_level;
+
+ new_n0->slots[sc_slot] = assoc_array_shortcut_to_ptr(new_s1);
+
+ memcpy(new_s1->index_key, shortcut->index_key,
+ keylen * sizeof(unsigned long));
+
+ edit->set[1].ptr = &side->back_pointer;
+ edit->set[1].to = assoc_array_shortcut_to_ptr(new_s1);
+ } else {
+ pr_devel("no post-shortcut\n");
+
+ /* We don't have to replace the pointed-to node as long as we
+ * use memory barriers to make sure the parent slot number is
+ * changed before the back pointer (the parent slot number is
+ * irrelevant to the old parent shortcut).
+ */
+ new_n0->slots[sc_slot] = shortcut->next_node;
+ edit->set_parent_slot[0].p = &side->parent_slot;
+ edit->set_parent_slot[0].to = sc_slot;
+ edit->set[1].ptr = &side->back_pointer;
+ edit->set[1].to = assoc_array_node_to_ptr(new_n0);
+ }
+
+ /* Install the new leaf in a spare slot in the new node. */
+ if (sc_slot == 0)
+ edit->leaf_p = &new_n0->slots[1];
+ else
+ edit->leaf_p = &new_n0->slots[0];
+
+ pr_devel("<--%s() = ok [split shortcut]\n", __func__);
+ return true;
+}
+
+/**
+ * assoc_array_insert - Script insertion of an object into an associative array
+ * @array: The array to insert into.
+ * @ops: The operations to use.
+ * @index_key: The key to insert at.
+ * @object: The object to insert.
+ *
+ * Precalculate and preallocate a script for the insertion or replacement of an
+ * object in an associative array. This results in an edit script that can
+ * either be applied or cancelled.
+ *
+ * The function returns a pointer to an edit script, or ERR_PTR(-ENOMEM) if
+ * out of memory.
+ *
+ * The caller should lock against other modifications and must continue to hold
+ * the lock until assoc_array_apply_edit() has been called.
+ *
+ * Accesses to the tree may take place concurrently with this function,
+ * provided they hold the RCU read lock.
+ */
+struct assoc_array_edit *assoc_array_insert(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key,
+ void *object)
+{
+ struct assoc_array_walk_result result;
+ struct assoc_array_edit *edit;
+
+ pr_devel("-->%s()\n", __func__);
+
+ /* The leaf pointer we're given must not have the bottom bit set as we
+ * use those for type-marking the pointer. NULL pointers are also not
+ * allowed as they indicate an empty slot but we have to allow them
+ * here as they can be updated later.
+ */
+ BUG_ON(assoc_array_ptr_is_meta(object));
+
+ edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
+ if (!edit)
+ return ERR_PTR(-ENOMEM);
+ edit->array = array;
+ edit->ops = ops;
+ edit->leaf = assoc_array_leaf_to_ptr(object);
+ edit->adjust_count_by = 1;
+
+ switch (assoc_array_walk(array, ops, index_key, &result)) {
+ case assoc_array_walk_tree_empty:
+ /* Allocate a root node if there isn't one yet */
+ if (!assoc_array_insert_in_empty_tree(edit))
+ goto enomem;
+ return edit;
+
+ case assoc_array_walk_found_terminal_node:
+ /* We found a node that doesn't have a node/shortcut pointer in
+ * the slot corresponding to the index key that we have to
+ * follow.
+ */
+ if (!assoc_array_insert_into_terminal_node(edit, ops, index_key,
+ &result))
+ goto enomem;
+ return edit;
+
+ case assoc_array_walk_found_wrong_shortcut:
+ /* We found a shortcut that didn't match our key in a slot we
+ * needed to follow.
+ */
+ if (!assoc_array_insert_mid_shortcut(edit, ops, &result))
+ goto enomem;
+ return edit;
+ }
+
+enomem:
+ /* Clean up after an out of memory error */
+ pr_devel("enomem\n");
+ assoc_array_cancel_edit(edit);
+ return ERR_PTR(-ENOMEM);
+}
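+
+/* A minimal caller sketch (illustrative - my_array, my_ops, my_lock and the
+ * key/object names are assumptions, not part of the API):
+ *
+ *	struct assoc_array_edit *edit;
+ *
+ *	spin_lock(&my_lock);
+ *	edit = assoc_array_insert(&my_array, &my_ops, key, object);
+ *	if (IS_ERR(edit)) {
+ *		spin_unlock(&my_lock);
+ *		return PTR_ERR(edit);
+ *	}
+ *	assoc_array_apply_edit(edit);
+ *	spin_unlock(&my_lock);
+ */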
+
+/**
+ * assoc_array_insert_set_object - Set the new object pointer in an edit script
+ * @edit: The edit script to modify.
+ * @object: The object pointer to set.
+ *
+ * Change the object to be inserted in an edit script. The object previously
+ * set in the script is not freed. This must be done prior to applying the
+ * script.
+ */
+void assoc_array_insert_set_object(struct assoc_array_edit *edit, void *object)
+{
+ BUG_ON(!object);
+ edit->leaf = assoc_array_leaf_to_ptr(object);
+}
+
+struct assoc_array_delete_collapse_context {
+ struct assoc_array_node *node;
+ const void *skip_leaf;
+ int slot;
+};
+
+/*
+ * Subtree collapse to node iterator.
+ */
+static int assoc_array_delete_collapse_iterator(const void *leaf,
+ void *iterator_data)
+{
+ struct assoc_array_delete_collapse_context *collapse = iterator_data;
+
+ if (leaf == collapse->skip_leaf)
+ return 0;
+
+ BUG_ON(collapse->slot >= ASSOC_ARRAY_FAN_OUT);
+
+ collapse->node->slots[collapse->slot++] = assoc_array_leaf_to_ptr(leaf);
+ return 0;
+}
+
+/**
+ * assoc_array_delete - Script deletion of an object from an associative array
+ * @array: The array to search.
+ * @ops: The operations to use.
+ * @index_key: The key to the object.
+ *
+ * Precalculate and preallocate a script for the deletion of an object from an
+ * associative array. This results in an edit script that can either be
+ * applied or cancelled.
+ *
+ * The function returns a pointer to an edit script if the object was found,
+ * NULL if the object was not found, or ERR_PTR(-ENOMEM) if out of memory.
+ *
+ * The caller should lock against other modifications and must continue to hold
+ * the lock until assoc_array_apply_edit() has been called.
+ *
+ * Accesses to the tree may take place concurrently with this function,
+ * provided they hold the RCU read lock.
+ */
+struct assoc_array_edit *assoc_array_delete(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key)
+{
+ struct assoc_array_delete_collapse_context collapse;
+ struct assoc_array_walk_result result;
+ struct assoc_array_node *node, *new_n0;
+ struct assoc_array_edit *edit;
+ struct assoc_array_ptr *ptr;
+ bool has_meta;
+ int slot, i;
+
+ pr_devel("-->%s()\n", __func__);
+
+ edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
+ if (!edit)
+ return ERR_PTR(-ENOMEM);
+ edit->array = array;
+ edit->ops = ops;
+ edit->adjust_count_by = -1;
+
+ switch (assoc_array_walk(array, ops, index_key, &result)) {
+ case assoc_array_walk_found_terminal_node:
+ /* We found a node that should contain the leaf we've been
+ * asked to remove - *if* it's in the tree.
+ */
+ pr_devel("terminal_node\n");
+ node = result.terminal_node.node;
+
+ for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ ptr = node->slots[slot];
+ if (ptr &&
+ assoc_array_ptr_is_leaf(ptr) &&
+ ops->compare_object(assoc_array_ptr_to_leaf(ptr),
+ index_key))
+ goto found_leaf;
+ }
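+ /* fall through - the object wasn't in the terminal node */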
+ case assoc_array_walk_tree_empty:
+ case assoc_array_walk_found_wrong_shortcut:
+ default:
+ assoc_array_cancel_edit(edit);
+ pr_devel("not found\n");
+ return NULL;
+ }
+
+found_leaf:
+ BUG_ON(array->nr_leaves_on_tree <= 0);
+
+ /* In the simplest form of deletion we just clear the slot and release
+ * the leaf after a suitable interval.
+ */
+ edit->dead_leaf = node->slots[slot];
+ edit->set[0].ptr = &node->slots[slot];
+ edit->set[0].to = NULL;
+ edit->adjust_count_on = node;
+
+ /* If that concludes erasure of the last leaf, then delete the entire
+ * internal array.
+ */
+ if (array->nr_leaves_on_tree == 1) {
+ edit->set[1].ptr = &array->root;
+ edit->set[1].to = NULL;
+ edit->adjust_count_on = NULL;
+ edit->excised_subtree = array->root;
+ pr_devel("all gone\n");
+ return edit;
+ }
+
+ /* However, we'd also like to clear up some metadata blocks if we
+ * possibly can.
+ *
+ * We go for a simple algorithm of: if this node has FAN_OUT or fewer
+ * leaves in it, then attempt to collapse it - and attempt to
+ * recursively collapse up the tree.
+ *
+ * We could also try and collapse in partially filled subtrees to take
+ * up space in this node.
+ */
+ if (node->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT + 1) {
+ struct assoc_array_node *parent, *grandparent;
+ struct assoc_array_ptr *ptr;
+
+ /* First of all, we need to know if this node has metadata so
+ * that we don't try collapsing if all the leaves are already
+ * here.
+ */
+ has_meta = false;
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ ptr = node->slots[i];
+ if (assoc_array_ptr_is_meta(ptr)) {
+ has_meta = true;
+ break;
+ }
+ }
+
+ pr_devel("leaves: %ld [m=%d]\n",
+ node->nr_leaves_on_branch - 1, has_meta);
+
+ /* Look further up the tree to see if we can collapse this node
+ * into a more proximal node too.
+ */
+ parent = node;
+ collapse_up:
+ pr_devel("collapse subtree: %ld\n", parent->nr_leaves_on_branch);
+
+ ptr = parent->back_pointer;
+ if (!ptr)
+ goto do_collapse;
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ struct assoc_array_shortcut *s = assoc_array_ptr_to_shortcut(ptr);
+ ptr = s->back_pointer;
+ if (!ptr)
+ goto do_collapse;
+ }
+
+ grandparent = assoc_array_ptr_to_node(ptr);
+ if (grandparent->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT + 1) {
+ parent = grandparent;
+ goto collapse_up;
+ }
+
+ do_collapse:
+ /* There's no point collapsing if the original node has no meta
+ * pointers to discard and if we didn't merge into one of that
+ * node's ancestry.
+ */
+ if (has_meta || parent != node) {
+ node = parent;
+
+ /* Create a new node to collapse into */
+ new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
+ if (!new_n0)
+ goto enomem;
+ edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
+
+ new_n0->back_pointer = node->back_pointer;
+ new_n0->parent_slot = node->parent_slot;
+ new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
+ edit->adjust_count_on = new_n0;
+
+ collapse.node = new_n0;
+ collapse.skip_leaf = assoc_array_ptr_to_leaf(edit->dead_leaf);
+ collapse.slot = 0;
+ assoc_array_subtree_iterate(assoc_array_node_to_ptr(node),
+ node->back_pointer,
+ assoc_array_delete_collapse_iterator,
+ &collapse);
+ pr_devel("collapsed %d,%lu\n", collapse.slot, new_n0->nr_leaves_on_branch);
+ BUG_ON(collapse.slot != new_n0->nr_leaves_on_branch - 1);
+
+ if (!node->back_pointer) {
+ edit->set[1].ptr = &array->root;
+ } else if (assoc_array_ptr_is_leaf(node->back_pointer)) {
+ BUG();
+ } else if (assoc_array_ptr_is_node(node->back_pointer)) {
+ struct assoc_array_node *p =
+ assoc_array_ptr_to_node(node->back_pointer);
+ edit->set[1].ptr = &p->slots[node->parent_slot];
+ } else if (assoc_array_ptr_is_shortcut(node->back_pointer)) {
+ struct assoc_array_shortcut *s =
+ assoc_array_ptr_to_shortcut(node->back_pointer);
+ edit->set[1].ptr = &s->next_node;
+ }
+ edit->set[1].to = assoc_array_node_to_ptr(new_n0);
+ edit->excised_subtree = assoc_array_node_to_ptr(node);
+ }
+ }
+
+ return edit;
+
+enomem:
+ /* Clean up after an out of memory error */
+ pr_devel("enomem\n");
+ assoc_array_cancel_edit(edit);
+ return ERR_PTR(-ENOMEM);
+}
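+
+/* Deletion sketch (illustrative, same assumed names as the insertion example
+ * above). Note the three-way result: an edit script, NULL for "not found",
+ * or ERR_PTR(-ENOMEM):
+ *
+ *	edit = assoc_array_delete(&my_array, &my_ops, key);
+ *	if (IS_ERR(edit))
+ *		return PTR_ERR(edit);
+ *	if (edit)
+ *		assoc_array_apply_edit(edit);
+ */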
+
+/**
+ * assoc_array_clear - Script deletion of all objects from an associative array
+ * @array: The array to clear.
+ * @ops: The operations to use.
+ *
+ * Precalculate and preallocate a script for the deletion of all the objects
+ * from an associative array. This results in an edit script that can either
+ * be applied or cancelled.
+ *
+ * The function returns a pointer to an edit script if there are objects to be
+ * deleted, NULL if there are no objects in the array, or ERR_PTR(-ENOMEM)
+ * if out of memory.
+ *
+ * The caller should lock against other modifications and must continue to hold
+ * the lock until assoc_array_apply_edit() has been called.
+ *
+ * Accesses to the tree may take place concurrently with this function,
+ * provided they hold the RCU read lock.
+ */
+struct assoc_array_edit *assoc_array_clear(struct assoc_array *array,
+ const struct assoc_array_ops *ops)
+{
+ struct assoc_array_edit *edit;
+
+ pr_devel("-->%s()\n", __func__);
+
+ if (!array->root)
+ return NULL;
+
+ edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
+ if (!edit)
+ return ERR_PTR(-ENOMEM);
+ edit->array = array;
+ edit->ops = ops;
+ edit->set[1].ptr = &array->root;
+ edit->set[1].to = NULL;
+ edit->excised_subtree = array->root;
+ edit->ops_for_excised_subtree = ops;
+ pr_devel("all gone\n");
+ return edit;
+}
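+
+/* Clearing sketch (illustrative): as with deletion, a NULL return means
+ * "nothing to do" rather than an error:
+ *
+ *	edit = assoc_array_clear(&my_array, &my_ops);
+ *	if (IS_ERR(edit))
+ *		return PTR_ERR(edit);
+ *	if (edit)
+ *		assoc_array_apply_edit(edit);
+ */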
+
+/*
+ * Handle the deferred destruction after an applied edit.
+ */
+static void assoc_array_rcu_cleanup(struct rcu_head *head)
+{
+ struct assoc_array_edit *edit =
+ container_of(head, struct assoc_array_edit, rcu);
+ int i;
+
+ pr_devel("-->%s()\n", __func__);
+
+ if (edit->dead_leaf)
+ edit->ops->free_object(assoc_array_ptr_to_leaf(edit->dead_leaf));
+ for (i = 0; i < ARRAY_SIZE(edit->excised_meta); i++)
+ if (edit->excised_meta[i])
+ kfree(assoc_array_ptr_to_node(edit->excised_meta[i]));
+
+ if (edit->excised_subtree) {
+ BUG_ON(assoc_array_ptr_is_leaf(edit->excised_subtree));
+ if (assoc_array_ptr_is_node(edit->excised_subtree)) {
+ struct assoc_array_node *n =
+ assoc_array_ptr_to_node(edit->excised_subtree);
+ n->back_pointer = NULL;
+ } else {
+ struct assoc_array_shortcut *s =
+ assoc_array_ptr_to_shortcut(edit->excised_subtree);
+ s->back_pointer = NULL;
+ }
+ assoc_array_destroy_subtree(edit->excised_subtree,
+ edit->ops_for_excised_subtree);
+ }
+
+ kfree(edit);
+}
+
+/**
+ * assoc_array_apply_edit - Apply an edit script to an associative array
+ * @edit: The script to apply.
+ *
+ * Apply an edit script to an associative array to effect an insertion,
+ * deletion or clearance. As the edit script includes preallocated memory,
+ * this is guaranteed not to fail.
+ *
+ * The edit script, dead objects and dead metadata will be scheduled for
+ * destruction after an RCU grace period to permit those doing read-only
+ * accesses on the array to continue to do so under the RCU read lock whilst
+ * the edit is taking place.
+ */
+void assoc_array_apply_edit(struct assoc_array_edit *edit)
+{
+ struct assoc_array_shortcut *shortcut;
+ struct assoc_array_node *node;
+ struct assoc_array_ptr *ptr;
+ int i;
+
+ pr_devel("-->%s()\n", __func__);
+
+ smp_wmb();
+ if (edit->leaf_p)
+ *edit->leaf_p = edit->leaf;
+
+ smp_wmb();
+ for (i = 0; i < ARRAY_SIZE(edit->set_parent_slot); i++)
+ if (edit->set_parent_slot[i].p)
+ *edit->set_parent_slot[i].p = edit->set_parent_slot[i].to;
+
+ smp_wmb();
+ for (i = 0; i < ARRAY_SIZE(edit->set_backpointers); i++)
+ if (edit->set_backpointers[i])
+ *edit->set_backpointers[i] = edit->set_backpointers_to;
+
+ smp_wmb();
+ for (i = 0; i < ARRAY_SIZE(edit->set); i++)
+ if (edit->set[i].ptr)
+ *edit->set[i].ptr = edit->set[i].to;
+
+ if (edit->array->root == NULL) {
+ edit->array->nr_leaves_on_tree = 0;
+ } else if (edit->adjust_count_on) {
+ node = edit->adjust_count_on;
+ for (;;) {
+ node->nr_leaves_on_branch += edit->adjust_count_by;
+
+ ptr = node->back_pointer;
+ if (!ptr)
+ break;
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ ptr = shortcut->back_pointer;
+ if (!ptr)
+ break;
+ }
+ BUG_ON(!assoc_array_ptr_is_node(ptr));
+ node = assoc_array_ptr_to_node(ptr);
+ }
+
+ edit->array->nr_leaves_on_tree += edit->adjust_count_by;
+ }
+
+ call_rcu(&edit->rcu, assoc_array_rcu_cleanup);
+}
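+
+/* Read-side sketch: lookups such as assoc_array_find() (declared in
+ * <linux/assoc_array.h>) may run concurrently with an edit being applied,
+ * provided they hold the RCU read lock; use_object() is a placeholder:
+ *
+ *	rcu_read_lock();
+ *	object = assoc_array_find(&my_array, &my_ops, key);
+ *	if (object)
+ *		use_object(object);
+ *	rcu_read_unlock();
+ */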
+
+/**
+ * assoc_array_cancel_edit - Discard an edit script.
+ * @edit: The script to discard.
+ *
+ * Free an edit script and all the preallocated data it holds without making
+ * any changes to the associative array it was intended for.
+ *
+ * NOTE! In the case of an insertion script, this does _not_ release the leaf
+ * that was to be inserted. That is left to the caller.
+ */
+void assoc_array_cancel_edit(struct assoc_array_edit *edit)
+{
+ struct assoc_array_ptr *ptr;
+ int i;
+
+ pr_devel("-->%s()\n", __func__);
+
+ /* Clean up after an out of memory error */
+ for (i = 0; i < ARRAY_SIZE(edit->new_meta); i++) {
+ ptr = edit->new_meta[i];
+ if (ptr) {
+ if (assoc_array_ptr_is_node(ptr))
+ kfree(assoc_array_ptr_to_node(ptr));
+ else
+ kfree(assoc_array_ptr_to_shortcut(ptr));
+ }
+ }
+ kfree(edit);
+}
+
+/**
+ * assoc_array_gc - Garbage collect an associative array.
+ * @array: The array to clean.
+ * @ops: The operations to use.
+ * @iterator: A callback function to pass judgement on each object.
+ * @iterator_data: Private data for the callback function.
+ *
+ * Collect garbage from an associative array and pack down the internal tree to
+ * save memory.
+ *
+ * The iterator function is asked to pass judgement upon each object in the
+ * array. If it returns false, the object is discarded; if it returns true,
+ * the object is kept. In the latter case, it must increment the object's
+ * usage count (or do whatever else it needs to do to retain it) before
+ * returning.
+ *
+ * This function returns 0 if successful or -ENOMEM if out of memory. In the
+ * latter case, the array is not changed.
+ *
+ * The caller should lock against other modifications and must continue to hold
+ * the lock until assoc_array_apply_edit() has been called.
+ *
+ * Accesses to the tree may take place concurrently with this function,
+ * provided they hold the RCU read lock.
+ */
+int assoc_array_gc(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ bool (*iterator)(void *object, void *iterator_data),
+ void *iterator_data)
+{
+ struct assoc_array_shortcut *shortcut, *new_s;
+ struct assoc_array_node *node, *new_n;
+ struct assoc_array_edit *edit;
+ struct assoc_array_ptr *cursor, *ptr;
+ struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp;
+ unsigned long nr_leaves_on_tree;
+ int keylen, slot, nr_free, next_slot, i;
+
+ pr_devel("-->%s()\n", __func__);
+
+ if (!array->root)
+ return 0;
+
+ edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
+ if (!edit)
+ return -ENOMEM;
+ edit->array = array;
+ edit->ops = ops;
+ edit->ops_for_excised_subtree = ops;
+ edit->set[0].ptr = &array->root;
+ edit->excised_subtree = array->root;
+
+ new_root = new_parent = NULL;
+ new_ptr_pp = &new_root;
+ cursor = array->root;
+
+descend:
+ /* If this point is a shortcut, then we need to duplicate it and
+ * advance the target cursor.
+ */
+ if (assoc_array_ptr_is_shortcut(cursor)) {
+ shortcut = assoc_array_ptr_to_shortcut(cursor);
+ keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
+ keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
+ new_s = kmalloc(sizeof(struct assoc_array_shortcut) +
+ keylen * sizeof(unsigned long), GFP_KERNEL);
+ if (!new_s)
+ goto enomem;
+ pr_devel("dup shortcut %p -> %p\n", shortcut, new_s);
+ memcpy(new_s, shortcut, (sizeof(struct assoc_array_shortcut) +
+ keylen * sizeof(unsigned long)));
+ new_s->back_pointer = new_parent;
+ new_s->parent_slot = shortcut->parent_slot;
+ *new_ptr_pp = new_parent = assoc_array_shortcut_to_ptr(new_s);
+ new_ptr_pp = &new_s->next_node;
+ cursor = shortcut->next_node;
+ }
+
+ /* Duplicate the node at this position */
+ node = assoc_array_ptr_to_node(cursor);
+ new_n = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
+ if (!new_n)
+ goto enomem;
+ pr_devel("dup node %p -> %p\n", node, new_n);
+ new_n->back_pointer = new_parent;
+ new_n->parent_slot = node->parent_slot;
+ *new_ptr_pp = new_parent = assoc_array_node_to_ptr(new_n);
+ new_ptr_pp = NULL;
+ slot = 0;
+
+continue_node:
+ /* Filter across any leaves and gc any subtrees */
+ for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ ptr = node->slots[slot];
+ if (!ptr)
+ continue;
+
+ if (assoc_array_ptr_is_leaf(ptr)) {
+ if (iterator(assoc_array_ptr_to_leaf(ptr),
+ iterator_data))
+ /* The iterator will have done any reference
+ * counting on the object for us.
+ */
+ new_n->slots[slot] = ptr;
+ continue;
+ }
+
+ new_ptr_pp = &new_n->slots[slot];
+ cursor = ptr;
+ goto descend;
+ }
+
+ pr_devel("-- compress node %p --\n", new_n);
+
+ /* Count up the number of empty slots in this node and work out the
+ * subtree leaf count.
+ */
+ new_n->nr_leaves_on_branch = 0;
+ nr_free = 0;
+ for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ ptr = new_n->slots[slot];
+ if (!ptr)
+ nr_free++;
+ else if (assoc_array_ptr_is_leaf(ptr))
+ new_n->nr_leaves_on_branch++;
+ }
+ pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch);
+
+ /* See what we can fold in */
+ next_slot = 0;
+ for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ struct assoc_array_shortcut *s;
+ struct assoc_array_node *child;
+
+ ptr = new_n->slots[slot];
+ if (!ptr || assoc_array_ptr_is_leaf(ptr))
+ continue;
+
+ s = NULL;
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ s = assoc_array_ptr_to_shortcut(ptr);
+ ptr = s->next_node;
+ }
+
+ child = assoc_array_ptr_to_node(ptr);
+ new_n->nr_leaves_on_branch += child->nr_leaves_on_branch;
+
+ if (child->nr_leaves_on_branch <= nr_free + 1) {
+ /* Fold the child node into this one */
+ pr_devel("[%d] fold node %lu/%d [nx %d]\n",
+ slot, child->nr_leaves_on_branch, nr_free + 1,
+ next_slot);
+
+ /* We would already have reaped an intervening shortcut
+ * on the way back up the tree.
+ */
+ BUG_ON(s);
+
+ new_n->slots[slot] = NULL;
+ nr_free++;
+ if (slot < next_slot)
+ next_slot = slot;
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ struct assoc_array_ptr *p = child->slots[i];
+ if (!p)
+ continue;
+ BUG_ON(assoc_array_ptr_is_meta(p));
+ while (new_n->slots[next_slot])
+ next_slot++;
+ BUG_ON(next_slot >= ASSOC_ARRAY_FAN_OUT);
+ new_n->slots[next_slot++] = p;
+ nr_free--;
+ }
+ kfree(child);
+ } else {
+ pr_devel("[%d] retain node %lu/%d [nx %d]\n",
+ slot, child->nr_leaves_on_branch, nr_free + 1,
+ next_slot);
+ }
+ }
+
+ pr_devel("after: %lu\n", new_n->nr_leaves_on_branch);
+
+ nr_leaves_on_tree = new_n->nr_leaves_on_branch;
+
+ /* Excise this node if it is singly occupied by a shortcut */
+ if (nr_free == ASSOC_ARRAY_FAN_OUT - 1) {
+ for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++)
+ if ((ptr = new_n->slots[slot]))
+ break;
+
+ if (assoc_array_ptr_is_meta(ptr) &&
+ assoc_array_ptr_is_shortcut(ptr)) {
+ pr_devel("excise node %p with 1 shortcut\n", new_n);
+ new_s = assoc_array_ptr_to_shortcut(ptr);
+ new_parent = new_n->back_pointer;
+ slot = new_n->parent_slot;
+ kfree(new_n);
+ if (!new_parent) {
+ new_s->back_pointer = NULL;
+ new_s->parent_slot = 0;
+ new_root = ptr;
+ goto gc_complete;
+ }
+
+ if (assoc_array_ptr_is_shortcut(new_parent)) {
+ /* We can discard any preceding shortcut also */
+ struct assoc_array_shortcut *s =
+ assoc_array_ptr_to_shortcut(new_parent);
+
+ pr_devel("excise preceding shortcut\n");
+
+ new_parent = new_s->back_pointer = s->back_pointer;
+ slot = new_s->parent_slot = s->parent_slot;
+ kfree(s);
+ if (!new_parent) {
+ new_s->back_pointer = NULL;
+ new_s->parent_slot = 0;
+ new_root = ptr;
+ goto gc_complete;
+ }
+ }
+
+ new_s->back_pointer = new_parent;
+ new_s->parent_slot = slot;
+ new_n = assoc_array_ptr_to_node(new_parent);
+ new_n->slots[slot] = ptr;
+ goto ascend_old_tree;
+ }
+ }
+
+ /* Excise any shortcuts we might encounter that point to nodes that
+ * only contain leaves.
+ */
+ ptr = new_n->back_pointer;
+ if (!ptr)
+ goto gc_complete;
+
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ new_s = assoc_array_ptr_to_shortcut(ptr);
+ new_parent = new_s->back_pointer;
+ slot = new_s->parent_slot;
+
+ if (new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
+ struct assoc_array_node *n;
+
+ pr_devel("excise shortcut\n");
+ new_n->back_pointer = new_parent;
+ new_n->parent_slot = slot;
+ kfree(new_s);
+ if (!new_parent) {
+ new_root = assoc_array_node_to_ptr(new_n);
+ goto gc_complete;
+ }
+
+ n = assoc_array_ptr_to_node(new_parent);
+ n->slots[slot] = assoc_array_node_to_ptr(new_n);
+ }
+ } else {
+ new_parent = ptr;
+ }
+ new_n = assoc_array_ptr_to_node(new_parent);
+
+ascend_old_tree:
+ ptr = node->back_pointer;
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ slot = shortcut->parent_slot;
+ cursor = shortcut->back_pointer;
+ } else {
+ slot = node->parent_slot;
+ cursor = ptr;
+ }
+ BUG_ON(!ptr);
+ node = assoc_array_ptr_to_node(cursor);
+ slot++;
+ goto continue_node;
+
+gc_complete:
+ edit->set[0].to = new_root;
+ assoc_array_apply_edit(edit);
+ edit->array->nr_leaves_on_tree = nr_leaves_on_tree;
+ return 0;
+
+enomem:
+ pr_devel("enomem\n");
+ assoc_array_destroy_subtree(new_root, edit->ops);
+ kfree(edit);
+ return -ENOMEM;
+}
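+
+/* GC iterator sketch (illustrative - struct my_object and its usage counter
+ * are assumptions): keep objects that can still be pinned, discard the rest.
+ *
+ *	static bool my_gc_iterator(void *object, void *data)
+ *	{
+ *		struct my_object *obj = object;
+ *
+ *		return atomic_inc_not_zero(&obj->usage);
+ *	}
+ *
+ *	ret = assoc_array_gc(&my_array, &my_ops, my_gc_iterator, NULL);
+ */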
diff --git a/lib/hash.c b/lib/hash.c
new file mode 100644
index 000000000000..fea973f4bd57
--- /dev/null
+++ b/lib/hash.c
@@ -0,0 +1,39 @@
+/* General purpose hashing library
+ *
+ * This is the start of a kernel hashing library, which can be extended
+ * with further algorithms in the future. arch_fast_hash{2,}() will
+ * eventually resolve to an architecture-optimized implementation.
+ *
+ * Copyright 2013 Francesco Fusco <ffusco@redhat.com>
+ * Copyright 2013 Daniel Borkmann <dborkman@redhat.com>
+ * Copyright 2013 Thomas Graf <tgraf@redhat.com>
+ * Licensed under the GNU General Public License, version 2.0 (GPLv2)
+ */
+
+#include <linux/jhash.h>
+#include <linux/hash.h>
+#include <linux/cache.h>
+
+static struct fast_hash_ops arch_hash_ops __read_mostly = {
+ .hash = jhash,
+ .hash2 = jhash2,
+};
+
+u32 arch_fast_hash(const void *data, u32 len, u32 seed)
+{
+ return arch_hash_ops.hash(data, len, seed);
+}
+EXPORT_SYMBOL_GPL(arch_fast_hash);
+
+u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed)
+{
+ return arch_hash_ops.hash2(data, len, seed);
+}
+EXPORT_SYMBOL_GPL(arch_fast_hash2);
+
+static int __init hashlib_init(void)
+{
+ setup_arch_fast_hash(&arch_hash_ops);
+ return 0;
+}
+early_initcall(hashlib_init);
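+
+/* Usage sketch (the flow key struct is illustrative). The hash2 variant
+ * walks the data as u32 words, so the length is given in words and the
+ * key size must be a multiple of 4 bytes:
+ *
+ *	struct flow_key key = { ... };
+ *	u32 h = arch_fast_hash2((const u32 *)&key,
+ *				sizeof(key) / sizeof(u32), seed);
+ */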
diff --git a/lib/lockref.c b/lib/lockref.c
index d2b123f8456b..f07a40d33871 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -1,5 +1,6 @@
#include <linux/export.h>
#include <linux/lockref.h>
+#include <linux/mutex.h>
#if USE_CMPXCHG_LOCKREF
@@ -12,14 +13,6 @@
#endif
/*
- * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
- * This is useful for architectures with an expensive cpu_relax().
- */
-#ifndef arch_mutex_cpu_relax
-# define arch_mutex_cpu_relax() cpu_relax()
-#endif
-
-/*
* Note that the "cmpxchg()" reloads the "old" value for the
* failure case.
*/
diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
index 657979f71bef..bf076d281d40 100644
--- a/lib/mpi/mpiutil.c
+++ b/lib/mpi/mpiutil.c
@@ -121,3 +121,6 @@ void mpi_free(MPI a)
kfree(a);
}
EXPORT_SYMBOL_GPL(mpi_free);
+
+MODULE_DESCRIPTION("Multiprecision maths library");
+MODULE_LICENSE("GPL");
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index b0698ea972c6..9d054bf91d0f 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -117,8 +117,7 @@ static inline void alloc_global_tags(struct percpu_ida *pool,
min(pool->nr_free, pool->percpu_batch_size));
}
-static inline unsigned alloc_local_tag(struct percpu_ida *pool,
- struct percpu_ida_cpu *tags)
+static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags)
{
int tag = -ENOSPC;
@@ -159,7 +158,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
tags = this_cpu_ptr(pool->tag_cpu);
/* Fastpath */
- tag = alloc_local_tag(pool, tags);
+ tag = alloc_local_tag(tags);
if (likely(tag >= 0)) {
local_irq_restore(flags);
return tag;
diff --git a/mm/Kconfig b/mm/Kconfig
index eb69f352401d..723bbe04a0b0 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -543,7 +543,7 @@ config ZSWAP
config MEM_SOFT_DIRTY
bool "Track memory changes"
- depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY
+ depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
select PROC_PAGE_MONITOR
help
This option enables memory changes tracking by introducing a
diff --git a/mm/compaction.c b/mm/compaction.c
index 805165bcd3dd..f58bcd016f43 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -134,6 +134,10 @@ static void update_pageblock_skip(struct compact_control *cc,
bool migrate_scanner)
{
struct zone *zone = cc->zone;
+
+ if (cc->ignore_skip_hint)
+ return;
+
if (!page)
return;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index bccd5a628ea6..7de1bf85f683 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -882,6 +882,10 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
ret = 0;
goto out_unlock;
}
+
+ /* mmap_sem prevents this happening but warn if that changes */
+ WARN_ON(pmd_trans_migrating(pmd));
+
if (unlikely(pmd_trans_splitting(pmd))) {
/* split huge page running from under us */
spin_unlock(src_ptl);
@@ -1243,6 +1247,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
return ERR_PTR(-EFAULT);
+ /* Full NUMA hinting faults to serialise migration in fault paths */
+ if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
+ goto out;
+
page = pmd_page(*pmd);
VM_BUG_ON(!PageHead(page));
if (flags & FOLL_TOUCH) {
@@ -1295,6 +1303,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(!pmd_same(pmd, *pmdp)))
goto out_unlock;
+ /*
+ * If there are potential migrations, wait for completion and retry
+ * without disrupting NUMA hinting information. Do not relock and
+ * check_same as the page may no longer be mapped.
+ */
+ if (unlikely(pmd_trans_migrating(*pmdp))) {
+ spin_unlock(ptl);
+ wait_migrate_huge_page(vma->anon_vma, pmdp);
+ goto out;
+ }
+
page = pmd_page(pmd);
BUG_ON(is_huge_zero_page(page));
page_nid = page_to_nid(page);
@@ -1323,23 +1342,22 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* If the page was locked, there are no parallel migrations */
if (page_locked)
goto clear_pmdnuma;
+ }
- /*
- * Otherwise wait for potential migrations and retry. We do
- * relock and check_same as the page may no longer be mapped.
- * As the fault is being retried, do not account for it.
- */
+ /* Migration could have started since the pmd_trans_migrating check */
+ if (!page_locked) {
spin_unlock(ptl);
wait_on_page_locked(page);
page_nid = -1;
goto out;
}
- /* Page is misplaced, serialise migrations and parallel THP splits */
+ /*
+ * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
+ * to serialise splits
+ */
get_page(page);
spin_unlock(ptl);
- if (!page_locked)
- lock_page(page);
anon_vma = page_lock_anon_vma_read(page);
/* Confirm the PMD did not change while page_table_lock was released */
@@ -1351,6 +1369,13 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto out_unlock;
}
+ /* Bail if we fail to protect against THP splits for any reason */
+ if (unlikely(!anon_vma)) {
+ put_page(page);
+ page_nid = -1;
+ goto clear_pmdnuma;
+ }
+
/*
* Migrate the THP to the requested node, returns with page unlocked
* and pmd_numa cleared.
@@ -1481,8 +1506,18 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
VM_BUG_ON(!pmd_none(*new_pmd));
set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
- if (new_ptl != old_ptl)
+ if (new_ptl != old_ptl) {
+ pgtable_t pgtable;
+
+ /*
+ * Move preallocated PTE page table if new_pmd is on
+ * different PMD page table.
+ */
+ pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
+ pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
+
spin_unlock(new_ptl);
+ }
spin_unlock(old_ptl);
}
out:
@@ -1507,6 +1542,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
ret = 1;
if (!prot_numa) {
entry = pmdp_get_and_clear(mm, addr, pmd);
+ if (pmd_numa(entry))
+ entry = pmd_mknonnuma(entry);
entry = pmd_modify(entry, newprot);
ret = HPAGE_PMD_NR;
BUG_ON(pmd_write(entry));
@@ -1521,7 +1558,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
*/
if (!is_huge_zero_page(page) &&
!pmd_numa(*pmd)) {
- entry = pmdp_get_and_clear(mm, addr, pmd);
+ entry = *pmd;
entry = pmd_mknuma(entry);
ret = HPAGE_PMD_NR;
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7d57af21f49e..dee6cf4e6d34 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -476,40 +476,6 @@ static int vma_has_reserves(struct vm_area_struct *vma, long chg)
return 0;
}
-static void copy_gigantic_page(struct page *dst, struct page *src)
-{
- int i;
- struct hstate *h = page_hstate(src);
- struct page *dst_base = dst;
- struct page *src_base = src;
-
- for (i = 0; i < pages_per_huge_page(h); ) {
- cond_resched();
- copy_highpage(dst, src);
-
- i++;
- dst = mem_map_next(dst, dst_base, i);
- src = mem_map_next(src, src_base, i);
- }
-}
-
-void copy_huge_page(struct page *dst, struct page *src)
-{
- int i;
- struct hstate *h = page_hstate(src);
-
- if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
- copy_gigantic_page(dst, src);
- return;
- }
-
- might_sleep();
- for (i = 0; i < pages_per_huge_page(h); i++) {
- cond_resched();
- copy_highpage(dst + i, src + i);
- }
-}
-
static void enqueue_huge_page(struct hstate *h, struct page *page)
{
int nid = page_to_nid(page);
@@ -736,6 +702,23 @@ int PageHuge(struct page *page)
}
EXPORT_SYMBOL_GPL(PageHuge);
+/*
+ * PageHeadHuge() only returns true for hugetlbfs head page, but not for
+ * normal or transparent huge pages.
+ */
+int PageHeadHuge(struct page *page_head)
+{
+ compound_page_dtor *dtor;
+
+ if (!PageHead(page_head))
+ return 0;
+
+ dtor = get_compound_page_dtor(page_head);
+
+ return dtor == free_huge_page;
+}
+EXPORT_SYMBOL_GPL(PageHeadHuge);
+
pgoff_t __basepage_index(struct page *page)
{
struct page *page_head = compound_head(page);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f1a0ae6e11b8..bf5e89457149 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2694,7 +2694,10 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
goto bypass;
if (unlikely(task_in_memcg_oom(current)))
- goto bypass;
+ goto nomem;
+
+ if (gfp_mask & __GFP_NOFAIL)
+ oom = false;
/*
* We always charge the cgroup the mm_struct belongs to.
@@ -6352,6 +6355,42 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ /*
+ * XXX: css_offline() would be where we should reparent all
+ * memory to prepare the cgroup for destruction. However,
+ * memcg does not do css_tryget() and res_counter charging
+ * under the same RCU lock region, which means that charging
+ * could race with offlining. Offlining only happens to
+ * cgroups with no tasks in them but charges can show up
+ * without any tasks from the swapin path when the target
+ * memcg is looked up from the swapout record and not from the
+ * current task as it usually is. A race like this can leak
+ * charges and put pages with stale cgroup pointers into
+ * circulation:
+ *
+ * #0 #1
+ * lookup_swap_cgroup_id()
+ * rcu_read_lock()
+ * mem_cgroup_lookup()
+ * css_tryget()
+ * rcu_read_unlock()
+ * disable css_tryget()
+ * call_rcu()
+ * offline_css()
+ * reparent_charges()
+ * res_counter_charge()
+ * css_put()
+ * css_free()
+ * pc->mem_cgroup = dead memcg
+ * add page to lru
+ *
+ * The bulk of the charges are still moved in offline_css() to
+ * avoid pinning a lot of pages in case a long-term reference
+ * like a swapout record is deferring the css_free() to long
+ * after offlining. But this makes sure we catch any charges
+ * made after offlining:
+ */
+ mem_cgroup_reparent_charges(memcg);
memcg_destroy_kmem(memcg);
__mem_cgroup_free(memcg);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index b7c171602ba1..db08af92c6fc 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1505,10 +1505,16 @@ static int soft_offline_huge_page(struct page *page, int flags)
if (ret > 0)
ret = -EIO;
} else {
- set_page_hwpoison_huge_page(hpage);
- dequeue_hwpoisoned_huge_page(hpage);
- atomic_long_add(1 << compound_order(hpage),
- &num_poisoned_pages);
+ /* overcommit hugetlb page will be freed to buddy */
+ if (PageHuge(page)) {
+ set_page_hwpoison_huge_page(hpage);
+ dequeue_hwpoisoned_huge_page(hpage);
+ atomic_long_add(1 << compound_order(hpage),
+ &num_poisoned_pages);
+ } else {
+ SetPageHWPoison(page);
+ atomic_long_inc(&num_poisoned_pages);
+ }
}
return ret;
}
diff --git a/mm/memory.c b/mm/memory.c
index 0409e8f43fa0..6768ce9e57d2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4271,14 +4271,7 @@ void copy_user_huge_page(struct page *dst, struct page *src,
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
-#if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS
-static struct kmem_cache *page_ptl_cachep;
-void __init ptlock_cache_init(void)
-{
- page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
- SLAB_PANIC, NULL);
-}
-
+#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
bool ptlock_alloc(struct page *page)
{
spinlock_t *ptl;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index c4403cdf3433..0cd2c4d4e270 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1197,14 +1197,16 @@ static struct page *new_vma_page(struct page *page, unsigned long private, int *
break;
vma = vma->vm_next;
}
+
+ if (PageHuge(page)) {
+ if (vma)
+ return alloc_huge_page_noerr(vma, address, 1);
+ else
+ return NULL;
+ }
/*
- * queue_pages_range() confirms that @page belongs to some vma,
- * so vma shouldn't be NULL.
+ * if !vma, alloc_page_vma() will use task or system default policy
*/
- BUG_ON(!vma);
-
- if (PageHuge(page))
- return alloc_huge_page_noerr(vma, address, 1);
return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else
@@ -1318,7 +1320,7 @@ static long do_mbind(unsigned long start, unsigned long len,
if (nr_failed && (flags & MPOL_MF_STRICT))
err = -EIO;
} else
- putback_lru_pages(&pagelist);
+ putback_movable_pages(&pagelist);
up_write(&mm->mmap_sem);
mpol_out:
@@ -2950,7 +2952,7 @@ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
return;
}
- p += snprintf(p, maxlen, policy_modes[mode]);
+ p += snprintf(p, maxlen, "%s", policy_modes[mode]);
if (flags & MPOL_MODE_FLAGS) {
p += snprintf(p, buffer + maxlen - p, "=");
diff --git a/mm/migrate.c b/mm/migrate.c
index 316e720a2023..9194375b2307 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -36,6 +36,7 @@
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/balloon_compaction.h>
+#include <linux/mmu_notifier.h>
#include <asm/tlbflush.h>
@@ -316,14 +317,15 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
*/
int migrate_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page,
- struct buffer_head *head, enum migrate_mode mode)
+ struct buffer_head *head, enum migrate_mode mode,
+ int extra_count)
{
- int expected_count = 0;
+ int expected_count = 1 + extra_count;
void **pslot;
if (!mapping) {
/* Anonymous page without mapping */
- if (page_count(page) != 1)
+ if (page_count(page) != expected_count)
return -EAGAIN;
return MIGRATEPAGE_SUCCESS;
}
@@ -333,7 +335,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
pslot = radix_tree_lookup_slot(&mapping->page_tree,
page_index(page));
- expected_count = 2 + page_has_private(page);
+ expected_count += 1 + page_has_private(page);
if (page_count(page) != expected_count ||
radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
spin_unlock_irq(&mapping->tree_lock);
@@ -442,6 +444,54 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
}
/*
+ * Gigantic pages are so large that we do not guarantee that page++ pointer
+ * arithmetic will work across the entire page. We need something more
+ * specialized.
+ */
+static void __copy_gigantic_page(struct page *dst, struct page *src,
+ int nr_pages)
+{
+ int i;
+ struct page *dst_base = dst;
+ struct page *src_base = src;
+
+ for (i = 0; i < nr_pages; ) {
+ cond_resched();
+ copy_highpage(dst, src);
+
+ i++;
+ dst = mem_map_next(dst, dst_base, i);
+ src = mem_map_next(src, src_base, i);
+ }
+}
+
+static void copy_huge_page(struct page *dst, struct page *src)
+{
+ int i;
+ int nr_pages;
+
+ if (PageHuge(src)) {
+ /* hugetlbfs page */
+ struct hstate *h = page_hstate(src);
+ nr_pages = pages_per_huge_page(h);
+
+ if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
+ __copy_gigantic_page(dst, src, nr_pages);
+ return;
+ }
+ } else {
+ /* thp page */
+ BUG_ON(!PageTransHuge(src));
+ nr_pages = hpage_nr_pages(src);
+ }
+
+ for (i = 0; i < nr_pages; i++) {
+ cond_resched();
+ copy_highpage(dst + i, src + i);
+ }
+}
+
+/*
* Copy the page to its new location
*/
void migrate_page_copy(struct page *newpage, struct page *page)
@@ -535,7 +585,7 @@ int migrate_page(struct address_space *mapping,
BUG_ON(PageWriteback(page)); /* Writeback must be complete */
- rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
+ rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
if (rc != MIGRATEPAGE_SUCCESS)
return rc;
@@ -562,7 +612,7 @@ int buffer_migrate_page(struct address_space *mapping,
head = page_buffers(page);
- rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);
+ rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
if (rc != MIGRATEPAGE_SUCCESS)
return rc;
@@ -1606,6 +1656,18 @@ int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
return 1;
}
+bool pmd_trans_migrating(pmd_t pmd)
+{
+ struct page *page = pmd_page(pmd);
+ return PageLocked(page);
+}
+
+void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
+{
+ struct page *page = pmd_page(*pmd);
+ wait_on_page_locked(page);
+}
+
/*
* Attempt to migrate a misplaced page to the specified destination
* node. Caller is expected to have an elevated reference count on
@@ -1668,12 +1730,14 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
struct page *page, int node)
{
spinlock_t *ptl;
- unsigned long haddr = address & HPAGE_PMD_MASK;
pg_data_t *pgdat = NODE_DATA(node);
int isolated = 0;
struct page *new_page = NULL;
struct mem_cgroup *memcg = NULL;
int page_lru = page_is_file_cache(page);
+ unsigned long mmun_start = address & HPAGE_PMD_MASK;
+ unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
+ pmd_t orig_entry;
/*
* Rate-limit the amount of data that is being migrated to a node.
@@ -1696,6 +1760,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
goto out_fail;
}
+ if (mm_tlb_flush_pending(mm))
+ flush_tlb_range(vma, mmun_start, mmun_end);
+
/* Prepare a page as a migration target */
__set_page_locked(new_page);
SetPageSwapBacked(new_page);
@@ -1707,9 +1774,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
WARN_ON(PageLRU(new_page));
/* Recheck the target PMD */
+ mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
ptl = pmd_lock(mm, pmd);
- if (unlikely(!pmd_same(*pmd, entry))) {
+ if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
+fail_putback:
spin_unlock(ptl);
+ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
/* Reverse changes made by migrate_page_copy() */
if (TestClearPageActive(new_page))
@@ -1726,7 +1796,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
putback_lru_page(page);
mod_zone_page_state(page_zone(page),
NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
- goto out_fail;
+
+ goto out_unlock;
}
/*
@@ -1738,16 +1809,35 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
*/
mem_cgroup_prepare_migration(page, new_page, &memcg);
+ orig_entry = *pmd;
entry = mk_pmd(new_page, vma->vm_page_prot);
- entry = pmd_mknonnuma(entry);
- entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
entry = pmd_mkhuge(entry);
+ entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
- pmdp_clear_flush(vma, haddr, pmd);
- set_pmd_at(mm, haddr, pmd, entry);
- page_add_new_anon_rmap(new_page, vma, haddr);
+ /*
+ * Clear the old entry under pagetable lock and establish the new PTE.
+ * Any parallel GUP will either observe the old page blocking on the
+ * page lock, block on the page table lock or observe the new page.
+ * The SetPageUptodate on the new page and page_add_new_anon_rmap
+ * guarantee the copy is visible before the pagetable update.
+ */
+ flush_cache_range(vma, mmun_start, mmun_end);
+ page_add_new_anon_rmap(new_page, vma, mmun_start);
+ pmdp_clear_flush(vma, mmun_start, pmd);
+ set_pmd_at(mm, mmun_start, pmd, entry);
+ flush_tlb_range(vma, mmun_start, mmun_end);
update_mmu_cache_pmd(vma, address, &entry);
+
+ if (page_count(page) != 2) {
+ set_pmd_at(mm, mmun_start, pmd, orig_entry);
+ flush_tlb_range(vma, mmun_start, mmun_end);
+ update_mmu_cache_pmd(vma, address, &entry);
+ page_remove_rmap(new_page);
+ goto fail_putback;
+ }
+
page_remove_rmap(page);
+
/*
* Finish the charge transaction under the page table lock to
* prevent split_huge_page() from dividing up the charge
@@ -1755,6 +1845,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
*/
mem_cgroup_end_migration(memcg, page, new_page, true);
spin_unlock(ptl);
+ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
unlock_page(new_page);
unlock_page(page);
@@ -1772,10 +1863,15 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
out_fail:
count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
out_dropref:
- entry = pmd_mknonnuma(entry);
- set_pmd_at(mm, haddr, pmd, entry);
- update_mmu_cache_pmd(vma, address, &entry);
+ ptl = pmd_lock(mm, pmd);
+ if (pmd_same(*pmd, entry)) {
+ entry = pmd_mknonnuma(entry);
+ set_pmd_at(mm, mmun_start, pmd, entry);
+ update_mmu_cache_pmd(vma, address, &entry);
+ }
+ spin_unlock(ptl);
+out_unlock:
unlock_page(page);
put_page(page);
return 0;
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 26667971c824..bb53a6591aea 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -52,17 +52,21 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
pte_t ptent;
bool updated = false;
- ptent = ptep_modify_prot_start(mm, addr, pte);
if (!prot_numa) {
+ ptent = ptep_modify_prot_start(mm, addr, pte);
+ if (pte_numa(ptent))
+ ptent = pte_mknonnuma(ptent);
ptent = pte_modify(ptent, newprot);
updated = true;
} else {
struct page *page;
+ ptent = *pte;
page = vm_normal_page(vma, addr, oldpte);
if (page) {
if (!pte_numa(oldpte)) {
ptent = pte_mknuma(ptent);
+ set_pte_at(mm, addr, pte, ptent);
updated = true;
}
}
@@ -79,7 +83,10 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
if (updated)
pages++;
- ptep_modify_prot_commit(mm, addr, pte, ptent);
+
+ /* Only !prot_numa always clears the pte */
+ if (!prot_numa)
+ ptep_modify_prot_commit(mm, addr, pte, ptent);
} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
swp_entry_t entry = pte_to_swp_entry(oldpte);
@@ -181,6 +188,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
BUG_ON(addr >= end);
pgd = pgd_offset(mm, addr);
flush_cache_range(vma, addr, end);
+ set_tlb_flush_pending(mm);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
@@ -192,6 +200,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
/* Only flush the TLB if we actually modified any entries: */
if (pages)
flush_tlb_range(vma, start, end);
+ clear_tlb_flush_pending(mm);
return pages;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 580a5f075ed0..5248fe070aa4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1816,7 +1816,7 @@ static void zlc_clear_zones_full(struct zonelist *zonelist)
static bool zone_local(struct zone *local_zone, struct zone *zone)
{
- return node_distance(local_zone->node, zone->node) == LOCAL_DISTANCE;
+ return local_zone->node == zone->node;
}
static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
@@ -1913,18 +1913,17 @@ zonelist_scan:
* page was allocated in should have no effect on the
* time the page has in memory before being reclaimed.
*
- * When zone_reclaim_mode is enabled, try to stay in
- * local zones in the fastpath. If that fails, the
- * slowpath is entered, which will do another pass
- * starting with the local zones, but ultimately fall
- * back to remote zones that do not partake in the
- * fairness round-robin cycle of this zonelist.
+ * Try to stay in local zones in the fastpath. If
+ * that fails, the slowpath is entered, which will do
+ * another pass starting with the local zones, but
+ * ultimately fall back to remote zones that do not
+ * partake in the fairness round-robin cycle of this
+ * zonelist.
*/
if (alloc_flags & ALLOC_WMARK_LOW) {
if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
continue;
- if (zone_reclaim_mode &&
- !zone_local(preferred_zone, zone))
+ if (!zone_local(preferred_zone, zone))
continue;
}
/*
@@ -2390,7 +2389,7 @@ static void prepare_slowpath(gfp_t gfp_mask, unsigned int order,
* thrash fairness information for zones that are not
* actually part of this zonelist's round-robin cycle.
*/
- if (zone_reclaim_mode && !zone_local(preferred_zone, zone))
+ if (!zone_local(preferred_zone, zone))
continue;
mod_zone_page_state(zone, NR_ALLOC_BATCH,
high_wmark_pages(zone) -
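
With zone_reclaim_mode no longer consulted, the fairness pass hinges on two tests: the per-zone allocation batch and the simplified zone_local(), which is now a plain node-id comparison. A standalone restatement of the fastpath check (hypothetical types, not kernel code):

    struct zone_like {
            int node;               /* NUMA node this zone belongs to */
            long alloc_batch;       /* NR_ALLOC_BATCH analogue */
    };

    /* Returns nonzero when the zone may satisfy a low-watermark
     * (fastpath) allocation under the fairness policy above. */
    static int fastpath_eligible(const struct zone_like *preferred,
                                 const struct zone_like *zone)
    {
            if (zone->alloc_batch <= 0)
                    return 0;                       /* batch exhausted */
            return preferred->node == zone->node;   /* new zone_local() */
    }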
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index cbb38545d9d6..a8b919925934 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -110,9 +110,10 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
pte_t *ptep)
{
+ struct mm_struct *mm = (vma)->vm_mm;
pte_t pte;
- pte = ptep_get_and_clear((vma)->vm_mm, address, ptep);
- if (pte_accessible(pte))
+ pte = ptep_get_and_clear(mm, address, ptep);
+ if (pte_accessible(mm, pte))
flush_tlb_page(vma, address);
return pte;
}
@@ -191,6 +192,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
+ pmd_t entry = *pmdp;
+ if (pmd_numa(entry))
+ entry = pmd_mknonnuma(entry);
- set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
+ set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
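
Passing the mm to pte_accessible() lets an architecture treat a just-cleared entry as still accessible while a deferred flush (the tlb_flush_pending flag above) is in flight, so ptep_clear_flush() does not skip the TLB flush. In the same spirit, pmdp_invalidate() now clears the NUMA bit before writing the not-present pmd, so the transient entry cannot be mistaken for a NUMA hint. A self-contained analogue of the intended semantics (the exact arch macro differs):

    #include <stdbool.h>

    struct mm_like  { bool tlb_flush_pending; };
    struct pte_like { bool present; };

    static bool pte_accessible_like(const struct mm_like *mm,
                                    const struct pte_like *pte)
    {
            /* Present entries are accessible; so is anything caught in a
             * pending batched flush, since stale TLB entries may remain. */
            return pte->present || mm->tlb_flush_pending;
    }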
diff --git a/mm/rmap.c b/mm/rmap.c
index 55c8b8dc9ffb..068522d8502a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -600,7 +600,11 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
spinlock_t *ptl;
if (unlikely(PageHuge(page))) {
+ /* when pud is not present, pte will be NULL */
pte = huge_pte_offset(mm, address);
+ if (!pte)
+ return NULL;
+
ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
goto check;
}
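
huge_pte_offset() returns NULL when the covering PUD is not populated, and huge_pte_lockptr() would dereference that pointer, so the lookup must be guarded before a lock is derived from it. A trivial standalone form of the guard (hypothetical names):

    #include <stddef.h>

    typedef struct { int word; } pte_like;

    /* May fail: models huge_pte_offset() seeing a non-present pud. */
    static pte_like *lookup(unsigned long addr) { (void)addr; return NULL; }

    static int walk(unsigned long addr)
    {
            pte_like *pte = lookup(addr);

            if (!pte)               /* the guard added by the hunk above */
                    return -1;
            return pte->word;       /* now safe to use or derive a lock from */
    }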
diff --git a/mm/shmem.c b/mm/shmem.c
index 8297623fcaed..902a14842b74 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2918,13 +2918,8 @@ static struct dentry_operations anon_ops = {
.d_dname = simple_dname
};
-/**
- * shmem_file_setup - get an unlinked file living in tmpfs
- * @name: name for dentry (to be seen in /proc/<pid>/maps
- * @size: size to be set for the file
- * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
- */
-struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
+static struct file *__shmem_file_setup(const char *name, loff_t size,
+ unsigned long flags, unsigned int i_flags)
{
struct file *res;
struct inode *inode;
@@ -2957,6 +2952,7 @@ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags
if (!inode)
goto put_dentry;
+ inode->i_flags |= i_flags;
d_instantiate(path.dentry, inode);
inode->i_size = size;
clear_nlink(inode); /* It is unlinked */
@@ -2977,6 +2973,32 @@ put_memory:
shmem_unacct_size(flags, size);
return res;
}
+
+/**
+ * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
+ * kernel internal. There will be NO LSM permission checks against the
+ * underlying inode. So users of this interface must do LSM checks at a
+ * higher layer. The one user is the big_key implementation. LSM checks
+ * are provided at the key level rather than the inode level.
+ * @name: name for dentry (to be seen in /proc/<pid>/maps)
+ * @size: size to be set for the file
+ * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
+ */
+struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
+{
+ return __shmem_file_setup(name, size, flags, S_PRIVATE);
+}
+
+/**
+ * shmem_file_setup - get an unlinked file living in tmpfs
+ * @name: name for dentry (to be seen in /proc/<pid>/maps)
+ * @size: size to be set for the file
+ * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
+ */
+struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
+{
+ return __shmem_file_setup(name, size, flags, 0);
+}
EXPORT_SYMBOL_GPL(shmem_file_setup);
/**
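
A hypothetical caller sketch (kernel-context, not compilable standalone and not taken from the patch) of the new helper: a big_key-style user creates a kernel-internal tmpfs file and then enforces its own permissions at a higher layer, since S_PRIVATE makes LSMs skip the inode.

    /* Hypothetical usage sketch -- error handling trimmed to essentials. */
    static struct file *big_key_backing_store(size_t datalen)
    {
            struct file *file;

            file = shmem_kernel_file_setup("", datalen, 0);
            if (IS_ERR(file))
                    return file;    /* no LSM checks were applied to the inode */

            /* ... the key subsystem performs its own permission checks ... */
            return file;
    }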
diff --git a/mm/slab.c b/mm/slab.c
index 0c8967bb2018..eb043bf05f4c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -164,72 +164,6 @@
static bool pfmemalloc_active __read_mostly;
/*
- * kmem_bufctl_t:
- *
- * Bufctl's are used for linking objs within a slab
- * linked offsets.
- *
- * This implementation relies on "struct page" for locating the cache &
- * slab an object belongs to.
- * This allows the bufctl structure to be small (one int), but limits
- * the number of objects a slab (not a cache) can contain when off-slab
- * bufctls are used. The limit is the size of the largest general cache
- * that does not use off-slab slabs.
- * For 32bit archs with 4 kB pages, is this 56.
- * This is not serious, as it is only for large objects, when it is unwise
- * to have too many per slab.
- * Note: This limit can be raised by introducing a general cache whose size
- * is less than 512 (PAGE_SIZE<<3), but greater than 256.
- */
-
-typedef unsigned int kmem_bufctl_t;
-#define BUFCTL_END (((kmem_bufctl_t)(~0U))-0)
-#define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1)
-#define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2)
-#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3)
-
-/*
- * struct slab_rcu
- *
- * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
- * arrange for kmem_freepages to be called via RCU. This is useful if
- * we need to approach a kernel structure obliquely, from its address
- * obtained without the usual locking. We can lock the structure to
- * stabilize it and check it's still at the given address, only if we
- * can be sure that the memory has not been meanwhile reused for some
- * other kind of object (which our subsystem's lock might corrupt).
- *
- * rcu_read_lock before reading the address, then rcu_read_unlock after
- * taking the spinlock within the structure expected at that address.
- */
-struct slab_rcu {
- struct rcu_head head;
- struct kmem_cache *cachep;
- void *addr;
-};
-
-/*
- * struct slab
- *
- * Manages the objs in a slab. Placed either at the beginning of mem allocated
- * for a slab, or allocated from an general cache.
- * Slabs are chained into three list: fully used, partial, fully free slabs.
- */
-struct slab {
- union {
- struct {
- struct list_head list;
- unsigned long colouroff;
- void *s_mem; /* including colour offset */
- unsigned int inuse; /* num of objs active in slab */
- kmem_bufctl_t free;
- unsigned short nodeid;
- };
- struct slab_rcu __slab_cover_slab_rcu;
- };
-};
-
-/*
* struct array_cache
*
* Purpose:
@@ -456,18 +390,10 @@ static inline struct kmem_cache *virt_to_cache(const void *obj)
return page->slab_cache;
}
-static inline struct slab *virt_to_slab(const void *obj)
-{
- struct page *page = virt_to_head_page(obj);
-
- VM_BUG_ON(!PageSlab(page));
- return page->slab_page;
-}
-
-static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
+static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
unsigned int idx)
{
- return slab->s_mem + cache->size * idx;
+ return page->s_mem + cache->size * idx;
}
/*
@@ -477,9 +403,9 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
* reciprocal_divide(offset, cache->reciprocal_buffer_size)
*/
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
- const struct slab *slab, void *obj)
+ const struct page *page, void *obj)
{
- u32 offset = (obj - slab->s_mem);
+ u32 offset = (obj - page->s_mem);
return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
@@ -641,7 +567,7 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
- return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
+ return ALIGN(nr_objs * sizeof(unsigned int), align);
}
/*
@@ -660,8 +586,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
* on it. For the latter case, the memory allocated for a
* slab is used for:
*
- * - The struct slab
- * - One kmem_bufctl_t for each object
+ * - One unsigned int for each object
* - Padding to respect alignment of @align
* - @buffer_size bytes for each object
*
@@ -674,8 +599,6 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
mgmt_size = 0;
nr_objs = slab_size / buffer_size;
- if (nr_objs > SLAB_LIMIT)
- nr_objs = SLAB_LIMIT;
} else {
/*
* Ignore padding for the initial guess. The padding
@@ -685,8 +608,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
* into the memory allocation when taking the padding
* into account.
*/
- nr_objs = (slab_size - sizeof(struct slab)) /
- (buffer_size + sizeof(kmem_bufctl_t));
+ nr_objs = (slab_size) / (buffer_size + sizeof(unsigned int));
/*
* This calculated number will be either the right
@@ -696,9 +618,6 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
> slab_size)
nr_objs--;
- if (nr_objs > SLAB_LIMIT)
- nr_objs = SLAB_LIMIT;
-
mgmt_size = slab_mgmt_size(nr_objs, align);
}
*num = nr_objs;
@@ -829,10 +748,8 @@ static struct array_cache *alloc_arraycache(int node, int entries,
return nc;
}
-static inline bool is_slab_pfmemalloc(struct slab *slabp)
+static inline bool is_slab_pfmemalloc(struct page *page)
{
- struct page *page = virt_to_page(slabp->s_mem);
-
return PageSlabPfmemalloc(page);
}
@@ -841,23 +758,23 @@ static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
struct array_cache *ac)
{
struct kmem_cache_node *n = cachep->node[numa_mem_id()];
- struct slab *slabp;
+ struct page *page;
unsigned long flags;
if (!pfmemalloc_active)
return;
spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(slabp, &n->slabs_full, list)
- if (is_slab_pfmemalloc(slabp))
+ list_for_each_entry(page, &n->slabs_full, lru)
+ if (is_slab_pfmemalloc(page))
goto out;
- list_for_each_entry(slabp, &n->slabs_partial, list)
- if (is_slab_pfmemalloc(slabp))
+ list_for_each_entry(page, &n->slabs_partial, lru)
+ if (is_slab_pfmemalloc(page))
goto out;
- list_for_each_entry(slabp, &n->slabs_free, list)
- if (is_slab_pfmemalloc(slabp))
+ list_for_each_entry(page, &n->slabs_free, lru)
+ if (is_slab_pfmemalloc(page))
goto out;
pfmemalloc_active = false;
@@ -897,8 +814,8 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
*/
n = cachep->node[numa_mem_id()];
if (!list_empty(&n->slabs_free) && force_refill) {
- struct slab *slabp = virt_to_slab(objp);
- ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem));
+ struct page *page = virt_to_head_page(objp);
+ ClearPageSlabPfmemalloc(page);
clear_obj_pfmemalloc(&objp);
recheck_pfmemalloc_active(cachep, ac);
return objp;
@@ -1099,8 +1016,7 @@ static void drain_alien_cache(struct kmem_cache *cachep,
static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
- struct slab *slabp = virt_to_slab(objp);
- int nodeid = slabp->nodeid;
+ int nodeid = page_to_nid(virt_to_page(objp));
struct kmem_cache_node *n;
struct array_cache *alien = NULL;
int node;
@@ -1111,7 +1027,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
* Make sure we are not freeing an object from another node to the array
* cache on this cpu.
*/
- if (likely(slabp->nodeid == node))
+ if (likely(nodeid == node))
return 0;
n = cachep->node[node];
@@ -1512,6 +1428,8 @@ void __init kmem_cache_init(void)
{
int i;
+ BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
+ sizeof(struct rcu_head));
kmem_cache = &kmem_cache_boot;
setup_node_pointer(kmem_cache);
@@ -1687,7 +1605,7 @@ static noinline void
slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
{
struct kmem_cache_node *n;
- struct slab *slabp;
+ struct page *page;
unsigned long flags;
int node;
@@ -1706,15 +1624,15 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
continue;
spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(slabp, &n->slabs_full, list) {
+ list_for_each_entry(page, &n->slabs_full, lru) {
active_objs += cachep->num;
active_slabs++;
}
- list_for_each_entry(slabp, &n->slabs_partial, list) {
- active_objs += slabp->inuse;
+ list_for_each_entry(page, &n->slabs_partial, lru) {
+ active_objs += page->active;
active_slabs++;
}
- list_for_each_entry(slabp, &n->slabs_free, list)
+ list_for_each_entry(page, &n->slabs_free, lru)
num_slabs++;
free_objects += n->free_objects;
@@ -1736,19 +1654,11 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
* did not request dmaable memory, we might get it, but that
* would be relatively rare and ignorable.
*/
-static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
+static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
+ int nodeid)
{
struct page *page;
int nr_pages;
- int i;
-
-#ifndef CONFIG_MMU
- /*
- * Nommu uses slab's for process anonymous memory allocations, and thus
- * requires __GFP_COMP to properly refcount higher order allocations
- */
- flags |= __GFP_COMP;
-#endif
flags |= cachep->allocflags;
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
@@ -1772,12 +1682,9 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
else
add_zone_page_state(page_zone(page),
NR_SLAB_UNRECLAIMABLE, nr_pages);
- for (i = 0; i < nr_pages; i++) {
- __SetPageSlab(page + i);
-
- if (page->pfmemalloc)
- SetPageSlabPfmemalloc(page + i);
- }
+ __SetPageSlab(page);
+ if (page->pfmemalloc)
+ SetPageSlabPfmemalloc(page);
memcg_bind_pages(cachep, cachep->gfporder);
if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
@@ -1789,17 +1696,15 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
kmemcheck_mark_unallocated_pages(page, nr_pages);
}
- return page_address(page);
+ return page;
}
/*
* Interface to system's page release.
*/
-static void kmem_freepages(struct kmem_cache *cachep, void *addr)
+static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
{
- unsigned long i = (1 << cachep->gfporder);
- struct page *page = virt_to_page(addr);
- const unsigned long nr_freed = i;
+ const unsigned long nr_freed = (1 << cachep->gfporder);
kmemcheck_free_shadow(page, cachep->gfporder);
@@ -1809,27 +1714,28 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
else
sub_zone_page_state(page_zone(page),
NR_SLAB_UNRECLAIMABLE, nr_freed);
- while (i--) {
- BUG_ON(!PageSlab(page));
- __ClearPageSlabPfmemalloc(page);
- __ClearPageSlab(page);
- page++;
- }
+
+ BUG_ON(!PageSlab(page));
+ __ClearPageSlabPfmemalloc(page);
+ __ClearPageSlab(page);
+ page_mapcount_reset(page);
+ page->mapping = NULL;
memcg_release_pages(cachep, cachep->gfporder);
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += nr_freed;
- free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder);
+ __free_memcg_kmem_pages(page, cachep->gfporder);
}
static void kmem_rcu_free(struct rcu_head *head)
{
- struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
- struct kmem_cache *cachep = slab_rcu->cachep;
+ struct kmem_cache *cachep;
+ struct page *page;
- kmem_freepages(cachep, slab_rcu->addr);
- if (OFF_SLAB(cachep))
- kmem_cache_free(cachep->slabp_cache, slab_rcu);
+ page = container_of(head, struct page, rcu_head);
+ cachep = page->slab_cache;
+
+ kmem_freepages(cachep, page);
}
#if DEBUG
@@ -1978,19 +1884,19 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
/* Print some data about the neighboring objects, if they
* exist:
*/
- struct slab *slabp = virt_to_slab(objp);
+ struct page *page = virt_to_head_page(objp);
unsigned int objnr;
- objnr = obj_to_index(cachep, slabp, objp);
+ objnr = obj_to_index(cachep, page, objp);
if (objnr) {
- objp = index_to_obj(cachep, slabp, objnr - 1);
+ objp = index_to_obj(cachep, page, objnr - 1);
realobj = (char *)objp + obj_offset(cachep);
printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
realobj, size);
print_objinfo(cachep, objp, 2);
}
if (objnr + 1 < cachep->num) {
- objp = index_to_obj(cachep, slabp, objnr + 1);
+ objp = index_to_obj(cachep, page, objnr + 1);
realobj = (char *)objp + obj_offset(cachep);
printk(KERN_ERR "Next obj: start=%p, len=%d\n",
realobj, size);
@@ -2001,11 +1907,12 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
#endif
#if DEBUG
-static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
+static void slab_destroy_debugcheck(struct kmem_cache *cachep,
+ struct page *page)
{
int i;
for (i = 0; i < cachep->num; i++) {
- void *objp = index_to_obj(cachep, slabp, i);
+ void *objp = index_to_obj(cachep, page, i);
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
@@ -2030,7 +1937,8 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab
}
}
#else
-static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
+static void slab_destroy_debugcheck(struct kmem_cache *cachep,
+ struct page *page)
{
}
#endif
@@ -2044,23 +1952,34 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab
* Before calling the slab must have been unlinked from the cache. The
* cache-lock is not held/needed.
*/
-static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
+static void slab_destroy(struct kmem_cache *cachep, struct page *page)
{
- void *addr = slabp->s_mem - slabp->colouroff;
+ void *freelist;
- slab_destroy_debugcheck(cachep, slabp);
+ freelist = page->freelist;
+ slab_destroy_debugcheck(cachep, page);
if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
- struct slab_rcu *slab_rcu;
+ struct rcu_head *head;
+
+ /*
+ * RCU free overloads the RCU head over the LRU.
+ * slab_page was also overloaded over the LRU,
+ * but it is no longer used from this point on,
+ * so we can reuse that space safely.
+ */
+ head = (void *)&page->rcu_head;
+ call_rcu(head, kmem_rcu_free);
- slab_rcu = (struct slab_rcu *)slabp;
- slab_rcu->cachep = cachep;
- slab_rcu->addr = addr;
- call_rcu(&slab_rcu->head, kmem_rcu_free);
} else {
- kmem_freepages(cachep, addr);
- if (OFF_SLAB(cachep))
- kmem_cache_free(cachep->slabp_cache, slabp);
+ kmem_freepages(cachep, page);
}
+
+ /*
+ * The freelist is no longer used from here on, even
+ * though the actual page may be freed in RCU context.
+ */
+ if (OFF_SLAB(cachep))
+ kmem_cache_free(cachep->freelist_cache, freelist);
}
/**
@@ -2097,8 +2016,8 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
* use off-slab slabs. Needed to avoid a possible
* looping condition in cache_grow().
*/
- offslab_limit = size - sizeof(struct slab);
- offslab_limit /= sizeof(kmem_bufctl_t);
+ offslab_limit = size;
+ offslab_limit /= sizeof(unsigned int);
if (num > offslab_limit)
break;
@@ -2220,7 +2139,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
int
__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
{
- size_t left_over, slab_size, ralign;
+ size_t left_over, freelist_size, ralign;
gfp_t gfp;
int err;
size_t size = cachep->size;
@@ -2339,22 +2258,21 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
if (!cachep->num)
return -E2BIG;
- slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
- + sizeof(struct slab), cachep->align);
+ freelist_size =
+ ALIGN(cachep->num * sizeof(unsigned int), cachep->align);
/*
* If the slab has been placed off-slab, and we have enough space then
* move it on-slab. This is at the expense of any extra colouring.
*/
- if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
+ if (flags & CFLGS_OFF_SLAB && left_over >= freelist_size) {
flags &= ~CFLGS_OFF_SLAB;
- left_over -= slab_size;
+ left_over -= freelist_size;
}
if (flags & CFLGS_OFF_SLAB) {
/* really off slab. No need for manual alignment */
- slab_size =
- cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
+ freelist_size = cachep->num * sizeof(unsigned int);
#ifdef CONFIG_PAGE_POISONING
/* If we're going to use the generic kernel_map_pages()
@@ -2371,16 +2289,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
if (cachep->colour_off < cachep->align)
cachep->colour_off = cachep->align;
cachep->colour = left_over / cachep->colour_off;
- cachep->slab_size = slab_size;
+ cachep->freelist_size = freelist_size;
cachep->flags = flags;
- cachep->allocflags = 0;
+ cachep->allocflags = __GFP_COMP;
if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
cachep->allocflags |= GFP_DMA;
cachep->size = size;
cachep->reciprocal_buffer_size = reciprocal_value(size);
if (flags & CFLGS_OFF_SLAB) {
- cachep->slabp_cache = kmalloc_slab(slab_size, 0u);
+ cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
/*
* This is a possibility for one of the malloc_sizes caches.
* But since we go off slab only for object size greater than
@@ -2388,7 +2306,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
* this should not happen at all.
* But leave a BUG_ON for some lucky dude.
*/
- BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
+ BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache));
}
err = setup_cpu_cache(cachep, gfp);
@@ -2494,7 +2412,7 @@ static int drain_freelist(struct kmem_cache *cache,
{
struct list_head *p;
int nr_freed;
- struct slab *slabp;
+ struct page *page;
nr_freed = 0;
while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
@@ -2506,18 +2424,18 @@ static int drain_freelist(struct kmem_cache *cache,
goto out;
}
- slabp = list_entry(p, struct slab, list);
+ page = list_entry(p, struct page, lru);
#if DEBUG
- BUG_ON(slabp->inuse);
+ BUG_ON(page->active);
#endif
- list_del(&slabp->list);
+ list_del(&page->lru);
/*
* Safe to drop the lock. The slab is no longer linked
* to the cache.
*/
n->free_objects -= cache->num;
spin_unlock_irq(&n->list_lock);
- slab_destroy(cache, slabp);
+ slab_destroy(cache, page);
nr_freed++;
}
out:
@@ -2600,52 +2518,42 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
* descriptors in kmem_cache_create, we search through the malloc_sizes array.
* If we are creating a malloc_sizes cache here it would not be visible to
* kmem_find_general_cachep till the initialization is complete.
- * Hence we cannot have slabp_cache same as the original cache.
+ * Hence we cannot have freelist_cache same as the original cache.
*/
-static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
- int colour_off, gfp_t local_flags,
- int nodeid)
+static void *alloc_slabmgmt(struct kmem_cache *cachep,
+ struct page *page, int colour_off,
+ gfp_t local_flags, int nodeid)
{
- struct slab *slabp;
+ void *freelist;
+ void *addr = page_address(page);
if (OFF_SLAB(cachep)) {
/* Slab management obj is off-slab. */
- slabp = kmem_cache_alloc_node(cachep->slabp_cache,
+ freelist = kmem_cache_alloc_node(cachep->freelist_cache,
local_flags, nodeid);
- /*
- * If the first object in the slab is leaked (it's allocated
- * but no one has a reference to it), we want to make sure
- * kmemleak does not treat the ->s_mem pointer as a reference
- * to the object. Otherwise we will not report the leak.
- */
- kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
- local_flags);
- if (!slabp)
+ if (!freelist)
return NULL;
} else {
- slabp = objp + colour_off;
- colour_off += cachep->slab_size;
+ freelist = addr + colour_off;
+ colour_off += cachep->freelist_size;
}
- slabp->inuse = 0;
- slabp->colouroff = colour_off;
- slabp->s_mem = objp + colour_off;
- slabp->nodeid = nodeid;
- slabp->free = 0;
- return slabp;
+ page->active = 0;
+ page->s_mem = addr + colour_off;
+ return freelist;
}
-static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
+static inline unsigned int *slab_freelist(struct page *page)
{
- return (kmem_bufctl_t *) (slabp + 1);
+ return (unsigned int *)(page->freelist);
}
static void cache_init_objs(struct kmem_cache *cachep,
- struct slab *slabp)
+ struct page *page)
{
int i;
for (i = 0; i < cachep->num; i++) {
- void *objp = index_to_obj(cachep, slabp, i);
+ void *objp = index_to_obj(cachep, page, i);
#if DEBUG
/* need to poison the objs? */
if (cachep->flags & SLAB_POISON)
@@ -2681,9 +2589,8 @@ static void cache_init_objs(struct kmem_cache *cachep,
if (cachep->ctor)
cachep->ctor(objp);
#endif
- slab_bufctl(slabp)[i] = i + 1;
+ slab_freelist(page)[i] = i;
}
- slab_bufctl(slabp)[i - 1] = BUFCTL_END;
}
static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
@@ -2696,41 +2603,41 @@ static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
}
}
-static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
+static void *slab_get_obj(struct kmem_cache *cachep, struct page *page,
int nodeid)
{
- void *objp = index_to_obj(cachep, slabp, slabp->free);
- kmem_bufctl_t next;
+ void *objp;
- slabp->inuse++;
- next = slab_bufctl(slabp)[slabp->free];
+ objp = index_to_obj(cachep, page, slab_freelist(page)[page->active]);
+ page->active++;
#if DEBUG
- slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
- WARN_ON(slabp->nodeid != nodeid);
+ WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
#endif
- slabp->free = next;
return objp;
}
-static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
+static void slab_put_obj(struct kmem_cache *cachep, struct page *page,
void *objp, int nodeid)
{
- unsigned int objnr = obj_to_index(cachep, slabp, objp);
-
+ unsigned int objnr = obj_to_index(cachep, page, objp);
#if DEBUG
+ unsigned int i;
+
/* Verify that the slab belongs to the intended node */
- WARN_ON(slabp->nodeid != nodeid);
+ WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
- if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
- printk(KERN_ERR "slab: double free detected in cache "
- "'%s', objp %p\n", cachep->name, objp);
- BUG();
+ /* Check for a double free */
+ for (i = page->active; i < cachep->num; i++) {
+ if (slab_freelist(page)[i] == objnr) {
+ printk(KERN_ERR "slab: double free detected in cache "
+ "'%s', objp %p\n", cachep->name, objp);
+ BUG();
+ }
}
#endif
- slab_bufctl(slabp)[objnr] = slabp->free;
- slabp->free = objnr;
- slabp->inuse--;
+ page->active--;
+ slab_freelist(page)[page->active] = objnr;
}
/*
@@ -2738,23 +2645,11 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
* for the slab allocator to be able to lookup the cache and slab of a
* virtual address for kfree, ksize, and slab debugging.
*/
-static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
- void *addr)
+static void slab_map_pages(struct kmem_cache *cache, struct page *page,
+ void *freelist)
{
- int nr_pages;
- struct page *page;
-
- page = virt_to_page(addr);
-
- nr_pages = 1;
- if (likely(!PageCompound(page)))
- nr_pages <<= cache->gfporder;
-
- do {
- page->slab_cache = cache;
- page->slab_page = slab;
- page++;
- } while (--nr_pages);
+ page->slab_cache = cache;
+ page->freelist = freelist;
}
/*
@@ -2762,9 +2657,9 @@ static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
* kmem_cache_alloc() when there are no active objs left in a cache.
*/
static int cache_grow(struct kmem_cache *cachep,
- gfp_t flags, int nodeid, void *objp)
+ gfp_t flags, int nodeid, struct page *page)
{
- struct slab *slabp;
+ void *freelist;
size_t offset;
gfp_t local_flags;
struct kmem_cache_node *n;
@@ -2805,20 +2700,20 @@ static int cache_grow(struct kmem_cache *cachep,
* Get mem for the objs. Attempt to allocate a physical page from
* 'nodeid'.
*/
- if (!objp)
- objp = kmem_getpages(cachep, local_flags, nodeid);
- if (!objp)
+ if (!page)
+ page = kmem_getpages(cachep, local_flags, nodeid);
+ if (!page)
goto failed;
/* Get slab management. */
- slabp = alloc_slabmgmt(cachep, objp, offset,
+ freelist = alloc_slabmgmt(cachep, page, offset,
local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
- if (!slabp)
+ if (!freelist)
goto opps1;
- slab_map_pages(cachep, slabp, objp);
+ slab_map_pages(cachep, page, freelist);
- cache_init_objs(cachep, slabp);
+ cache_init_objs(cachep, page);
if (local_flags & __GFP_WAIT)
local_irq_disable();
@@ -2826,13 +2721,13 @@ static int cache_grow(struct kmem_cache *cachep,
spin_lock(&n->list_lock);
/* Make slab active. */
- list_add_tail(&slabp->list, &(n->slabs_free));
+ list_add_tail(&page->lru, &(n->slabs_free));
STATS_INC_GROWN(cachep);
n->free_objects += cachep->num;
spin_unlock(&n->list_lock);
return 1;
opps1:
- kmem_freepages(cachep, objp);
+ kmem_freepages(cachep, page);
failed:
if (local_flags & __GFP_WAIT)
local_irq_disable();
@@ -2880,9 +2775,8 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
unsigned long caller)
{
- struct page *page;
unsigned int objnr;
- struct slab *slabp;
+ struct page *page;
BUG_ON(virt_to_cache(objp) != cachep);
@@ -2890,8 +2784,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
kfree_debugcheck(objp);
page = virt_to_head_page(objp);
- slabp = page->slab_page;
-
if (cachep->flags & SLAB_RED_ZONE) {
verify_redzone_free(cachep, objp);
*dbg_redzone1(cachep, objp) = RED_INACTIVE;
@@ -2900,14 +2792,11 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
if (cachep->flags & SLAB_STORE_USER)
*dbg_userword(cachep, objp) = (void *)caller;
- objnr = obj_to_index(cachep, slabp, objp);
+ objnr = obj_to_index(cachep, page, objp);
BUG_ON(objnr >= cachep->num);
- BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
+ BUG_ON(objp != index_to_obj(cachep, page, objnr));
-#ifdef CONFIG_DEBUG_SLAB_LEAK
- slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
-#endif
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
@@ -2924,33 +2813,9 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
return objp;
}
-static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
-{
- kmem_bufctl_t i;
- int entries = 0;
-
- /* Check slab's freelist to see if this obj is there. */
- for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
- entries++;
- if (entries > cachep->num || i >= cachep->num)
- goto bad;
- }
- if (entries != cachep->num - slabp->inuse) {
-bad:
- printk(KERN_ERR "slab: Internal list corruption detected in "
- "cache '%s'(%d), slabp %p(%d). Tainted(%s). Hexdump:\n",
- cachep->name, cachep->num, slabp, slabp->inuse,
- print_tainted());
- print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, slabp,
- sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t),
- 1);
- BUG();
- }
-}
#else
#define kfree_debugcheck(x) do { } while(0)
#define cache_free_debugcheck(x,objp,z) (objp)
-#define check_slabp(x,y) do { } while(0)
#endif
static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
@@ -2989,7 +2854,7 @@ retry:
while (batchcount > 0) {
struct list_head *entry;
- struct slab *slabp;
+ struct page *page;
/* Get slab alloc is to come from. */
entry = n->slabs_partial.next;
if (entry == &n->slabs_partial) {
@@ -2999,8 +2864,7 @@ retry:
goto must_grow;
}
- slabp = list_entry(entry, struct slab, list);
- check_slabp(cachep, slabp);
+ page = list_entry(entry, struct page, lru);
check_spinlock_acquired(cachep);
/*
@@ -3008,24 +2872,23 @@ retry:
* there must be at least one object available for
* allocation.
*/
- BUG_ON(slabp->inuse >= cachep->num);
+ BUG_ON(page->active >= cachep->num);
- while (slabp->inuse < cachep->num && batchcount--) {
+ while (page->active < cachep->num && batchcount--) {
STATS_INC_ALLOCED(cachep);
STATS_INC_ACTIVE(cachep);
STATS_SET_HIGH(cachep);
- ac_put_obj(cachep, ac, slab_get_obj(cachep, slabp,
+ ac_put_obj(cachep, ac, slab_get_obj(cachep, page,
node));
}
- check_slabp(cachep, slabp);
/* move slabp to correct slabp list: */
- list_del(&slabp->list);
- if (slabp->free == BUFCTL_END)
- list_add(&slabp->list, &n->slabs_full);
+ list_del(&page->lru);
+ if (page->active == cachep->num)
+ list_add(&page->lru, &n->slabs_full);
else
- list_add(&slabp->list, &n->slabs_partial);
+ list_add(&page->lru, &n->slabs_partial);
}
must_grow:
@@ -3097,16 +2960,6 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
*dbg_redzone1(cachep, objp) = RED_ACTIVE;
*dbg_redzone2(cachep, objp) = RED_ACTIVE;
}
-#ifdef CONFIG_DEBUG_SLAB_LEAK
- {
- struct slab *slabp;
- unsigned objnr;
-
- slabp = virt_to_head_page(objp)->slab_page;
- objnr = (unsigned)(objp - slabp->s_mem) / cachep->size;
- slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
- }
-#endif
objp += obj_offset(cachep);
if (cachep->ctor && cachep->flags & SLAB_POISON)
cachep->ctor(objp);
@@ -3248,18 +3101,20 @@ retry:
* We may trigger various forms of reclaim on the allowed
* set and go into memory reserves if necessary.
*/
+ struct page *page;
+
if (local_flags & __GFP_WAIT)
local_irq_enable();
kmem_flagcheck(cache, flags);
- obj = kmem_getpages(cache, local_flags, numa_mem_id());
+ page = kmem_getpages(cache, local_flags, numa_mem_id());
if (local_flags & __GFP_WAIT)
local_irq_disable();
- if (obj) {
+ if (page) {
/*
* Insert into the appropriate per node queues
*/
- nid = page_to_nid(virt_to_page(obj));
- if (cache_grow(cache, flags, nid, obj)) {
+ nid = page_to_nid(page);
+ if (cache_grow(cache, flags, nid, page)) {
obj = ____cache_alloc_node(cache,
flags | GFP_THISNODE, nid);
if (!obj)
@@ -3288,7 +3143,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
int nodeid)
{
struct list_head *entry;
- struct slab *slabp;
+ struct page *page;
struct kmem_cache_node *n;
void *obj;
int x;
@@ -3308,26 +3163,24 @@ retry:
goto must_grow;
}
- slabp = list_entry(entry, struct slab, list);
+ page = list_entry(entry, struct page, lru);
check_spinlock_acquired_node(cachep, nodeid);
- check_slabp(cachep, slabp);
STATS_INC_NODEALLOCS(cachep);
STATS_INC_ACTIVE(cachep);
STATS_SET_HIGH(cachep);
- BUG_ON(slabp->inuse == cachep->num);
+ BUG_ON(page->active == cachep->num);
- obj = slab_get_obj(cachep, slabp, nodeid);
- check_slabp(cachep, slabp);
+ obj = slab_get_obj(cachep, page, nodeid);
n->free_objects--;
/* move slabp to correct slabp list: */
- list_del(&slabp->list);
+ list_del(&page->lru);
- if (slabp->free == BUFCTL_END)
- list_add(&slabp->list, &n->slabs_full);
+ if (page->active == cachep->num)
+ list_add(&page->lru, &n->slabs_full);
else
- list_add(&slabp->list, &n->slabs_partial);
+ list_add(&page->lru, &n->slabs_partial);
spin_unlock(&n->list_lock);
goto done;
@@ -3477,23 +3330,21 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
for (i = 0; i < nr_objects; i++) {
void *objp;
- struct slab *slabp;
+ struct page *page;
clear_obj_pfmemalloc(&objpp[i]);
objp = objpp[i];
- slabp = virt_to_slab(objp);
+ page = virt_to_head_page(objp);
n = cachep->node[node];
- list_del(&slabp->list);
+ list_del(&page->lru);
check_spinlock_acquired_node(cachep, node);
- check_slabp(cachep, slabp);
- slab_put_obj(cachep, slabp, objp, node);
+ slab_put_obj(cachep, page, objp, node);
STATS_DEC_ACTIVE(cachep);
n->free_objects++;
- check_slabp(cachep, slabp);
/* fixup slab chains */
- if (slabp->inuse == 0) {
+ if (page->active == 0) {
if (n->free_objects > n->free_limit) {
n->free_objects -= cachep->num;
/* No need to drop any previously held
@@ -3502,16 +3353,16 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
* a different cache, refer to comments before
* alloc_slabmgmt.
*/
- slab_destroy(cachep, slabp);
+ slab_destroy(cachep, page);
} else {
- list_add(&slabp->list, &n->slabs_free);
+ list_add(&page->lru, &n->slabs_free);
}
} else {
/* Unconditionally move a slab to the end of the
* partial list on free - maximum time for the
* other objects to be freed, too.
*/
- list_add_tail(&slabp->list, &n->slabs_partial);
+ list_add_tail(&page->lru, &n->slabs_partial);
}
}
}
@@ -3551,10 +3402,10 @@ free_done:
p = n->slabs_free.next;
while (p != &(n->slabs_free)) {
- struct slab *slabp;
+ struct page *page;
- slabp = list_entry(p, struct slab, list);
- BUG_ON(slabp->inuse);
+ page = list_entry(p, struct page, lru);
+ BUG_ON(page->active);
i++;
p = p->next;
@@ -4158,7 +4009,7 @@ out:
#ifdef CONFIG_SLABINFO
void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
{
- struct slab *slabp;
+ struct page *page;
unsigned long active_objs;
unsigned long num_objs;
unsigned long active_slabs = 0;
@@ -4178,23 +4029,23 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
check_irq_on();
spin_lock_irq(&n->list_lock);
- list_for_each_entry(slabp, &n->slabs_full, list) {
- if (slabp->inuse != cachep->num && !error)
+ list_for_each_entry(page, &n->slabs_full, lru) {
+ if (page->active != cachep->num && !error)
error = "slabs_full accounting error";
active_objs += cachep->num;
active_slabs++;
}
- list_for_each_entry(slabp, &n->slabs_partial, list) {
- if (slabp->inuse == cachep->num && !error)
- error = "slabs_partial inuse accounting error";
- if (!slabp->inuse && !error)
- error = "slabs_partial/inuse accounting error";
- active_objs += slabp->inuse;
+ list_for_each_entry(page, &n->slabs_partial, lru) {
+ if (page->active == cachep->num && !error)
+ error = "slabs_partial accounting error";
+ if (!page->active && !error)
+ error = "slabs_partial accounting error";
+ active_objs += page->active;
active_slabs++;
}
- list_for_each_entry(slabp, &n->slabs_free, list) {
- if (slabp->inuse && !error)
- error = "slabs_free/inuse accounting error";
+ list_for_each_entry(page, &n->slabs_free, lru) {
+ if (page->active && !error)
+ error = "slabs_free accounting error";
num_slabs++;
}
free_objects += n->free_objects;
@@ -4346,15 +4197,27 @@ static inline int add_caller(unsigned long *n, unsigned long v)
return 1;
}
-static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
+static void handle_slab(unsigned long *n, struct kmem_cache *c,
+ struct page *page)
{
void *p;
- int i;
+ int i, j;
+
if (n[0] == n[1])
return;
- for (i = 0, p = s->s_mem; i < c->num; i++, p += c->size) {
- if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
+ for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
+ bool active = true;
+
+ for (j = page->active; j < c->num; j++) {
+ /* Skip freed item */
+ if (slab_freelist(page)[j] == i) {
+ active = false;
+ break;
+ }
+ }
+ if (!active)
continue;
+
if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
return;
}
@@ -4379,7 +4242,7 @@ static void show_symbol(struct seq_file *m, unsigned long address)
static int leaks_show(struct seq_file *m, void *p)
{
struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
- struct slab *slabp;
+ struct page *page;
struct kmem_cache_node *n;
const char *name;
unsigned long *x = m->private;
@@ -4403,10 +4266,10 @@ static int leaks_show(struct seq_file *m, void *p)
check_irq_on();
spin_lock_irq(&n->list_lock);
- list_for_each_entry(slabp, &n->slabs_full, list)
- handle_slab(x, cachep, slabp);
- list_for_each_entry(slabp, &n->slabs_partial, list)
- handle_slab(x, cachep, slabp);
+ list_for_each_entry(page, &n->slabs_full, lru)
+ handle_slab(x, cachep, page);
+ list_for_each_entry(page, &n->slabs_partial, lru)
+ handle_slab(x, cachep, page);
spin_unlock_irq(&n->list_lock);
}
name = cachep->name;
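
The net effect of the slab.c series is that the per-slab metadata (formerly struct slab plus a kmem_bufctl_t chain) collapses into struct page plus a bare array of object indexes: freelist[active..num-1] names the free objects, allocation consumes from the front, and free pushes an index back. A self-contained userspace demo of that bookkeeping (hypothetical, not kernel code):

    #include <stdio.h>

    #define NUM_OBJS 8U

    static unsigned int freelist[NUM_OBJS];   /* slab_freelist(page) analogue */
    static unsigned int active;               /* page->active analogue */

    static void cache_init_objs_like(void)
    {
            for (unsigned int i = 0; i < NUM_OBJS; i++)
                    freelist[i] = i;
            active = 0;
    }

    static int slab_get_obj_like(void)
    {
            if (active == NUM_OBJS)
                    return -1;                /* slab is full */
            return (int)freelist[active++];   /* take the next free index */
    }

    static void slab_put_obj_like(unsigned int objnr)
    {
            freelist[--active] = objnr;       /* push the index back */
    }

    int main(void)
    {
            cache_init_objs_like();
            int a = slab_get_obj_like();
            int b = slab_get_obj_like();
            printf("allocated objects %d and %d\n", a, b);
            slab_put_obj_like((unsigned int)a);
            printf("reallocated object %d\n", slab_get_obj_like());
            return 0;
    }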
diff --git a/mm/slub.c b/mm/slub.c
index 7e8bd8d828bc..545a170ebf9f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -155,7 +155,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
/*
* Maximum number of desirable partial slabs.
* The existence of more partial slabs makes kmem_cache_shrink
- * sort the partial list by the number of objects in the.
+ * sort the partial list by the number of objects in use.
*/
#define MAX_PARTIAL 10
@@ -933,6 +933,16 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
* Hooks for other subsystems that check memory allocations. In a typical
* production configuration these hooks all should produce no code at all.
*/
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+ kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+ kmemleak_free(x);
+}
+
static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
{
flags &= gfp_allowed_mask;
@@ -1217,8 +1227,8 @@ static unsigned long kmem_cache_flags(unsigned long object_size,
/*
* Enable debugging if selected on the kernel commandline.
*/
- if (slub_debug && (!slub_debug_slabs ||
- !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
+ if (slub_debug && (!slub_debug_slabs || (name &&
+ !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
flags |= slub_debug;
return flags;
@@ -1260,13 +1270,30 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
static inline void dec_slabs_node(struct kmem_cache *s, int node,
int objects) {}
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+ kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+ kmemleak_free(x);
+}
+
static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
{ return 0; }
static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
- void *object) {}
+ void *object)
+{
+ kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
+ flags & gfp_allowed_mask);
+}
-static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
+static inline void slab_free_hook(struct kmem_cache *s, void *x)
+{
+ kmemleak_free_recursive(x, s->flags);
+}
#endif /* CONFIG_SLUB_DEBUG */
@@ -2829,8 +2856,8 @@ static struct kmem_cache *kmem_cache_node;
* slab on the node for this slabcache. There are no concurrent accesses
* possible.
*
- * Note that this function only works on the kmalloc_node_cache
- * when allocating for the kmalloc_node_cache. This is used for bootstrapping
+ * Note that this function only works on the kmem_cache_node
+ * when allocating for the kmem_cache_node. This is used for bootstrapping
* memory on a fresh node that has no slab structures yet.
*/
static void early_kmem_cache_node_alloc(int node)
@@ -3272,7 +3299,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
if (page)
ptr = page_address(page);
- kmemleak_alloc(ptr, size, 1, flags);
+ kmalloc_large_node_hook(ptr, size, flags);
return ptr;
}
@@ -3336,7 +3363,7 @@ void kfree(const void *x)
page = virt_to_head_page(x);
if (unlikely(!PageSlab(page))) {
BUG_ON(!PageCompound(page));
- kmemleak_free(x);
+ kfree_hook(x);
__free_memcg_kmem_pages(page, compound_order(page));
return;
}
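
The kmem_cache_flags() hunk guards the strncmp() against caches created with a NULL name; the kmemleak calls are likewise folded into hooks that exist in both debug configurations so leak tracking keeps working when SLUB debugging is compiled out. A standalone restatement of the name guard:

    #include <string.h>
    #include <stdbool.h>

    /* Mirrors the fixed condition: debug every cache when no list is
     * given, otherwise match only named caches with the requested prefix. */
    static bool slub_debug_enabled(const char *debug_slabs, const char *name)
    {
            if (!debug_slabs)
                    return true;
            return name && !strncmp(debug_slabs, name, strlen(debug_slabs));
    }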
diff --git a/mm/swap.c b/mm/swap.c
index 7a9f80d451f5..84b26aaabd03 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -82,19 +82,6 @@ static void __put_compound_page(struct page *page)
static void put_compound_page(struct page *page)
{
- /*
- * hugetlbfs pages cannot be split from under us. If this is a
- * hugetlbfs page, check refcount on head page and release the page if
- * the refcount becomes zero.
- */
- if (PageHuge(page)) {
- page = compound_head(page);
- if (put_page_testzero(page))
- __put_compound_page(page);
-
- return;
- }
-
if (unlikely(PageTail(page))) {
/* __split_huge_page_refcount can run under us */
struct page *page_head = compound_trans_head(page);
@@ -111,14 +98,31 @@ static void put_compound_page(struct page *page)
* still hot on arches that do not support
* this_cpu_cmpxchg_double().
*/
- if (PageSlab(page_head)) {
- if (PageTail(page)) {
+ if (PageSlab(page_head) || PageHeadHuge(page_head)) {
+ if (likely(PageTail(page))) {
+ /*
+ * __split_huge_page_refcount
+ * cannot race here.
+ */
+ VM_BUG_ON(!PageHead(page_head));
+ atomic_dec(&page->_mapcount);
if (put_page_testzero(page_head))
VM_BUG_ON(1);
-
- atomic_dec(&page->_mapcount);
- goto skip_lock_tail;
+ if (put_page_testzero(page_head))
+ __put_compound_page(page_head);
+ return;
} else
+ /*
+ * __split_huge_page_refcount
+ * run before us, "page" was a
+ * THP tail. The split
+ * page_head has been freed
+ * and reallocated as slab or
+ * hugetlbfs page of smaller
+ * order (only possible if
+ * reallocated as slab on
+ * x86).
+ */
goto skip_lock;
}
/*
@@ -132,8 +136,27 @@ static void put_compound_page(struct page *page)
/* __split_huge_page_refcount run before us */
compound_unlock_irqrestore(page_head, flags);
skip_lock:
- if (put_page_testzero(page_head))
- __put_single_page(page_head);
+ if (put_page_testzero(page_head)) {
+ /*
+ * The head page may have been
+ * freed and reallocated as a
+ * compound page of smaller
+ * order and then freed again.
+ * All we know is that it
+ * cannot have become: a THP
+ * page, a compound page of
+ * higher order, a tail page.
+ * That is because we still
+ * hold the refcount of the
+ * split THP tail and
+ * page_head was the THP head
+ * before the split.
+ */
+ if (PageHead(page_head))
+ __put_compound_page(page_head);
+ else
+ __put_single_page(page_head);
+ }
out_put_single:
if (put_page_testzero(page))
__put_single_page(page);
@@ -155,7 +178,6 @@ out_put_single:
VM_BUG_ON(atomic_read(&page->_count) != 0);
compound_unlock_irqrestore(page_head, flags);
-skip_lock_tail:
if (put_page_testzero(page_head)) {
if (PageHead(page_head))
__put_compound_page(page_head);
@@ -198,51 +220,52 @@ bool __get_page_tail(struct page *page)
* proper PT lock that already serializes against
* split_huge_page().
*/
+ unsigned long flags;
bool got = false;
- struct page *page_head;
-
- /*
- * If this is a hugetlbfs page it cannot be split under us. Simply
- * increment refcount for the head page.
- */
- if (PageHuge(page)) {
- page_head = compound_head(page);
- atomic_inc(&page_head->_count);
- got = true;
- } else {
- unsigned long flags;
+ struct page *page_head = compound_trans_head(page);
- page_head = compound_trans_head(page);
- if (likely(page != page_head &&
- get_page_unless_zero(page_head))) {
-
- /* Ref to put_compound_page() comment. */
- if (PageSlab(page_head)) {
- if (likely(PageTail(page))) {
- __get_page_tail_foll(page, false);
- return true;
- } else {
- put_page(page_head);
- return false;
- }
- }
-
- /*
- * page_head wasn't a dangling pointer but it
- * may not be a head page anymore by the time
- * we obtain the lock. That is ok as long as it
- * can't be freed from under us.
- */
- flags = compound_lock_irqsave(page_head);
- /* here __split_huge_page_refcount won't run anymore */
+ if (likely(page != page_head && get_page_unless_zero(page_head))) {
+ /* Ref to put_compound_page() comment. */
+ if (PageSlab(page_head) || PageHeadHuge(page_head)) {
if (likely(PageTail(page))) {
+ /*
+ * This is a hugetlbfs page or a slab
+ * page. __split_huge_page_refcount
+ * cannot race here.
+ */
+ VM_BUG_ON(!PageHead(page_head));
__get_page_tail_foll(page, false);
- got = true;
- }
- compound_unlock_irqrestore(page_head, flags);
- if (unlikely(!got))
+ return true;
+ } else {
+ /*
+ * __split_huge_page_refcount run
+ * before us, "page" was a THP
+ * tail. The split page_head has been
+ * freed and reallocated as slab or
+ * hugetlbfs page of smaller order
+ * (only possible if reallocated as
+ * slab on x86).
+ */
put_page(page_head);
+ return false;
+ }
+ }
+
+ /*
+ * page_head wasn't a dangling pointer but it
+ * may not be a head page anymore by the time
+ * we obtain the lock. That is ok as long as it
+ * can't be freed from under us.
+ */
+ flags = compound_lock_irqsave(page_head);
+ /* here __split_huge_page_refcount won't run anymore */
+ if (likely(PageTail(page))) {
+ __get_page_tail_foll(page, false);
+ got = true;
}
+ compound_unlock_irqrestore(page_head, flags);
+ if (unlikely(!got))
+ put_page(page_head);
}
return got;
}
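
Both put_compound_page() and __get_page_tail() now lead with the same speculative step: take a head-page reference only if its count is already non-zero, then revalidate PageTail()/PageHead() afterwards (under the compound lock where needed). A self-contained sketch of that first step (hypothetical type; the kernel implements it with atomic_add_unless()):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct page_like { atomic_int count; };

    /* get_page_unless_zero() analogue: never resurrect a page whose
     * refcount already hit zero and which may be mid-free. */
    static bool get_unless_zero(struct page_like *p)
    {
            int c = atomic_load(&p->count);

            while (c > 0) {
                    if (atomic_compare_exchange_weak(&p->count, &c, c + 1))
                            return true;
            }
            return false;
    }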
diff --git a/net/802/hippi.c b/net/802/hippi.c
index 51a1f530417d..a97a3bde77bb 100644
--- a/net/802/hippi.c
+++ b/net/802/hippi.c
@@ -172,14 +172,14 @@ EXPORT_SYMBOL(hippi_mac_addr);
int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
{
/* Never send broadcast/multicast ARP messages */
- p->mcast_probes = 0;
+ NEIGH_VAR_SET(p, MCAST_PROBES, 0);
/* In IPv6 unicast probes are valid even on NBMA,
* because they are encapsulated in normal IPv6 protocol.
* Should be a generic flag.
*/
if (p->tbl->family != AF_INET6)
- p->ucast_probes = 0;
+ NEIGH_VAR_SET(p, UCAST_PROBES, 0);
return 0;
}
EXPORT_SYMBOL(hippi_neigh_setup_dev);
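
NEIGH_VAR_SET() replaces direct writes to struct neigh_parms fields; the parameters move into an array indexed by an enum so they can be addressed generically, e.g. from sysctl handlers. A minimal standalone analogue of such an accessor (the real macro may do more, such as tracking which entries were explicitly set):

    enum { MCAST_PROBES_IDX, UCAST_PROBES_IDX, PARM_MAX };

    struct parms_like { int data[PARM_MAX]; };

    #define PARM_SET(p, idx, val)  ((p)->data[(idx)] = (val))
    #define PARM_GET(p, idx)       ((p)->data[(idx)])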
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 762896ebfcf5..47c908f1f626 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -530,6 +530,23 @@ static const struct header_ops vlan_header_ops = {
.parse = eth_header_parse,
};
+static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type,
+ const void *daddr, const void *saddr,
+ unsigned int len)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct net_device *real_dev = vlan->real_dev;
+
+ return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
+}
+
+static const struct header_ops vlan_passthru_header_ops = {
+ .create = vlan_passthru_hard_header,
+ .rebuild = dev_rebuild_header,
+ .parse = eth_header_parse,
+};
+
static struct device_type vlan_type = {
.name = "vlan",
};
@@ -573,7 +590,7 @@ static int vlan_dev_init(struct net_device *dev)
dev->needed_headroom = real_dev->needed_headroom;
if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
- dev->header_ops = real_dev->header_ops;
+ dev->header_ops = &vlan_passthru_header_ops;
dev->hard_header_len = real_dev->hard_header_len;
} else {
dev->header_ops = &vlan_header_ops;
diff --git a/net/Kconfig b/net/Kconfig
index 0715db64a5c3..e411046a62e3 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -224,7 +224,7 @@ source "net/hsr/Kconfig"
config RPS
boolean
- depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
+ depends on SMP && SYSFS
default y
config RFS_ACCEL
@@ -235,15 +235,22 @@ config RFS_ACCEL
config XPS
boolean
- depends on SMP && USE_GENERIC_SMP_HELPERS
+ depends on SMP
default y
-config NETPRIO_CGROUP
+config CGROUP_NET_PRIO
tristate "Network priority cgroup"
depends on CGROUPS
---help---
Cgroup subsystem for use in assigning processes to network priorities on
- a per-interface basis
+ a per-interface basis.
+
+config CGROUP_NET_CLASSID
+ boolean "Network classid cgroup"
+ depends on CGROUPS
+ ---help---
+ Cgroup subsystem providing a general purpose socket classid marker
+ that is used by cls_cgroup and for netfilter matching.
config NET_RX_BUSY_POLL
boolean
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index a2b480a90872..b9c8a6eedf45 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -307,9 +307,9 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
hard_iface->bat_iv.ogm_buff = ogm_buff;
batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
- batadv_ogm_packet->header.packet_type = BATADV_IV_OGM;
- batadv_ogm_packet->header.version = BATADV_COMPAT_VERSION;
- batadv_ogm_packet->header.ttl = 2;
+ batadv_ogm_packet->packet_type = BATADV_IV_OGM;
+ batadv_ogm_packet->version = BATADV_COMPAT_VERSION;
+ batadv_ogm_packet->ttl = 2;
batadv_ogm_packet->flags = BATADV_NO_FLAGS;
batadv_ogm_packet->reserved = 0;
batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
@@ -346,7 +346,7 @@ batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
batadv_ogm_packet->flags = BATADV_PRIMARIES_FIRST_HOP;
- batadv_ogm_packet->header.ttl = BATADV_TTL;
+ batadv_ogm_packet->ttl = BATADV_TTL;
}
/* when do we schedule our own ogm to be sent */
@@ -435,7 +435,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
fwd_str, (packet_num > 0 ? "aggregated " : ""),
batadv_ogm_packet->orig,
ntohl(batadv_ogm_packet->seqno),
- batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl,
+ batadv_ogm_packet->tq, batadv_ogm_packet->ttl,
(batadv_ogm_packet->flags & BATADV_DIRECTLINK ?
"on" : "off"),
hard_iface->net_dev->name,
@@ -491,7 +491,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
/* multihomed peer assumed
* non-primary OGMs are only broadcasted on their interface
*/
- if ((directlink && (batadv_ogm_packet->header.ttl == 1)) ||
+ if ((directlink && (batadv_ogm_packet->ttl == 1)) ||
(forw_packet->own && (forw_packet->if_incoming != primary_if))) {
/* FIXME: what about aggregated packets ? */
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -499,7 +499,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
(forw_packet->own ? "Sending own" : "Forwarding"),
batadv_ogm_packet->orig,
ntohl(batadv_ogm_packet->seqno),
- batadv_ogm_packet->header.ttl,
+ batadv_ogm_packet->ttl,
forw_packet->if_incoming->net_dev->name,
forw_packet->if_incoming->net_dev->dev_addr);
@@ -572,7 +572,7 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
*/
if ((!directlink) &&
(!(batadv_ogm_packet->flags & BATADV_DIRECTLINK)) &&
- (batadv_ogm_packet->header.ttl != 1) &&
+ (batadv_ogm_packet->ttl != 1) &&
/* own packets originating non-primary
* interfaces leave only that interface
@@ -587,7 +587,7 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
* interface only - we still can aggregate
*/
if ((directlink) &&
- (new_bat_ogm_packet->header.ttl == 1) &&
+ (new_bat_ogm_packet->ttl == 1) &&
(forw_packet->if_incoming == if_incoming) &&
/* packets from direct neighbors or
@@ -778,7 +778,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
uint16_t tvlv_len;
- if (batadv_ogm_packet->header.ttl <= 1) {
+ if (batadv_ogm_packet->ttl <= 1) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
return;
}
@@ -798,7 +798,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
tvlv_len = ntohs(batadv_ogm_packet->tvlv_len);
- batadv_ogm_packet->header.ttl--;
+ batadv_ogm_packet->ttl--;
memcpy(batadv_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
/* apply hop penalty */
@@ -807,7 +807,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Forwarding packet: tq: %i, ttl: %i\n",
- batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl);
+ batadv_ogm_packet->tq, batadv_ogm_packet->ttl);
/* switch of primaries first hop flag when forwarding */
batadv_ogm_packet->flags &= ~BATADV_PRIMARIES_FIRST_HOP;
@@ -972,8 +972,8 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
spin_unlock_bh(&neigh_node->bat_iv.lq_update_lock);
if (dup_status == BATADV_NO_DUP) {
- orig_node->last_ttl = batadv_ogm_packet->header.ttl;
- neigh_node->last_ttl = batadv_ogm_packet->header.ttl;
+ orig_node->last_ttl = batadv_ogm_packet->ttl;
+ neigh_node->last_ttl = batadv_ogm_packet->ttl;
}
batadv_bonding_candidate_add(bat_priv, orig_node, neigh_node);
@@ -1247,7 +1247,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
* packet in an aggregation. Here we expect that the padding
* is always zero (or not 0x01)
*/
- if (batadv_ogm_packet->header.packet_type != BATADV_IV_OGM)
+ if (batadv_ogm_packet->packet_type != BATADV_IV_OGM)
return;
/* could be changed by schedule_own_packet() */
@@ -1267,8 +1267,8 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
if_incoming->net_dev->dev_addr, batadv_ogm_packet->orig,
batadv_ogm_packet->prev_sender,
ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->tq,
- batadv_ogm_packet->header.ttl,
- batadv_ogm_packet->header.version, has_directlink_flag);
+ batadv_ogm_packet->ttl,
+ batadv_ogm_packet->version, has_directlink_flag);
rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
@@ -1433,7 +1433,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
* seqno and similar ttl as the non-duplicate
*/
sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno);
- similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl;
+ similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->ttl;
if (is_bidirect && ((dup_status == BATADV_NO_DUP) ||
(sameseq && similar_ttl)))
batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 6c8c3934bd7b..b316a4cb6f14 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -349,7 +349,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
- switch (unicast_4addr_packet->u.header.packet_type) {
+ switch (unicast_4addr_packet->u.packet_type) {
case BATADV_UNICAST:
batadv_dbg(BATADV_DBG_DAT, bat_priv,
"* encapsulated within a UNICAST packet\n");
@@ -374,7 +374,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
break;
default:
batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: Unknown (%u)!\n",
- unicast_4addr_packet->u.header.packet_type);
+ unicast_4addr_packet->u.packet_type);
}
break;
case BATADV_BCAST:
@@ -387,7 +387,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
default:
batadv_dbg(BATADV_DBG_DAT, bat_priv,
"* encapsulated within an unknown packet type (0x%x)\n",
- unicast_4addr_packet->u.header.packet_type);
+ unicast_4addr_packet->u.packet_type);
}
}
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 271d321b3a04..6ddb6145ffb5 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -355,7 +355,7 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb,
batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
skb->len + ETH_HLEN);
- packet->header.ttl--;
+ packet->ttl--;
batadv_send_skb_packet(skb, neigh_node->if_incoming,
neigh_node->addr);
ret = true;
@@ -444,9 +444,9 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
goto out_err;
/* Create one header to be copied to all fragments */
- frag_header.header.packet_type = BATADV_UNICAST_FRAG;
- frag_header.header.version = BATADV_COMPAT_VERSION;
- frag_header.header.ttl = BATADV_TTL;
+ frag_header.packet_type = BATADV_UNICAST_FRAG;
+ frag_header.version = BATADV_COMPAT_VERSION;
+ frag_header.ttl = BATADV_TTL;
frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
frag_header.reserved = 0;
frag_header.no = 0;
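The header built here is filled once on the stack and then stamped onto every fragment. A condensed sketch of how such a prebuilt header is typically reused per fragment (the loop body is illustrative; the real function also manages fragment sizes and list handling):

	struct batadv_frag_packet frag_header;

	frag_header.packet_type = BATADV_UNICAST_FRAG;
	frag_header.version = BATADV_COMPAT_VERSION;
	frag_header.ttl = BATADV_TTL;
	frag_header.no = 0;

	/* per fragment: prepend a copy of the header, then bump the index */
	memcpy(skb_push(skb_fragment, sizeof(frag_header)), &frag_header,
	       sizeof(frag_header));
	frag_header.no++;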
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 29ae4efe3543..130cc3217e2b 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -194,7 +194,7 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
goto free_skb;
}
- if (icmp_header->header.packet_type != BATADV_ICMP) {
+ if (icmp_header->packet_type != BATADV_ICMP) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n");
len = -EINVAL;
@@ -243,9 +243,9 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
icmp_header->uid = socket_client->index;
- if (icmp_header->header.version != BATADV_COMPAT_VERSION) {
+ if (icmp_header->version != BATADV_COMPAT_VERSION) {
icmp_header->msg_type = BATADV_PARAMETER_PROBLEM;
- icmp_header->header.version = BATADV_COMPAT_VERSION;
+ icmp_header->version = BATADV_COMPAT_VERSION;
batadv_socket_add_packet(socket_client, icmp_header,
packet_len);
goto free_skb;
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index c51a5e568f0a..1511f64a6cea 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -383,17 +383,17 @@ int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;
- if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
+ if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: incompatible batman version (%i)\n",
- batadv_ogm_packet->header.version);
+ batadv_ogm_packet->version);
goto err_free;
}
/* all receive handlers return whether they received or reused
* the supplied skb. if not, we have to free the skb.
*/
- idx = batadv_ogm_packet->header.packet_type;
+ idx = batadv_ogm_packet->packet_type;
ret = (*batadv_rx_handler[idx])(skb, hard_iface);
if (ret == NET_RX_DROP)
@@ -426,8 +426,8 @@ static void batadv_recv_handler_init(void)
BUILD_BUG_ON(offsetof(struct batadv_unicast_packet, dest) != 4);
BUILD_BUG_ON(offsetof(struct batadv_unicast_tvlv_packet, dst) != 4);
BUILD_BUG_ON(offsetof(struct batadv_frag_packet, dest) != 4);
- BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, icmph.dst) != 4);
- BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, icmph.dst) != 4);
+ BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, dst) != 4);
+ BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, dst) != 4);
/* broadcast packet */
batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
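These BUILD_BUG_ON() lines are the compile-time guard for the flattening: the receive path assumes the destination address sits at byte offset 4 in every unicast-style packet, directly after the three common header bytes plus one packet-specific byte. A standalone illustration of the same idea, using a hypothetical struct and C11 _Static_assert in place of the kernel macro:

	#include <stddef.h>
	#include <stdint.h>

	struct example_packet {
		uint8_t packet_type;	/* byte 0 */
		uint8_t version;	/* byte 1 */
		uint8_t ttl;		/* byte 2 */
		uint8_t msg_type;	/* byte 3 */
		uint8_t dst[6];		/* must start at byte 4 */
	};

	_Static_assert(offsetof(struct example_packet, dst) == 4,
		       "dst must directly follow the common header");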
@@ -1119,9 +1119,9 @@ void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
skb_reserve(skb, ETH_HLEN);
tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
- unicast_tvlv_packet->header.packet_type = BATADV_UNICAST_TVLV;
- unicast_tvlv_packet->header.version = BATADV_COMPAT_VERSION;
- unicast_tvlv_packet->header.ttl = BATADV_TTL;
+ unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV;
+ unicast_tvlv_packet->version = BATADV_COMPAT_VERSION;
+ unicast_tvlv_packet->ttl = BATADV_TTL;
unicast_tvlv_packet->reserved = 0;
unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
unicast_tvlv_packet->align = 0;
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index f94f287b8670..322dd7323732 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -266,7 +266,7 @@ static inline void batadv_dbg(int type __always_unused,
*/
static inline int batadv_compare_eth(const void *data1, const void *data2)
{
- return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
+ return ether_addr_equal_unaligned(data1, data2);
}
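ether_addr_equal_unaligned() is the right helper here because these 6-byte addresses are not guaranteed to be 2-byte aligned. Behaviourally it matches the memcmp() it replaces; a minimal sketch of the semantics (the in-kernel version may use word-sized loads on architectures where unaligned access is cheap):

	static inline bool eth_addr_eq_sketch(const uint8_t *a, const uint8_t *b)
	{
		return memcmp(a, b, 6) == 0;	/* 6 == ETH_ALEN */
	}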
/**
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 351e199bc0af..511d7e1eea38 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -722,7 +722,7 @@ static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv,
{
if (orig_node->last_real_seqno != ntohl(ogm_packet->seqno))
return false;
- if (orig_node->last_ttl != ogm_packet->header.ttl + 1)
+ if (orig_node->last_ttl != ogm_packet->ttl + 1)
return false;
if (!batadv_compare_eth(ogm_packet->orig, ogm_packet->prev_sender))
return false;
@@ -1082,9 +1082,9 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
coded_packet = (struct batadv_coded_packet *)skb_dest->data;
skb_reset_mac_header(skb_dest);
- coded_packet->header.packet_type = BATADV_CODED;
- coded_packet->header.version = BATADV_COMPAT_VERSION;
- coded_packet->header.ttl = packet1->header.ttl;
+ coded_packet->packet_type = BATADV_CODED;
+ coded_packet->version = BATADV_COMPAT_VERSION;
+ coded_packet->ttl = packet1->ttl;
/* Info about first unicast packet */
memcpy(coded_packet->first_source, first_source, ETH_ALEN);
@@ -1097,7 +1097,7 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
memcpy(coded_packet->second_source, second_source, ETH_ALEN);
memcpy(coded_packet->second_orig_dest, packet2->dest, ETH_ALEN);
coded_packet->second_crc = packet_id2;
- coded_packet->second_ttl = packet2->header.ttl;
+ coded_packet->second_ttl = packet2->ttl;
coded_packet->second_ttvn = packet2->ttvn;
coded_packet->coded_len = htons(coding_len);
@@ -1452,7 +1452,7 @@ bool batadv_nc_skb_forward(struct sk_buff *skb,
/* We only handle unicast packets */
payload = skb_network_header(skb);
packet = (struct batadv_unicast_packet *)payload;
- if (packet->header.packet_type != BATADV_UNICAST)
+ if (packet->packet_type != BATADV_UNICAST)
goto out;
/* Try to find a coding opportunity and send the skb if one is found */
@@ -1505,7 +1505,7 @@ void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv,
/* Check for supported packet type */
payload = skb_network_header(skb);
packet = (struct batadv_unicast_packet *)payload;
- if (packet->header.packet_type != BATADV_UNICAST)
+ if (packet->packet_type != BATADV_UNICAST)
goto out;
/* Find existing nc_path or create a new */
@@ -1623,7 +1623,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
ttvn = coded_packet_tmp.second_ttvn;
} else {
orig_dest = coded_packet_tmp.first_orig_dest;
- ttl = coded_packet_tmp.header.ttl;
+ ttl = coded_packet_tmp.ttl;
ttvn = coded_packet_tmp.first_ttvn;
}
@@ -1648,9 +1648,9 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
/* Create decoded unicast packet */
unicast_packet = (struct batadv_unicast_packet *)skb->data;
- unicast_packet->header.packet_type = BATADV_UNICAST;
- unicast_packet->header.version = BATADV_COMPAT_VERSION;
- unicast_packet->header.ttl = ttl;
+ unicast_packet->packet_type = BATADV_UNICAST;
+ unicast_packet->version = BATADV_COMPAT_VERSION;
+ unicast_packet->ttl = ttl;
memcpy(unicast_packet->dest, orig_dest, ETH_ALEN);
unicast_packet->ttvn = ttvn;
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 8ab14340d10f..803ab4be40c2 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -41,7 +41,7 @@ int batadv_compare_orig(const struct hlist_node *node, const void *data2)
const void *data1 = container_of(node, struct batadv_orig_node,
hash_entry);
- return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
+ return batadv_compare_eth(data1, data2);
}
/**
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 207459b62966..2dd8f2422550 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -155,6 +155,7 @@ enum batadv_tvlv_type {
BATADV_TVLV_ROAM = 0x05,
};
+#pragma pack(2)
/* the destination hardware field in the ARP frame is used to
* transport the claim type and the group id
*/
@@ -163,24 +164,20 @@ struct batadv_bla_claim_dst {
uint8_t type; /* bla_claimframe */
__be16 group; /* group id */
};
-
-struct batadv_header {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t ttl;
- /* the parent struct has to add a byte after the header to make
- * everything 4 bytes aligned again
- */
-};
+#pragma pack()
/**
* struct batadv_ogm_packet - ogm (routing protocol) packet
- * @header: common batman packet header
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @flags: contains routing relevant flags - see enum batadv_iv_flags
* @tvlv_len: length of tvlv data following the ogm header
*/
struct batadv_ogm_packet {
- struct batadv_header header;
+ uint8_t packet_type;
+ uint8_t version;
+ uint8_t ttl;
uint8_t flags;
__be32 seqno;
uint8_t orig[ETH_ALEN];
@@ -196,29 +193,51 @@ struct batadv_ogm_packet {
#define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet)
/**
- * batadv_icmp_header - common ICMP header
- * @header: common batman header
+ * batadv_icmp_header - common members among all the ICMP packets
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @msg_type: ICMP packet type
* @dst: address of the destination node
* @orig: address of the source node
* @uid: local ICMP socket identifier
+ * @align: not used - useful for alignment purposes only
+ *
+ * This structure is used for ICMP packet parsing only and it is never sent
+ * over the wire. The alignment field at the end is there to ensure that
+ * members are padded the same way as they are in real packets.
*/
struct batadv_icmp_header {
- struct batadv_header header;
+ uint8_t packet_type;
+ uint8_t version;
+ uint8_t ttl;
uint8_t msg_type; /* see ICMP message types above */
uint8_t dst[ETH_ALEN];
uint8_t orig[ETH_ALEN];
uint8_t uid;
+ uint8_t align[3];
};
/**
* batadv_icmp_packet - ICMP packet
- * @icmph: common ICMP header
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @msg_type: ICMP packet type
+ * @dst: address of the destination node
+ * @orig: address of the source node
+ * @uid: local ICMP socket identifier
* @reserved: not used - useful for alignment
* @seqno: ICMP sequence number
*/
struct batadv_icmp_packet {
- struct batadv_icmp_header icmph;
+ uint8_t packet_type;
+ uint8_t version;
+ uint8_t ttl;
+ uint8_t msg_type; /* see ICMP message types above */
+ uint8_t dst[ETH_ALEN];
+ uint8_t orig[ETH_ALEN];
+ uint8_t uid;
uint8_t reserved;
__be16 seqno;
};
@@ -227,13 +246,25 @@ struct batadv_icmp_packet {
/**
* batadv_icmp_packet_rr - ICMP RouteRecord packet
- * @icmph: common ICMP header
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @msg_type: ICMP packet type
+ * @dst: address of the destination node
+ * @orig: address of the source node
+ * @uid: local ICMP socket identifier
 * @rr_cur: number of entries in the rr array
* @seqno: ICMP sequence number
* @rr: route record array
*/
struct batadv_icmp_packet_rr {
- struct batadv_icmp_header icmph;
+ uint8_t packet_type;
+ uint8_t version;
+ uint8_t ttl;
+ uint8_t msg_type; /* see ICMP message types above */
+ uint8_t dst[ETH_ALEN];
+ uint8_t orig[ETH_ALEN];
+ uint8_t uid;
uint8_t rr_cur;
__be16 seqno;
uint8_t rr[BATADV_RR_LEN][ETH_ALEN];
@@ -253,8 +284,18 @@ struct batadv_icmp_packet_rr {
*/
#pragma pack(2)
+/**
+ * struct batadv_unicast_packet - unicast packet for network payload
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @ttvn: translation table version number
+ * @dest: originator destination of the unicast packet
+ */
struct batadv_unicast_packet {
- struct batadv_header header;
+ uint8_t packet_type;
+ uint8_t version;
+ uint8_t ttl;
uint8_t ttvn; /* destination translation table version number */
uint8_t dest[ETH_ALEN];
/* "4 bytes boundary + 2 bytes" long to make the payload after the
@@ -280,7 +321,9 @@ struct batadv_unicast_4addr_packet {
/**
* struct batadv_frag_packet - fragmented packet
- * @header: common batman packet header with type, compatversion, and ttl
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @dest: final destination used when routing fragments
* @orig: originator of the fragment used when merging the packet
* @no: fragment number within this sequence
@@ -289,7 +332,9 @@ struct batadv_unicast_4addr_packet {
* @total_size: size of the merged packet
*/
struct batadv_frag_packet {
- struct batadv_header header;
+ uint8_t packet_type;
+ uint8_t version; /* batman version field */
+ uint8_t ttl;
#if defined(__BIG_ENDIAN_BITFIELD)
uint8_t no:4;
uint8_t reserved:4;
@@ -305,8 +350,19 @@ struct batadv_frag_packet {
__be16 total_size;
};
+/**
+ * struct batadv_bcast_packet - broadcast packet for network payload
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @reserved: reserved byte for alignment
+ * @seqno: sequence identification
+ * @orig: originator of the broadcast packet
+ */
struct batadv_bcast_packet {
- struct batadv_header header;
+ uint8_t packet_type;
+ uint8_t version; /* batman version field */
+ uint8_t ttl;
uint8_t reserved;
__be32 seqno;
uint8_t orig[ETH_ALEN];
@@ -315,11 +371,11 @@ struct batadv_bcast_packet {
*/
};
-#pragma pack()
-
/**
* struct batadv_coded_packet - network coded packet
- * @header: common batman packet header and ttl of first included packet
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @reserved: Align following fields to 2-byte boundaries
* @first_source: original source of first included packet
 * @first_orig_dest: original destination of first included packet
@@ -334,7 +390,9 @@ struct batadv_bcast_packet {
* @coded_len: length of network coded part of the payload
*/
struct batadv_coded_packet {
- struct batadv_header header;
+ uint8_t packet_type;
+ uint8_t version; /* batman version field */
+ uint8_t ttl;
uint8_t first_ttvn;
/* uint8_t first_dest[ETH_ALEN]; - saved in mac header destination */
uint8_t first_source[ETH_ALEN];
@@ -349,9 +407,13 @@ struct batadv_coded_packet {
__be16 coded_len;
};
+#pragma pack()
+
/**
 * struct batadv_unicast_tvlv_packet - generic unicast packet with tvlv payload
- * @header: common batman packet header
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @reserved: reserved field (for packet alignment)
* @src: address of the source
* @dst: address of the destination
@@ -359,7 +421,9 @@ struct batadv_coded_packet {
 * @align: 2 bytes to align the header to a 4 byte boundary
*/
struct batadv_unicast_tvlv_packet {
- struct batadv_header header;
+ uint8_t packet_type;
+ uint8_t version; /* batman version field */
+ uint8_t ttl;
uint8_t reserved;
uint8_t dst[ETH_ALEN];
uint8_t src[ETH_ALEN];
@@ -420,13 +484,13 @@ struct batadv_tvlv_tt_vlan_data {
* struct batadv_tvlv_tt_change - translation table diff data
* @flags: status indicators concerning the non-mesh client (see
* batadv_tt_client_flags)
- * @reserved: reserved field
+ * @reserved: reserved field - useful for alignment purposes only
* @addr: mac address of non-mesh client that triggered this tt change
* @vid: VLAN identifier
*/
struct batadv_tvlv_tt_change {
uint8_t flags;
- uint8_t reserved;
+ uint8_t reserved[3];
uint8_t addr[ETH_ALEN];
__be16 vid;
};
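The #pragma pack(2) regions in this header cap member alignment at two bytes so the on-wire structs carry no compiler-inserted padding. A self-contained illustration with a batadv_bcast_packet-like layout (hypothetical names):

	#include <stdint.h>

	#pragma pack(2)
	struct bcast_like {
		uint8_t  packet_type;	/* offset 0 */
		uint8_t  version;	/* offset 1 */
		uint8_t  ttl;		/* offset 2 */
		uint8_t  reserved;	/* offset 3 */
		uint32_t seqno;		/* offset 4; 2-byte alignment suffices */
		uint8_t  orig[6];	/* offsets 8..13 */
	};
	#pragma pack()

	/* sizeof(struct bcast_like) == 14; without pack(2) the natural
	 * 4-byte alignment of seqno would round the struct up to 16.
	 */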
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index d4114d775ad6..46278bfb8fdb 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -308,7 +308,7 @@ static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
memcpy(icmph->dst, icmph->orig, ETH_ALEN);
memcpy(icmph->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
icmph->msg_type = BATADV_ECHO_REPLY;
- icmph->header.ttl = BATADV_TTL;
+ icmph->ttl = BATADV_TTL;
res = batadv_send_skb_to_orig(skb, orig_node, NULL);
if (res != NET_XMIT_DROP)
@@ -338,9 +338,9 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
icmp_packet = (struct batadv_icmp_packet *)skb->data;
/* send TTL exceeded if packet is an echo request (traceroute) */
- if (icmp_packet->icmph.msg_type != BATADV_ECHO_REQUEST) {
+ if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
- icmp_packet->icmph.orig, icmp_packet->icmph.dst);
+ icmp_packet->orig, icmp_packet->dst);
goto out;
}
@@ -349,7 +349,7 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
goto out;
/* get routing information */
- orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->icmph.orig);
+ orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
if (!orig_node)
goto out;
@@ -359,11 +359,11 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
icmp_packet = (struct batadv_icmp_packet *)skb->data;
- memcpy(icmp_packet->icmph.dst, icmp_packet->icmph.orig, ETH_ALEN);
- memcpy(icmp_packet->icmph.orig, primary_if->net_dev->dev_addr,
+ memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
+ memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr,
ETH_ALEN);
- icmp_packet->icmph.msg_type = BATADV_TTL_EXCEEDED;
- icmp_packet->icmph.header.ttl = BATADV_TTL;
+ icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
+ icmp_packet->ttl = BATADV_TTL;
if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
ret = NET_RX_SUCCESS;
@@ -434,7 +434,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
return batadv_recv_my_icmp_packet(bat_priv, skb);
/* TTL exceeded */
- if (icmph->header.ttl < 2)
+ if (icmph->ttl < 2)
return batadv_recv_icmp_ttl_exceeded(bat_priv, skb);
/* get routing information */
@@ -449,7 +449,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
icmph = (struct batadv_icmp_header *)skb->data;
/* decrement ttl */
- icmph->header.ttl--;
+ icmph->ttl--;
/* route it */
if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP)
@@ -709,7 +709,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
unicast_packet = (struct batadv_unicast_packet *)skb->data;
/* TTL exceeded */
- if (unicast_packet->header.ttl < 2) {
+ if (unicast_packet->ttl < 2) {
pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n",
ethhdr->h_source, unicast_packet->dest);
goto out;
@@ -727,9 +727,9 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
/* decrement ttl */
unicast_packet = (struct batadv_unicast_packet *)skb->data;
- unicast_packet->header.ttl--;
+ unicast_packet->ttl--;
- switch (unicast_packet->header.packet_type) {
+ switch (unicast_packet->packet_type) {
case BATADV_UNICAST_4ADDR:
hdr_len = sizeof(struct batadv_unicast_4addr_packet);
break;
@@ -970,7 +970,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
unicast_packet = (struct batadv_unicast_packet *)skb->data;
unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
- is4addr = unicast_packet->header.packet_type == BATADV_UNICAST_4ADDR;
+ is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR;
/* the caller function should have already pulled 2 bytes */
if (is4addr)
hdr_size = sizeof(*unicast_4addr_packet);
@@ -1160,7 +1160,7 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
if (batadv_is_my_mac(bat_priv, bcast_packet->orig))
goto out;
- if (bcast_packet->header.ttl < 2)
+ if (bcast_packet->ttl < 2)
goto out;
orig_node = batadv_orig_hash_find(bat_priv, bcast_packet->orig);
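All of the routing-path hunks above follow the same TTL discipline, now spelled without the header indirection. Condensed sketch of the pattern (drop before decrementing, so a packet is never forwarded with a TTL of zero):

	if (unicast_packet->ttl < 2)
		goto drop;		/* TTL exceeded: last hop reached */
	unicast_packet->ttl--;		/* decrement, then forward */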
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index c83be5ebaa28..fba4dcfcfac2 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -161,11 +161,11 @@ batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
return false;
unicast_packet = (struct batadv_unicast_packet *)skb->data;
- unicast_packet->header.version = BATADV_COMPAT_VERSION;
+ unicast_packet->version = BATADV_COMPAT_VERSION;
/* batman packet type: unicast */
- unicast_packet->header.packet_type = BATADV_UNICAST;
+ unicast_packet->packet_type = BATADV_UNICAST;
/* set unicast ttl */
- unicast_packet->header.ttl = BATADV_TTL;
+ unicast_packet->ttl = BATADV_TTL;
/* copy the destination for faster routing */
memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
/* set the destination tt version number */
@@ -221,7 +221,7 @@ bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
goto out;
uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
- uc_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR;
+ uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
uc_4addr_packet->subtype = packet_subtype;
uc_4addr_packet->reserved = 0;
@@ -436,7 +436,7 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
/* as we have a copy now, it is safe to decrease the TTL */
bcast_packet = (struct batadv_bcast_packet *)newskb->data;
- bcast_packet->header.ttl--;
+ bcast_packet->ttl--;
skb_reset_mac_header(newskb);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 36f050876f82..a8f99d1486c0 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -264,11 +264,11 @@ static int batadv_interface_tx(struct sk_buff *skb,
goto dropped;
bcast_packet = (struct batadv_bcast_packet *)skb->data;
- bcast_packet->header.version = BATADV_COMPAT_VERSION;
- bcast_packet->header.ttl = BATADV_TTL;
+ bcast_packet->version = BATADV_COMPAT_VERSION;
+ bcast_packet->ttl = BATADV_TTL;
/* batman packet type: broadcast */
- bcast_packet->header.packet_type = BATADV_BCAST;
+ bcast_packet->packet_type = BATADV_BCAST;
bcast_packet->reserved = 0;
/* hw address of first interface is the orig mac because only
@@ -328,7 +328,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
struct sk_buff *skb, struct batadv_hard_iface *recv_if,
int hdr_size, struct batadv_orig_node *orig_node)
{
- struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
+ struct batadv_bcast_packet *batadv_bcast_packet;
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
__be16 ethertype = htons(ETH_P_BATMAN);
struct vlan_ethhdr *vhdr;
@@ -336,7 +336,8 @@ void batadv_interface_rx(struct net_device *soft_iface,
unsigned short vid;
bool is_bcast;
- is_bcast = (batadv_header->packet_type == BATADV_BCAST);
+ batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
+ is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST);
/* check if enough space is available for pulling, and pull */
if (!pskb_may_pull(skb, hdr_size))
@@ -345,7 +346,12 @@ void batadv_interface_rx(struct net_device *soft_iface,
skb_pull_rcsum(skb, hdr_size);
skb_reset_mac_header(skb);
- vid = batadv_get_vid(skb, hdr_size);
+ /* clean the netfilter state now that the batman-adv header has been
+ * removed
+ */
+ nf_reset(skb);
+
+ vid = batadv_get_vid(skb, 0);
ethhdr = eth_hdr(skb);
switch (ntohs(ethhdr->h_proto)) {
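The offset change above is a consequence of the preceding skb_pull_rcsum(): once the batman-adv header is removed, the inner Ethernet frame starts at skb->data, so the VLAN lookup offset drops from hdr_size to 0. A sketch of what a batadv_get_vid()-style helper plausibly does at that offset (assumption: it inspects a possible 802.1Q tag; the actual implementation is not part of this diff):

	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)(skb->data + offset);
	unsigned short vid = 0;

	if (vhdr->h_vlan_proto == htons(ETH_P_8021Q))
		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;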
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 4add57d4857f..19bc42f8b8be 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -51,7 +51,7 @@ static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
const void *data1 = container_of(node, struct batadv_tt_common_entry,
hash_entry);
- return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
+ return batadv_compare_eth(data1, data2);
}
/**
@@ -333,7 +333,8 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
return;
tt_change_node->change.flags = flags;
- tt_change_node->change.reserved = 0;
+ memset(tt_change_node->change.reserved, 0,
+ sizeof(tt_change_node->change.reserved));
memcpy(tt_change_node->change.addr, common->addr, ETH_ALEN);
tt_change_node->change.vid = htons(common->vid);
@@ -2221,7 +2222,8 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
ETH_ALEN);
tt_change->flags = tt_common_entry->flags;
tt_change->vid = htons(tt_common_entry->vid);
- tt_change->reserved = 0;
+ memset(tt_change->reserved, 0,
+ sizeof(tt_change->reserved));
tt_num_entries++;
tt_change++;
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index e7ee5314f39a..5a5b16f365e9 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -12,8 +12,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _BNEP_H
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index f00cfd2a0143..e4401a531afb 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -32,7 +32,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
const unsigned char *dest = skb->data;
struct net_bridge_fdb_entry *dst;
struct net_bridge_mdb_entry *mdst;
- struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
+ struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
u16 vid = 0;
rcu_read_lock();
@@ -90,12 +90,12 @@ static int br_dev_init(struct net_device *dev)
struct net_bridge *br = netdev_priv(dev);
int i;
- br->stats = alloc_percpu(struct br_cpu_netstats);
+ br->stats = alloc_percpu(struct pcpu_sw_netstats);
if (!br->stats)
return -ENOMEM;
for_each_possible_cpu(i) {
- struct br_cpu_netstats *br_dev_stats;
+ struct pcpu_sw_netstats *br_dev_stats;
br_dev_stats = per_cpu_ptr(br->stats, i);
u64_stats_init(&br_dev_stats->syncp);
}
@@ -135,12 +135,12 @@ static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct net_bridge *br = netdev_priv(dev);
- struct br_cpu_netstats tmp, sum = { 0 };
+ struct pcpu_sw_netstats tmp, sum = { 0 };
unsigned int cpu;
for_each_possible_cpu(cpu) {
unsigned int start;
- const struct br_cpu_netstats *bstats
+ const struct pcpu_sw_netstats *bstats
= per_cpu_ptr(br->stats, cpu);
do {
start = u64_stats_fetch_begin_bh(&bstats->syncp);
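The loop above is the standard pcpu_sw_netstats aggregation pattern: 64-bit counters on 32-bit SMP are guarded by a seqcount, so each per-CPU snapshot is retried until a consistent read is obtained. Simplified sketch for a single counter:

	u64 rx_packets = 0;
	unsigned int cpu, start;

	for_each_possible_cpu(cpu) {
		const struct pcpu_sw_netstats *bstats = per_cpu_ptr(br->stats, cpu);
		u64 tmp;

		do {
			start = u64_stats_fetch_begin_bh(&bstats->syncp);
			tmp = bstats->rx_packets;
		} while (u64_stats_fetch_retry_bh(&bstats->syncp, start));
		rx_packets += tmp;
	}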
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 33e8f23acddd..c5f5a4a933f4 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -570,8 +570,7 @@ static void fdb_notify(struct net_bridge *br,
rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
+ rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
/* Dump information about entries, in response to GETNEIGH */
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 4b81b1471789..d3409e6b5453 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -26,13 +26,13 @@ static int deliver_clone(const struct net_bridge_port *prev,
void (*__packet_hook)(const struct net_bridge_port *p,
struct sk_buff *skb));
-/* Don't forward packets to originating port or forwarding diasabled */
+/* Don't forward packets to originating port or forwarding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
const struct sk_buff *skb)
{
- return (((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
+ return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
br_allowed_egress(p->br, nbp_get_vlan_info(p), skb) &&
- p->state == BR_STATE_FORWARDING);
+ p->state == BR_STATE_FORWARDING;
}
static inline unsigned int packet_length(const struct sk_buff *skb)
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 4bf02adb5dc2..1f6bd1e2e8a4 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -61,7 +61,7 @@ static int port_cost(struct net_device *dev)
}
-/* Check for port carrier transistions. */
+/* Check for port carrier transitions. */
void br_port_carrier_check(struct net_bridge_port *p)
{
struct net_device *dev = p->dev;
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 7e73c32e205d..bf8dc7d308d6 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -28,7 +28,7 @@ static int br_pass_frame_up(struct sk_buff *skb)
{
struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
struct net_bridge *br = netdev_priv(brdev);
- struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
+ struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
u64_stats_update_begin(&brstats->syncp);
brstats->rx_packets++;
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index cd8c3a44ab7d..a9a4a1b7863d 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -381,7 +381,7 @@ int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct net_bridge *br = netdev_priv(dev);
- switch(cmd) {
+ switch (cmd) {
case SIOCDEVPRIVATE:
return old_dev_ioctl(dev, rq, cmd);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 4c214b2b88ef..ef66365b7354 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1998,7 +1998,7 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
u32 old;
struct net_bridge_mdb_htable *mdb;
- spin_lock(&br->multicast_lock);
+ spin_lock_bh(&br->multicast_lock);
if (!netif_running(br->dev))
goto unlock;
@@ -2030,7 +2030,7 @@ rollback:
}
unlock:
- spin_unlock(&br->multicast_lock);
+ spin_unlock_bh(&br->multicast_lock);
return err;
}
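The _bh variants matter here because multicast_lock is also taken from softirq context (packet and timer processing); taking it from this process-context path without masking bottom halves could deadlock if a softirq tried to re-acquire the lock on the same CPU. The rule, in brief:

	spin_lock_bh(&br->multicast_lock);	/* mask BHs on this CPU first */
	/* ... modify multicast state ... */
	spin_unlock_bh(&br->multicast_lock);	/* re-enable BHs */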
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 80cad2cf02a7..b008c59a92c4 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -1001,7 +1001,7 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = {
#ifdef CONFIG_SYSCTL
static
int brnf_sysctl_call_tables(struct ctl_table *ctl, int write,
- void __user * buffer, size_t * lenp, loff_t * ppos)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index f75d92e4f96b..e74b6d530cb6 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -195,8 +195,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port)
rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_LINK, err);
+ rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}
@@ -373,7 +372,7 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh)
p = br_port_get_rtnl(dev);
/* We want to accept dev as bridge itself if the AF_SPEC
- * is set to see if someone is setting vlan info on the brigde
+ * is set to see if someone is setting vlan info on the bridge
*/
if (!p && !afspec)
return -EINVAL;
@@ -389,7 +388,7 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh)
err = br_setport(p, tb);
spin_unlock_bh(&p->br->lock);
} else {
- /* Binary compatability with old RSTP */
+ /* Binary compatibility with old RSTP */
if (nla_len(protinfo) < sizeof(u8))
return -EINVAL;
@@ -482,9 +481,7 @@ int __init br_netlink_init(void)
int err;
br_mdb_init();
- err = rtnl_af_register(&br_af_ops);
- if (err)
- goto out;
+ rtnl_af_register(&br_af_ops);
err = rtnl_link_register(&br_link_ops);
if (err)
@@ -494,7 +491,6 @@ int __init br_netlink_init(void)
out_af:
rtnl_af_unregister(&br_af_ops);
-out:
br_mdb_uninit();
return err;
}
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 229d820bdf0b..3733f152351c 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -210,21 +210,13 @@ static inline struct net_bridge_port *br_port_get_rtnl(const struct net_device *
rtnl_dereference(dev->rx_handler_data) : NULL;
}
-struct br_cpu_netstats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
- struct u64_stats_sync syncp;
-};
-
struct net_bridge
{
spinlock_t lock;
struct list_head port_list;
struct net_device *dev;
- struct br_cpu_netstats __percpu *stats;
+ struct pcpu_sw_netstats __percpu *stats;
spinlock_t hash_lock;
struct hlist_head hash[BR_HASH_SIZE];
#ifdef CONFIG_BRIDGE_NETFILTER
@@ -426,6 +418,16 @@ netdev_features_t br_features_recompute(struct net_bridge *br,
int br_handle_frame_finish(struct sk_buff *skb);
rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
+static inline bool br_rx_handler_check_rcu(const struct net_device *dev)
+{
+ return rcu_dereference(dev->rx_handler) == br_handle_frame;
+}
+
+static inline struct net_bridge_port *br_port_get_check_rcu(const struct net_device *dev)
+{
+ return br_rx_handler_check_rcu(dev) ? br_port_get_rcu(dev) : NULL;
+}
+
/* br_ioctl.c */
int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd,
@@ -711,7 +713,7 @@ void br_netfilter_fini(void);
void br_netfilter_rtable_init(struct net_bridge *);
#else
#define br_netfilter_init() (0)
-#define br_netfilter_fini() do { } while(0)
+#define br_netfilter_fini() do { } while (0)
#define br_netfilter_rtable_init(x)
#endif
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 8660ea3be705..bdb459d21ad8 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -153,7 +153,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0)
goto err;
- p = br_port_get_rcu(dev);
+ p = br_port_get_check_rcu(dev);
if (!p)
goto err;
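br_port_get_check_rcu() (added to br_private.h above) closes a race: dev->rx_handler_data is only a valid net_bridge_port while the bridge's rx_handler is still attached, so the handler pointer is verified first under RCU. Typical caller shape:

	struct net_bridge_port *p;

	rcu_read_lock();
	p = br_port_get_check_rcu(dev);
	if (p) {
		/* dev is still an enslaved bridge port; p is safe to use */
	}
	rcu_read_unlock();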
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 950663d4d330..558c46d19e05 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -110,7 +110,7 @@ static void br_tcn_timer_expired(unsigned long arg)
if (!br_is_root_bridge(br) && (br->dev->flags & IFF_UP)) {
br_transmit_tcn(br);
- mod_timer(&br->tcn_timer,jiffies + br->bridge_hello_time);
+ mod_timer(&br->tcn_timer, jiffies + br->bridge_hello_time);
}
spin_unlock(&br->lock);
}
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 3b9637fb7939..8dac65552f19 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -49,53 +49,51 @@ static ssize_t store_bridge_parm(struct device *d,
}
-static ssize_t show_forward_delay(struct device *d,
+static ssize_t forward_delay_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay));
}
-static ssize_t store_forward_delay(struct device *d,
+static ssize_t forward_delay_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, br_set_forward_delay);
}
-static DEVICE_ATTR(forward_delay, S_IRUGO | S_IWUSR,
- show_forward_delay, store_forward_delay);
+static DEVICE_ATTR_RW(forward_delay);
-static ssize_t show_hello_time(struct device *d, struct device_attribute *attr,
+static ssize_t hello_time_show(struct device *d, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%lu\n",
jiffies_to_clock_t(to_bridge(d)->hello_time));
}
-static ssize_t store_hello_time(struct device *d,
+static ssize_t hello_time_store(struct device *d,
struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, br_set_hello_time);
}
-static DEVICE_ATTR(hello_time, S_IRUGO | S_IWUSR, show_hello_time,
- store_hello_time);
+static DEVICE_ATTR_RW(hello_time);
-static ssize_t show_max_age(struct device *d, struct device_attribute *attr,
+static ssize_t max_age_show(struct device *d, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%lu\n",
jiffies_to_clock_t(to_bridge(d)->max_age));
}
-static ssize_t store_max_age(struct device *d, struct device_attribute *attr,
+static ssize_t max_age_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, br_set_max_age);
}
-static DEVICE_ATTR(max_age, S_IRUGO | S_IWUSR, show_max_age, store_max_age);
+static DEVICE_ATTR_RW(max_age);
-static ssize_t show_ageing_time(struct device *d,
+static ssize_t ageing_time_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
@@ -108,16 +106,15 @@ static int set_ageing_time(struct net_bridge *br, unsigned long val)
return 0;
}
-static ssize_t store_ageing_time(struct device *d,
+static ssize_t ageing_time_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_ageing_time);
}
-static DEVICE_ATTR(ageing_time, S_IRUGO | S_IWUSR, show_ageing_time,
- store_ageing_time);
+static DEVICE_ATTR_RW(ageing_time);
-static ssize_t show_stp_state(struct device *d,
+static ssize_t stp_state_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
@@ -125,7 +122,7 @@ static ssize_t show_stp_state(struct device *d,
}
-static ssize_t store_stp_state(struct device *d,
+static ssize_t stp_state_store(struct device *d,
struct device_attribute *attr, const char *buf,
size_t len)
{
@@ -147,20 +144,21 @@ static ssize_t store_stp_state(struct device *d,
return len;
}
-static DEVICE_ATTR(stp_state, S_IRUGO | S_IWUSR, show_stp_state,
- store_stp_state);
+static DEVICE_ATTR_RW(stp_state);
-static ssize_t show_group_fwd_mask(struct device *d,
- struct device_attribute *attr, char *buf)
+static ssize_t group_fwd_mask_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%#x\n", br->group_fwd_mask);
}
-static ssize_t store_group_fwd_mask(struct device *d,
- struct device_attribute *attr, const char *buf,
- size_t len)
+static ssize_t group_fwd_mask_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct net_bridge *br = to_bridge(d);
char *endp;
@@ -180,10 +178,9 @@ static ssize_t store_group_fwd_mask(struct device *d,
return len;
}
-static DEVICE_ATTR(group_fwd_mask, S_IRUGO | S_IWUSR, show_group_fwd_mask,
- store_group_fwd_mask);
+static DEVICE_ATTR_RW(group_fwd_mask);
-static ssize_t show_priority(struct device *d, struct device_attribute *attr,
+static ssize_t priority_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
@@ -197,93 +194,91 @@ static int set_priority(struct net_bridge *br, unsigned long val)
return 0;
}
-static ssize_t store_priority(struct device *d, struct device_attribute *attr,
- const char *buf, size_t len)
+static ssize_t priority_store(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_priority);
}
-static DEVICE_ATTR(priority, S_IRUGO | S_IWUSR, show_priority, store_priority);
+static DEVICE_ATTR_RW(priority);
-static ssize_t show_root_id(struct device *d, struct device_attribute *attr,
+static ssize_t root_id_show(struct device *d, struct device_attribute *attr,
char *buf)
{
return br_show_bridge_id(buf, &to_bridge(d)->designated_root);
}
-static DEVICE_ATTR(root_id, S_IRUGO, show_root_id, NULL);
+static DEVICE_ATTR_RO(root_id);
-static ssize_t show_bridge_id(struct device *d, struct device_attribute *attr,
+static ssize_t bridge_id_show(struct device *d, struct device_attribute *attr,
char *buf)
{
return br_show_bridge_id(buf, &to_bridge(d)->bridge_id);
}
-static DEVICE_ATTR(bridge_id, S_IRUGO, show_bridge_id, NULL);
+static DEVICE_ATTR_RO(bridge_id);
-static ssize_t show_root_port(struct device *d, struct device_attribute *attr,
+static ssize_t root_port_show(struct device *d, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", to_bridge(d)->root_port);
}
-static DEVICE_ATTR(root_port, S_IRUGO, show_root_port, NULL);
+static DEVICE_ATTR_RO(root_port);
-static ssize_t show_root_path_cost(struct device *d,
+static ssize_t root_path_cost_show(struct device *d,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", to_bridge(d)->root_path_cost);
}
-static DEVICE_ATTR(root_path_cost, S_IRUGO, show_root_path_cost, NULL);
+static DEVICE_ATTR_RO(root_path_cost);
-static ssize_t show_topology_change(struct device *d,
+static ssize_t topology_change_show(struct device *d,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", to_bridge(d)->topology_change);
}
-static DEVICE_ATTR(topology_change, S_IRUGO, show_topology_change, NULL);
+static DEVICE_ATTR_RO(topology_change);
-static ssize_t show_topology_change_detected(struct device *d,
+static ssize_t topology_change_detected_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%d\n", br->topology_change_detected);
}
-static DEVICE_ATTR(topology_change_detected, S_IRUGO,
- show_topology_change_detected, NULL);
+static DEVICE_ATTR_RO(topology_change_detected);
-static ssize_t show_hello_timer(struct device *d,
+static ssize_t hello_timer_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%ld\n", br_timer_value(&br->hello_timer));
}
-static DEVICE_ATTR(hello_timer, S_IRUGO, show_hello_timer, NULL);
+static DEVICE_ATTR_RO(hello_timer);
-static ssize_t show_tcn_timer(struct device *d, struct device_attribute *attr,
+static ssize_t tcn_timer_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%ld\n", br_timer_value(&br->tcn_timer));
}
-static DEVICE_ATTR(tcn_timer, S_IRUGO, show_tcn_timer, NULL);
+static DEVICE_ATTR_RO(tcn_timer);
-static ssize_t show_topology_change_timer(struct device *d,
+static ssize_t topology_change_timer_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%ld\n", br_timer_value(&br->topology_change_timer));
}
-static DEVICE_ATTR(topology_change_timer, S_IRUGO, show_topology_change_timer,
- NULL);
+static DEVICE_ATTR_RO(topology_change_timer);
-static ssize_t show_gc_timer(struct device *d, struct device_attribute *attr,
+static ssize_t gc_timer_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%ld\n", br_timer_value(&br->gc_timer));
}
-static DEVICE_ATTR(gc_timer, S_IRUGO, show_gc_timer, NULL);
+static DEVICE_ATTR_RO(gc_timer);
-static ssize_t show_group_addr(struct device *d,
+static ssize_t group_addr_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
@@ -293,7 +288,7 @@ static ssize_t show_group_addr(struct device *d,
br->group_addr[4], br->group_addr[5]);
}
-static ssize_t store_group_addr(struct device *d,
+static ssize_t group_addr_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
@@ -324,10 +319,9 @@ static ssize_t store_group_addr(struct device *d,
return len;
}
-static DEVICE_ATTR(group_addr, S_IRUGO | S_IWUSR,
- show_group_addr, store_group_addr);
+static DEVICE_ATTR_RW(group_addr);
-static ssize_t store_flush(struct device *d,
+static ssize_t flush_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
@@ -339,26 +333,25 @@ static ssize_t store_flush(struct device *d,
br_fdb_flush(br);
return len;
}
-static DEVICE_ATTR(flush, S_IWUSR, NULL, store_flush);
+static DEVICE_ATTR_WO(flush);
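The renames throughout this file are not cosmetic: DEVICE_ATTR_RW(name), DEVICE_ATTR_RO(name) and DEVICE_ATTR_WO(name) construct the attribute from the conventional name_show()/name_store() callbacks, so the handlers must be named to match. Minimal shape of a converted read-write attribute ("foo" is hypothetical):

	static ssize_t foo_show(struct device *d,
				struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 0);	/* hypothetical value */
	}

	static ssize_t foo_store(struct device *d, struct device_attribute *attr,
				 const char *buf, size_t len)
	{
		return len;	/* hypothetical: parse and apply buf here */
	}

	static DEVICE_ATTR_RW(foo);	/* declares dev_attr_foo, mode 0644 */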
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
-static ssize_t show_multicast_router(struct device *d,
+static ssize_t multicast_router_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%d\n", br->multicast_router);
}
-static ssize_t store_multicast_router(struct device *d,
+static ssize_t multicast_router_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, br_multicast_set_router);
}
-static DEVICE_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router,
- store_multicast_router);
+static DEVICE_ATTR_RW(multicast_router);
-static ssize_t show_multicast_snooping(struct device *d,
+static ssize_t multicast_snooping_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
@@ -366,18 +359,17 @@ static ssize_t show_multicast_snooping(struct device *d,
return sprintf(buf, "%d\n", !br->multicast_disabled);
}
-static ssize_t store_multicast_snooping(struct device *d,
+static ssize_t multicast_snooping_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, br_multicast_toggle);
}
-static DEVICE_ATTR(multicast_snooping, S_IRUGO | S_IWUSR,
- show_multicast_snooping, store_multicast_snooping);
+static DEVICE_ATTR_RW(multicast_snooping);
-static ssize_t show_multicast_query_use_ifaddr(struct device *d,
- struct device_attribute *attr,
- char *buf)
+static ssize_t multicast_query_use_ifaddr_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%d\n", br->multicast_query_use_ifaddr);
@@ -390,17 +382,15 @@ static int set_query_use_ifaddr(struct net_bridge *br, unsigned long val)
}
static ssize_t
-store_multicast_query_use_ifaddr(struct device *d,
+multicast_query_use_ifaddr_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_query_use_ifaddr);
}
-static DEVICE_ATTR(multicast_query_use_ifaddr, S_IRUGO | S_IWUSR,
- show_multicast_query_use_ifaddr,
- store_multicast_query_use_ifaddr);
+static DEVICE_ATTR_RW(multicast_query_use_ifaddr);
-static ssize_t show_multicast_querier(struct device *d,
+static ssize_t multicast_querier_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
@@ -408,16 +398,15 @@ static ssize_t show_multicast_querier(struct device *d,
return sprintf(buf, "%d\n", br->multicast_querier);
}
-static ssize_t store_multicast_querier(struct device *d,
+static ssize_t multicast_querier_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, br_multicast_set_querier);
}
-static DEVICE_ATTR(multicast_querier, S_IRUGO | S_IWUSR,
- show_multicast_querier, store_multicast_querier);
+static DEVICE_ATTR_RW(multicast_querier);
-static ssize_t show_hash_elasticity(struct device *d,
+static ssize_t hash_elasticity_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
@@ -430,31 +419,29 @@ static int set_elasticity(struct net_bridge *br, unsigned long val)
return 0;
}
-static ssize_t store_hash_elasticity(struct device *d,
+static ssize_t hash_elasticity_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_elasticity);
}
-static DEVICE_ATTR(hash_elasticity, S_IRUGO | S_IWUSR, show_hash_elasticity,
- store_hash_elasticity);
+static DEVICE_ATTR_RW(hash_elasticity);
-static ssize_t show_hash_max(struct device *d, struct device_attribute *attr,
+static ssize_t hash_max_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%u\n", br->hash_max);
}
-static ssize_t store_hash_max(struct device *d, struct device_attribute *attr,
+static ssize_t hash_max_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, br_multicast_set_hash_max);
}
-static DEVICE_ATTR(hash_max, S_IRUGO | S_IWUSR, show_hash_max,
- store_hash_max);
+static DEVICE_ATTR_RW(hash_max);
-static ssize_t show_multicast_last_member_count(struct device *d,
+static ssize_t multicast_last_member_count_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
@@ -468,17 +455,15 @@ static int set_last_member_count(struct net_bridge *br, unsigned long val)
return 0;
}
-static ssize_t store_multicast_last_member_count(struct device *d,
+static ssize_t multicast_last_member_count_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_last_member_count);
}
-static DEVICE_ATTR(multicast_last_member_count, S_IRUGO | S_IWUSR,
- show_multicast_last_member_count,
- store_multicast_last_member_count);
+static DEVICE_ATTR_RW(multicast_last_member_count);
-static ssize_t show_multicast_startup_query_count(
+static ssize_t multicast_startup_query_count_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
@@ -491,17 +476,15 @@ static int set_startup_query_count(struct net_bridge *br, unsigned long val)
return 0;
}
-static ssize_t store_multicast_startup_query_count(
+static ssize_t multicast_startup_query_count_store(
struct device *d, struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_startup_query_count);
}
-static DEVICE_ATTR(multicast_startup_query_count, S_IRUGO | S_IWUSR,
- show_multicast_startup_query_count,
- store_multicast_startup_query_count);
+static DEVICE_ATTR_RW(multicast_startup_query_count);
-static ssize_t show_multicast_last_member_interval(
+static ssize_t multicast_last_member_interval_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
@@ -515,17 +498,15 @@ static int set_last_member_interval(struct net_bridge *br, unsigned long val)
return 0;
}
-static ssize_t store_multicast_last_member_interval(
+static ssize_t multicast_last_member_interval_store(
struct device *d, struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_last_member_interval);
}
-static DEVICE_ATTR(multicast_last_member_interval, S_IRUGO | S_IWUSR,
- show_multicast_last_member_interval,
- store_multicast_last_member_interval);
+static DEVICE_ATTR_RW(multicast_last_member_interval);
-static ssize_t show_multicast_membership_interval(
+static ssize_t multicast_membership_interval_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
@@ -539,17 +520,15 @@ static int set_membership_interval(struct net_bridge *br, unsigned long val)
return 0;
}
-static ssize_t store_multicast_membership_interval(
+static ssize_t multicast_membership_interval_store(
struct device *d, struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_membership_interval);
}
-static DEVICE_ATTR(multicast_membership_interval, S_IRUGO | S_IWUSR,
- show_multicast_membership_interval,
- store_multicast_membership_interval);
+static DEVICE_ATTR_RW(multicast_membership_interval);
-static ssize_t show_multicast_querier_interval(struct device *d,
+static ssize_t multicast_querier_interval_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
@@ -564,17 +543,15 @@ static int set_querier_interval(struct net_bridge *br, unsigned long val)
return 0;
}
-static ssize_t store_multicast_querier_interval(struct device *d,
+static ssize_t multicast_querier_interval_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_querier_interval);
}
-static DEVICE_ATTR(multicast_querier_interval, S_IRUGO | S_IWUSR,
- show_multicast_querier_interval,
- store_multicast_querier_interval);
+static DEVICE_ATTR_RW(multicast_querier_interval);
-static ssize_t show_multicast_query_interval(struct device *d,
+static ssize_t multicast_query_interval_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
@@ -589,17 +566,15 @@ static int set_query_interval(struct net_bridge *br, unsigned long val)
return 0;
}
-static ssize_t store_multicast_query_interval(struct device *d,
+static ssize_t multicast_query_interval_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_query_interval);
}
-static DEVICE_ATTR(multicast_query_interval, S_IRUGO | S_IWUSR,
- show_multicast_query_interval,
- store_multicast_query_interval);
+static DEVICE_ATTR_RW(multicast_query_interval);
-static ssize_t show_multicast_query_response_interval(
+static ssize_t multicast_query_response_interval_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
@@ -614,17 +589,15 @@ static int set_query_response_interval(struct net_bridge *br, unsigned long val)
return 0;
}
-static ssize_t store_multicast_query_response_interval(
+static ssize_t multicast_query_response_interval_store(
struct device *d, struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_query_response_interval);
}
-static DEVICE_ATTR(multicast_query_response_interval, S_IRUGO | S_IWUSR,
- show_multicast_query_response_interval,
- store_multicast_query_response_interval);
+static DEVICE_ATTR_RW(multicast_query_response_interval);
-static ssize_t show_multicast_startup_query_interval(
+static ssize_t multicast_startup_query_interval_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
@@ -639,18 +612,16 @@ static int set_startup_query_interval(struct net_bridge *br, unsigned long val)
return 0;
}
-static ssize_t store_multicast_startup_query_interval(
+static ssize_t multicast_startup_query_interval_store(
struct device *d, struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_startup_query_interval);
}
-static DEVICE_ATTR(multicast_startup_query_interval, S_IRUGO | S_IWUSR,
- show_multicast_startup_query_interval,
- store_multicast_startup_query_interval);
+static DEVICE_ATTR_RW(multicast_startup_query_interval);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
-static ssize_t show_nf_call_iptables(
+static ssize_t nf_call_iptables_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
@@ -663,16 +634,15 @@ static int set_nf_call_iptables(struct net_bridge *br, unsigned long val)
return 0;
}
-static ssize_t store_nf_call_iptables(
+static ssize_t nf_call_iptables_store(
struct device *d, struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_nf_call_iptables);
}
-static DEVICE_ATTR(nf_call_iptables, S_IRUGO | S_IWUSR,
- show_nf_call_iptables, store_nf_call_iptables);
+static DEVICE_ATTR_RW(nf_call_iptables);
-static ssize_t show_nf_call_ip6tables(
+static ssize_t nf_call_ip6tables_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
@@ -685,16 +655,15 @@ static int set_nf_call_ip6tables(struct net_bridge *br, unsigned long val)
return 0;
}
-static ssize_t store_nf_call_ip6tables(
+static ssize_t nf_call_ip6tables_store(
struct device *d, struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_nf_call_ip6tables);
}
-static DEVICE_ATTR(nf_call_ip6tables, S_IRUGO | S_IWUSR,
- show_nf_call_ip6tables, store_nf_call_ip6tables);
+static DEVICE_ATTR_RW(nf_call_ip6tables);
-static ssize_t show_nf_call_arptables(
+static ssize_t nf_call_arptables_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
@@ -707,17 +676,16 @@ static int set_nf_call_arptables(struct net_bridge *br, unsigned long val)
return 0;
}
-static ssize_t store_nf_call_arptables(
+static ssize_t nf_call_arptables_store(
struct device *d, struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_nf_call_arptables);
}
-static DEVICE_ATTR(nf_call_arptables, S_IRUGO | S_IWUSR,
- show_nf_call_arptables, store_nf_call_arptables);
+static DEVICE_ATTR_RW(nf_call_arptables);
#endif
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
-static ssize_t show_vlan_filtering(struct device *d,
+static ssize_t vlan_filtering_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
@@ -725,14 +693,13 @@ static ssize_t show_vlan_filtering(struct device *d,
return sprintf(buf, "%d\n", br->vlan_enabled);
}
-static ssize_t store_vlan_filtering(struct device *d,
+static ssize_t vlan_filtering_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, br_vlan_filter_toggle);
}
-static DEVICE_ATTR(vlan_filtering, S_IRUGO | S_IWUSR,
- show_vlan_filtering, store_vlan_filtering);
+static DEVICE_ATTR_RW(vlan_filtering);
#endif
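The show_*/store_* renames in the hunks above are what enable the DEVICE_ATTR_RW() conversion: the macro stringifies its argument and wires up <name>_show()/<name>_store() with mode 0644 (S_IRUGO | S_IWUSR), matching the explicit modes being dropped. A minimal sketch of the pattern, with a hypothetical attribute name "foo":

/* DEVICE_ATTR_RW(foo) expands roughly to:
 *	struct device_attribute dev_attr_foo =
 *		__ATTR(foo, 0644, foo_show, foo_store);
 * so the handlers must follow the <name>_show/<name>_store convention.
 */
static ssize_t foo_show(struct device *d, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* placeholder value */
}

static ssize_t foo_store(struct device *d, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return len;	/* accept and ignore */
}

static DEVICE_ATTR_RW(foo);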
static struct attribute *bridge_attrs[] = {
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 2a2cdb756d51..dd595bd7fa82 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -26,7 +26,7 @@ struct brport_attribute {
int (*store)(struct net_bridge_port *, unsigned long);
};
-#define BRPORT_ATTR(_name,_mode,_show,_store) \
+#define BRPORT_ATTR(_name, _mode, _show, _store) \
const struct brport_attribute brport_attr_##_name = { \
.attr = {.name = __stringify(_name), \
.mode = _mode }, \
@@ -209,21 +209,21 @@ static const struct brport_attribute *brport_attrs[] = {
#define to_brport_attr(_at) container_of(_at, struct brport_attribute, attr)
#define to_brport(obj) container_of(obj, struct net_bridge_port, kobj)
-static ssize_t brport_show(struct kobject * kobj,
- struct attribute * attr, char * buf)
+static ssize_t brport_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
{
- struct brport_attribute * brport_attr = to_brport_attr(attr);
- struct net_bridge_port * p = to_brport(kobj);
+ struct brport_attribute *brport_attr = to_brport_attr(attr);
+ struct net_bridge_port *p = to_brport(kobj);
return brport_attr->show(p, buf);
}
-static ssize_t brport_store(struct kobject * kobj,
- struct attribute * attr,
- const char * buf, size_t count)
+static ssize_t brport_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t count)
{
- struct brport_attribute * brport_attr = to_brport_attr(attr);
- struct net_bridge_port * p = to_brport(kobj);
+ struct brport_attribute *brport_attr = to_brport_attr(attr);
+ struct net_bridge_port *p = to_brport(kobj);
ssize_t ret = -EINVAL;
char *endp;
unsigned long val;
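brport_show() and brport_store() recover the specific attribute and the owning port from the generic kobject/attribute pointers with container_of(), via the to_brport_attr()/to_brport() macros above. A self-contained userspace sketch of the same recovery trick (the struct names here are illustrative):

#include <stddef.h>
#include <stdio.h>

/* recover the enclosing struct from a pointer to one of its members */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct attr { const char *name; };
struct port { int id; struct attr attr; };

int main(void)
{
	struct port p = { .id = 7, .attr = { .name = "path_cost" } };
	struct attr *a = &p.attr;	/* all sysfs hands us is this */

	printf("%s belongs to port %d\n", a->name,
	       container_of(a, struct port, attr)->id);
	return 0;
}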
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index af5ebd18d705..7ffc801467ec 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -146,7 +146,7 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
/* At this point, we know that the frame was filtered and contains
* a valid vlan id. If the vlan id is set in the untagged bitmap,
- * send untagged; otherwise, send taged.
+ * send untagged; otherwise, send tagged.
*/
br_vlan_get_tag(skb, &vid);
if (test_bit(vid, pv->untagged_bitmap))
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index 19c37a4929bc..5322a36867a3 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -96,7 +96,7 @@ ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
bitmask = NF_LOG_MASK;
if ((bitmask & EBT_LOG_IP) && eth_hdr(skb)->h_proto ==
- htons(ETH_P_IP)){
+ htons(ETH_P_IP)) {
const struct iphdr *ih;
struct iphdr _iph;
diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c
index f8f0bd1a1d51..0f6b118d6cb2 100644
--- a/net/bridge/netfilter/ebt_snat.c
+++ b/net/bridge/netfilter/ebt_snat.c
@@ -35,7 +35,7 @@ ebt_snat_tg(struct sk_buff *skb, const struct xt_action_param *par)
return EBT_DROP;
if (ap->ar_hln != ETH_ALEN)
goto out;
- if (skb_store_bits(skb, sizeof(_ah), info->mac,ETH_ALEN))
+ if (skb_store_bits(skb, sizeof(_ah), info->mac, ETH_ALEN))
return EBT_DROP;
}
out:
diff --git a/net/bridge/netfilter/ebt_vlan.c b/net/bridge/netfilter/ebt_vlan.c
index eae67bf0446c..8d3f8c7651f0 100644
--- a/net/bridge/netfilter/ebt_vlan.c
+++ b/net/bridge/netfilter/ebt_vlan.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/if_ether.h>
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index dbd1c783431b..d2cdf5d6e98c 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -23,8 +23,7 @@ static struct ebt_entries initial_chain = {
.policy = EBT_ACCEPT,
};
-static struct ebt_replace_kernel initial_table =
-{
+static struct ebt_replace_kernel initial_table = {
.name = "broute",
.valid_hooks = 1 << NF_BR_BROUTING,
.entries_size = sizeof(struct ebt_entries),
@@ -41,8 +40,7 @@ static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
return 0;
}
-static const struct ebt_table broute_table =
-{
+static const struct ebt_table broute_table = {
.name = "broute",
.table = &initial_table,
.valid_hooks = 1 << NF_BR_BROUTING,
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index bb2da7b706e7..ce205aabf9c5 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -14,8 +14,7 @@
#define FILTER_VALID_HOOKS ((1 << NF_BR_LOCAL_IN) | (1 << NF_BR_FORWARD) | \
(1 << NF_BR_LOCAL_OUT))
-static struct ebt_entries initial_chains[] =
-{
+static struct ebt_entries initial_chains[] = {
{
.name = "INPUT",
.policy = EBT_ACCEPT,
@@ -30,8 +29,7 @@ static struct ebt_entries initial_chains[] =
},
};
-static struct ebt_replace_kernel initial_table =
-{
+static struct ebt_replace_kernel initial_table = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.entries_size = 3 * sizeof(struct ebt_entries),
@@ -50,8 +48,7 @@ static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
return 0;
}
-static const struct ebt_table frame_filter =
-{
+static const struct ebt_table frame_filter = {
.name = "filter",
.table = &initial_table,
.valid_hooks = FILTER_VALID_HOOKS,
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index bd238f1f105b..a0ac2984fb6c 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -14,8 +14,7 @@
#define NAT_VALID_HOOKS ((1 << NF_BR_PRE_ROUTING) | (1 << NF_BR_LOCAL_OUT) | \
(1 << NF_BR_POST_ROUTING))
-static struct ebt_entries initial_chains[] =
-{
+static struct ebt_entries initial_chains[] = {
{
.name = "PREROUTING",
.policy = EBT_ACCEPT,
@@ -30,8 +29,7 @@ static struct ebt_entries initial_chains[] =
}
};
-static struct ebt_replace_kernel initial_table =
-{
+static struct ebt_replace_kernel initial_table = {
.name = "nat",
.valid_hooks = NAT_VALID_HOOKS,
.entries_size = 3 * sizeof(struct ebt_entries),
@@ -50,8 +48,7 @@ static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
return 0;
}
-static struct ebt_table frame_nat =
-{
+static struct ebt_table frame_nat = {
.name = "nat",
.table = &initial_table,
.valid_hooks = NAT_VALID_HOOKS,
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index ac7802428384..0e474b13463b 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -118,10 +118,10 @@ ebt_dev_check(const char *entry, const struct net_device *device)
/* 1 is the wildcard token */
while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
i++;
- return (devname[i] != entry[i] && entry[i] != 1);
+ return devname[i] != entry[i] && entry[i] != 1;
}
-#define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
+#define FWINV2(bool, invflg) ((bool) ^ !!(e->invflags & invflg))
/* process standard matches */
static inline int
ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
@@ -1441,7 +1441,7 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
return -EFAULT;
if (*len != sizeof(struct ebt_replace) + entries_size +
- (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0))
+ (tmp.num_counters ? nentries * sizeof(struct ebt_counter) : 0))
return -EINVAL;
if (tmp.nentries != nentries) {
@@ -1477,7 +1477,7 @@ static int do_ebt_set_ctl(struct sock *sk,
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
- switch(cmd) {
+ switch (cmd) {
case EBT_SO_SET_ENTRIES:
ret = do_replace(net, user, len);
break;
@@ -1507,10 +1507,10 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
if (!t)
return ret;
- switch(cmd) {
+ switch (cmd) {
case EBT_SO_GET_INFO:
case EBT_SO_GET_INIT_INFO:
- if (*len != sizeof(struct ebt_replace)){
+ if (*len != sizeof(struct ebt_replace)) {
ret = -EINVAL;
mutex_unlock(&ebt_mutex);
break;
@@ -1525,7 +1525,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
tmp.valid_hooks = t->table->valid_hooks;
}
mutex_unlock(&ebt_mutex);
- if (copy_to_user(user, &tmp, *len) != 0){
+ if (copy_to_user(user, &tmp, *len) != 0) {
BUGPRINT("c2u Didn't work\n");
ret = -EFAULT;
break;
@@ -2375,8 +2375,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
}
#endif
-static struct nf_sockopt_ops ebt_sockopts =
-{
+static struct nf_sockopt_ops ebt_sockopts = {
.pf = PF_INET,
.set_optmin = EBT_BASE_CTL,
.set_optmax = EBT_SO_SET_MAX + 1,
diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c
index cf54b22818c8..5bcc0d8b31f2 100644
--- a/net/bridge/netfilter/nf_tables_bridge.c
+++ b/net/bridge/netfilter/nf_tables_bridge.c
@@ -14,10 +14,30 @@
#include <linux/netfilter_bridge.h>
#include <net/netfilter/nf_tables.h>
+static unsigned int
+nft_do_chain_bridge(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct nft_pktinfo pkt;
+
+ nft_set_pktinfo(&pkt, ops, skb, in, out);
+
+ return nft_do_chain(&pkt, ops);
+}
+
static struct nft_af_info nft_af_bridge __read_mostly = {
.family = NFPROTO_BRIDGE,
.nhooks = NF_BR_NUMHOOKS,
.owner = THIS_MODULE,
+ .nops = 1,
+ .hooks = {
+ [NF_BR_LOCAL_IN] = nft_do_chain_bridge,
+ [NF_BR_FORWARD] = nft_do_chain_bridge,
+ [NF_BR_LOCAL_OUT] = nft_do_chain_bridge,
+ },
};
static int nf_tables_bridge_init_net(struct net *net)
@@ -48,32 +68,14 @@ static struct pernet_operations nf_tables_bridge_net_ops = {
.exit = nf_tables_bridge_exit_net,
};
-static unsigned int
-nft_do_chain_bridge(const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- struct nft_pktinfo pkt;
-
- nft_set_pktinfo(&pkt, ops, skb, in, out);
-
- return nft_do_chain_pktinfo(&pkt, ops);
-}
-
-static struct nf_chain_type filter_bridge = {
- .family = NFPROTO_BRIDGE,
+static const struct nf_chain_type filter_bridge = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
+ .family = NFPROTO_BRIDGE,
+ .owner = THIS_MODULE,
.hook_mask = (1 << NF_BR_LOCAL_IN) |
(1 << NF_BR_FORWARD) |
(1 << NF_BR_LOCAL_OUT),
- .fn = {
- [NF_BR_LOCAL_IN] = nft_do_chain_bridge,
- [NF_BR_FORWARD] = nft_do_chain_bridge,
- [NF_BR_LOCAL_OUT] = nft_do_chain_bridge,
- },
};
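This hunk moves the per-hook entry points out of the chain type's .fn[] array and into nft_af_info.hooks[], so the chain type can become const and only declare its hook_mask and owner. A sketch of how the module init presumably ties the two registrations together, assuming the 3.14-era nf_tables API:

static int __init nf_tables_bridge_init(void)
{
	int ret;

	/* chain types register separately from the AF info now */
	ret = nft_register_chain_type(&filter_bridge);
	if (ret < 0)
		return ret;

	ret = register_pernet_subsys(&nf_tables_bridge_net_ops);
	if (ret < 0)
		nft_unregister_chain_type(&filter_bridge);

	return ret;
}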
static int __init nf_tables_bridge_init(void)
diff --git a/net/can/gw.c b/net/can/gw.c
index 3f9b0f3a2818..88c8a39c173d 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -844,8 +844,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh)
if (!gwj->src.dev)
goto out;
- /* check for CAN netdev not using header_ops - see gw_rcv() */
- if (gwj->src.dev->type != ARPHRD_CAN || gwj->src.dev->header_ops)
+ if (gwj->src.dev->type != ARPHRD_CAN)
goto put_src_out;
gwj->dst.dev = dev_get_by_index(&init_net, gwj->ccgw.dst_idx);
@@ -853,8 +852,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh)
if (!gwj->dst.dev)
goto put_src_out;
- /* check for CAN netdev not using header_ops - see gw_rcv() */
- if (gwj->dst.dev->type != ARPHRD_CAN || gwj->dst.dev->header_ops)
+ if (gwj->dst.dev->type != ARPHRD_CAN)
goto put_src_dst_out;
gwj->limit_hops = limhops;
diff --git a/net/compat.c b/net/compat.c
index 618c6a8a911b..dd32e34c1e2c 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -72,7 +72,7 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
__get_user(kmsg->msg_flags, &umsg->msg_flags))
return -EFAULT;
if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
- return -EINVAL;
+ kmsg->msg_namelen = sizeof(struct sockaddr_storage);
kmsg->msg_name = compat_ptr(tmp1);
kmsg->msg_iov = compat_ptr(tmp2);
kmsg->msg_control = compat_ptr(tmp3);
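get_compat_msghdr() now clamps an oversized msg_namelen to sizeof(struct sockaddr_storage) instead of rejecting it, so compat callers that pass a large name buffer behave like native ones (the kernel writes back the real address length anyway). A toy illustration of the behavioural difference:

#include <stdio.h>

#define SS_LEN 128	/* stand-in for sizeof(struct sockaddr_storage) */

static int namelen_old(int len) { return len > SS_LEN ? -22 /* -EINVAL */ : len; }
static int namelen_new(int len) { return len > SS_LEN ? SS_LEN : len; }

int main(void)
{
	/* a compat caller passing a 256-byte name buffer */
	printf("old: %d, new: %d\n", namelen_old(256), namelen_new(256));
	return 0;	/* old: -22, new: 128 */
}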
diff --git a/net/core/Makefile b/net/core/Makefile
index b33b996f5dd6..9628c20acff6 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -21,4 +21,5 @@ obj-$(CONFIG_FIB_RULES) += fib_rules.o
obj-$(CONFIG_TRACEPOINTS) += net-traces.o
obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o
obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += timestamping.o
-obj-$(CONFIG_NETPRIO_CGROUP) += netprio_cgroup.o
+obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o
+obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
diff --git a/net/core/dev.c b/net/core/dev.c
index ba3b7ea5ebb3..ce01847793c0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -480,7 +480,7 @@ EXPORT_SYMBOL(dev_add_offload);
 * and must not be freed until after all the CPUs have gone
* through a quiescent state.
*/
-void __dev_remove_offload(struct packet_offload *po)
+static void __dev_remove_offload(struct packet_offload *po)
{
struct list_head *head = &offload_base;
struct packet_offload *po1;
@@ -498,7 +498,6 @@ void __dev_remove_offload(struct packet_offload *po)
out:
spin_unlock(&offload_lock);
}
-EXPORT_SYMBOL(__dev_remove_offload);
/**
* dev_remove_offload - remove packet offload handler
@@ -1566,14 +1565,14 @@ EXPORT_SYMBOL(unregister_netdevice_notifier);
* are as for raw_notifier_call_chain().
*/
-int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
- struct netdev_notifier_info *info)
+static int call_netdevice_notifiers_info(unsigned long val,
+ struct net_device *dev,
+ struct netdev_notifier_info *info)
{
ASSERT_RTNL();
netdev_notifier_info_init(info, dev);
return raw_notifier_call_chain(&netdev_chain, val, info);
}
-EXPORT_SYMBOL(call_netdevice_notifiers_info);
/**
* call_netdevice_notifiers - call all network notifier blocks
@@ -2145,30 +2144,42 @@ void __netif_schedule(struct Qdisc *q)
}
EXPORT_SYMBOL(__netif_schedule);
-void dev_kfree_skb_irq(struct sk_buff *skb)
+struct dev_kfree_skb_cb {
+ enum skb_free_reason reason;
+};
+
+static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
- if (atomic_dec_and_test(&skb->users)) {
- struct softnet_data *sd;
- unsigned long flags;
+ return (struct dev_kfree_skb_cb *)skb->cb;
+}
- local_irq_save(flags);
- sd = &__get_cpu_var(softnet_data);
- skb->next = sd->completion_queue;
- sd->completion_queue = skb;
- raise_softirq_irqoff(NET_TX_SOFTIRQ);
- local_irq_restore(flags);
+void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
+{
+ unsigned long flags;
+
+ if (likely(atomic_read(&skb->users) == 1)) {
+ smp_rmb();
+ atomic_set(&skb->users, 0);
+ } else if (likely(!atomic_dec_and_test(&skb->users))) {
+ return;
}
+ get_kfree_skb_cb(skb)->reason = reason;
+ local_irq_save(flags);
+ skb->next = __this_cpu_read(softnet_data.completion_queue);
+ __this_cpu_write(softnet_data.completion_queue, skb);
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
}
-EXPORT_SYMBOL(dev_kfree_skb_irq);
+EXPORT_SYMBOL(__dev_kfree_skb_irq);
-void dev_kfree_skb_any(struct sk_buff *skb)
+void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
if (in_irq() || irqs_disabled())
- dev_kfree_skb_irq(skb);
+ __dev_kfree_skb_irq(skb, reason);
else
dev_kfree_skb(skb);
}
-EXPORT_SYMBOL(dev_kfree_skb_any);
+EXPORT_SYMBOL(__dev_kfree_skb_any);
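With the free reason threaded through, the old entry points can survive as thin wrappers and drivers gain consume variants, so net_tx_action() below can trace dropped and successfully-transmitted skbs differently. A sketch of the wrappers as they presumably land in <linux/netdevice.h>:

enum skb_free_reason {
	SKB_REASON_CONSUMED,
	SKB_REASON_DROPPED,
};

static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
	__dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
}

static inline void dev_consume_skb_irq(struct sk_buff *skb)
{
	__dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
}

static inline void dev_kfree_skb_any(struct sk_buff *skb)
{
	__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
}

static inline void dev_consume_skb_any(struct sk_buff *skb)
{
	__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
}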
/**
@@ -2442,13 +2453,8 @@ static void dev_gso_skb_destructor(struct sk_buff *skb)
{
struct dev_gso_cb *cb;
- do {
- struct sk_buff *nskb = skb->next;
-
- skb->next = nskb->next;
- nskb->next = NULL;
- kfree_skb(nskb);
- } while (skb->next);
+ kfree_skb_list(skb->next);
+ skb->next = NULL;
cb = DEV_GSO_CB(skb);
if (cb->destructor)
@@ -2523,21 +2529,6 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_skb_features);
-/*
- * Returns true if either:
- * 1. skb has frag_list and the device doesn't support FRAGLIST, or
- * 2. skb is fragmented and the device does not support SG.
- */
-static inline int skb_needs_linearize(struct sk_buff *skb,
- netdev_features_t features)
-{
- return skb_is_nonlinear(skb) &&
- ((skb_has_frag_list(skb) &&
- !(features & NETIF_F_FRAGLIST)) ||
- (skb_shinfo(skb)->nr_frags &&
- !(features & NETIF_F_SG)));
-}
-
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, void *accel_priv)
{
@@ -2750,7 +2741,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
return rc;
}
-#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
@@ -3009,7 +3000,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
}
skb_reset_network_header(skb);
- if (!skb_get_rxhash(skb))
+ if (!skb_get_hash(skb))
goto done;
flow_table = rcu_dereference(rxqueue->rps_flow_table);
@@ -3154,7 +3145,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
rcu_read_lock();
fl = rcu_dereference(sd->flow_limit);
if (fl) {
- new_flow = skb_get_rxhash(skb) & (fl->num_buckets - 1);
+ new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
old_flow = fl->history[fl->history_head];
fl->history[fl->history_head] = new_flow;
@@ -3306,7 +3297,10 @@ static void net_tx_action(struct softirq_action *h)
clist = clist->next;
WARN_ON(atomic_read(&skb->users));
- trace_kfree_skb(skb, net_tx_action);
+ if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
+ trace_consume_skb(skb);
+ else
+ trace_kfree_skb(skb, net_tx_action);
__kfree_skb(skb);
}
}
@@ -3752,7 +3746,7 @@ static int napi_gro_complete(struct sk_buff *skb)
if (ptype->type != type || !ptype->callbacks.gro_complete)
continue;
- err = ptype->callbacks.gro_complete(skb);
+ err = ptype->callbacks.gro_complete(skb, 0);
break;
}
rcu_read_unlock();
@@ -3818,6 +3812,23 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
}
}
+static void skb_gro_reset_offset(struct sk_buff *skb)
+{
+ const struct skb_shared_info *pinfo = skb_shinfo(skb);
+ const skb_frag_t *frag0 = &pinfo->frags[0];
+
+ NAPI_GRO_CB(skb)->data_offset = 0;
+ NAPI_GRO_CB(skb)->frag0 = NULL;
+ NAPI_GRO_CB(skb)->frag0_len = 0;
+
+ if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
+ pinfo->nr_frags &&
+ !PageHighMem(skb_frag_page(frag0))) {
+ NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
+ NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
+ }
+}
+
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
struct sk_buff **pp = NULL;
@@ -3833,7 +3844,9 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
if (skb_is_gso(skb) || skb_has_frag_list(skb))
goto normal;
+ skb_gro_reset_offset(skb);
gro_list_prepare(napi, skb);
+ NAPI_GRO_CB(skb)->csum = skb->csum; /* Needed for CHECKSUM_COMPLETE */
rcu_read_lock();
list_for_each_entry_rcu(ptype, head, list) {
@@ -3910,6 +3923,31 @@ normal:
goto pull;
}
+struct packet_offload *gro_find_receive_by_type(__be16 type)
+{
+ struct list_head *offload_head = &offload_base;
+ struct packet_offload *ptype;
+
+ list_for_each_entry_rcu(ptype, offload_head, list) {
+ if (ptype->type != type || !ptype->callbacks.gro_receive)
+ continue;
+ return ptype;
+ }
+ return NULL;
+}
+
+struct packet_offload *gro_find_complete_by_type(__be16 type)
+{
+ struct list_head *offload_head = &offload_base;
+ struct packet_offload *ptype;
+
+ list_for_each_entry_rcu(ptype, offload_head, list) {
+ if (ptype->type != type || !ptype->callbacks.gro_complete)
+ continue;
+ return ptype;
+ }
+ return NULL;
+}
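gro_find_receive_by_type()/gro_find_complete_by_type() let encapsulation offloads (e.g. the GRE/VXLAN paths) look up the inner protocol's callbacks instead of open-coding the offload-list walk. A hedged sketch of a caller, using the gro_receive signature of this era (the function name is hypothetical):

static struct sk_buff **example_gro_receive(struct sk_buff **head,
					    struct sk_buff *skb)
{
	struct packet_offload *ptype;
	struct sk_buff **pp = NULL;
	__be16 type = skb->protocol;	/* the inner type, in a real tunnel */

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (ptype)
		pp = ptype->callbacks.gro_receive(head, skb);
	rcu_read_unlock();

	return pp;
}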
static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
@@ -3938,27 +3976,8 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
return ret;
}
-static void skb_gro_reset_offset(struct sk_buff *skb)
-{
- const struct skb_shared_info *pinfo = skb_shinfo(skb);
- const skb_frag_t *frag0 = &pinfo->frags[0];
-
- NAPI_GRO_CB(skb)->data_offset = 0;
- NAPI_GRO_CB(skb)->frag0 = NULL;
- NAPI_GRO_CB(skb)->frag0_len = 0;
-
- if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
- pinfo->nr_frags &&
- !PageHighMem(skb_frag_page(frag0))) {
- NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
- NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
- }
-}
-
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
- skb_gro_reset_offset(skb);
-
return napi_skb_finish(dev_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);
@@ -3981,8 +4000,7 @@ struct sk_buff *napi_get_frags(struct napi_struct *napi)
if (!skb) {
skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
- if (skb)
- napi->skb = skb;
+ napi->skb = skb;
}
return skb;
}
@@ -3993,12 +4011,7 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *
{
switch (ret) {
case GRO_NORMAL:
- case GRO_HELD:
- skb->protocol = eth_type_trans(skb, skb->dev);
-
- if (ret == GRO_HELD)
- skb_gro_pull(skb, -ETH_HLEN);
- else if (netif_receive_skb(skb))
+ if (netif_receive_skb(skb))
ret = GRO_DROP;
break;
@@ -4007,6 +4020,7 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *
napi_reuse_skb(napi, skb);
break;
+ case GRO_HELD:
case GRO_MERGED:
break;
}
@@ -4017,36 +4031,15 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
struct sk_buff *skb = napi->skb;
- struct ethhdr *eth;
- unsigned int hlen;
- unsigned int off;
napi->skb = NULL;
- skb_reset_mac_header(skb);
- skb_gro_reset_offset(skb);
-
- off = skb_gro_offset(skb);
- hlen = off + sizeof(*eth);
- eth = skb_gro_header_fast(skb, off);
- if (skb_gro_header_hard(skb, hlen)) {
- eth = skb_gro_header_slow(skb, hlen, off);
- if (unlikely(!eth)) {
- napi_reuse_skb(napi, skb);
- skb = NULL;
- goto out;
- }
+ if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) {
+ napi_reuse_skb(napi, skb);
+ return NULL;
}
+ skb->protocol = eth_type_trans(skb, skb->dev);
- skb_gro_pull(skb, sizeof(*eth));
-
- /*
- * This works because the only protocols we care about don't require
- * special handling. We'll fix it up properly at the end.
- */
- skb->protocol = eth->h_proto;
-
-out:
return skb;
}
@@ -4062,7 +4055,7 @@ gro_result_t napi_gro_frags(struct napi_struct *napi)
EXPORT_SYMBOL(napi_gro_frags);
/*
- * net_rps_action sends any pending IPI's for rps.
+ * net_rps_action_and_irq_enable sends any pending IPIs for rps.
* Note: called with local irq disabled, but exits with local irq enabled.
*/
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
@@ -4267,17 +4260,10 @@ EXPORT_SYMBOL(netif_napi_add);
void netif_napi_del(struct napi_struct *napi)
{
- struct sk_buff *skb, *next;
-
list_del_init(&napi->dev_list);
napi_free_frags(napi);
- for (skb = napi->gro_list; skb; skb = next) {
- next = skb->next;
- skb->next = NULL;
- kfree_skb(skb);
- }
-
+ kfree_skb_list(napi->gro_list);
napi->gro_list = NULL;
napi->gro_count = 0;
}
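The same consolidation as in dev_gso_skb_destructor() earlier in this file: the open-coded detach-and-free walk becomes a single kfree_skb_list() call, which presumably amounts to:

void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}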
@@ -4394,19 +4380,6 @@ struct netdev_adjacent {
struct rcu_head rcu;
};
-static struct netdev_adjacent *__netdev_find_adj_rcu(struct net_device *dev,
- struct net_device *adj_dev,
- struct list_head *adj_list)
-{
- struct netdev_adjacent *adj;
-
- list_for_each_entry_rcu(adj, adj_list, list) {
- if (adj->dev == adj_dev)
- return adj;
- }
- return NULL;
-}
-
static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
struct net_device *adj_dev,
struct list_head *adj_list)
@@ -4445,13 +4418,12 @@ EXPORT_SYMBOL(netdev_has_upper_dev);
* Find out if a device is linked to an upper device and return true in case
* it is. The caller must hold the RTNL lock.
*/
-bool netdev_has_any_upper_dev(struct net_device *dev)
+static bool netdev_has_any_upper_dev(struct net_device *dev)
{
ASSERT_RTNL();
return !list_empty(&dev->all_adj_list.upper);
}
-EXPORT_SYMBOL(netdev_has_any_upper_dev);
/**
* netdev_master_upper_dev_get - Get master upper device
@@ -4500,7 +4472,7 @@ struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
{
struct netdev_adjacent *upper;
- WARN_ON_ONCE(!rcu_read_lock_held());
+ WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
@@ -4571,6 +4543,27 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
/**
+ * netdev_lower_get_first_private_rcu - Get the first ->private from the
+ *					 lower neighbour list, RCU variant
+ * @dev: device
+ *
+ * Gets the first netdev_adjacent->private from the dev's lower neighbour
+ * list. The caller must hold RCU read lock.
+ */
+void *netdev_lower_get_first_private_rcu(struct net_device *dev)
+{
+ struct netdev_adjacent *lower;
+
+ lower = list_first_or_null_rcu(&dev->adj_list.lower,
+ struct netdev_adjacent, list);
+ if (lower)
+ return lower->private;
+ return NULL;
+}
+EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
+
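A hedged usage sketch: a consumer such as bonding can fetch its first slave's private data under RCU without naming the lower device (the helper name and the slave/bonding types here are assumptions):

/* caller must hold rcu_read_lock() */
static inline struct slave *first_slave_rcu(struct bonding *bond)
{
	return netdev_lower_get_first_private_rcu(bond->dev);
}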
+/**
* netdev_master_upper_dev_get_rcu - Get master upper device
* @dev: device
*
@@ -4662,9 +4655,9 @@ free_adj:
return ret;
}
-void __netdev_adjacent_dev_remove(struct net_device *dev,
- struct net_device *adj_dev,
- struct list_head *dev_list)
+static void __netdev_adjacent_dev_remove(struct net_device *dev,
+ struct net_device *adj_dev,
+ struct list_head *dev_list)
{
struct netdev_adjacent *adj;
char linkname[IFNAMSIZ+7];
@@ -4702,11 +4695,11 @@ void __netdev_adjacent_dev_remove(struct net_device *dev,
kfree_rcu(adj, rcu);
}
-int __netdev_adjacent_dev_link_lists(struct net_device *dev,
- struct net_device *upper_dev,
- struct list_head *up_list,
- struct list_head *down_list,
- void *private, bool master)
+static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
+ struct net_device *upper_dev,
+ struct list_head *up_list,
+ struct list_head *down_list,
+ void *private, bool master)
{
int ret;
@@ -4725,8 +4718,8 @@ int __netdev_adjacent_dev_link_lists(struct net_device *dev,
return 0;
}
-int __netdev_adjacent_dev_link(struct net_device *dev,
- struct net_device *upper_dev)
+static int __netdev_adjacent_dev_link(struct net_device *dev,
+ struct net_device *upper_dev)
{
return __netdev_adjacent_dev_link_lists(dev, upper_dev,
&dev->all_adj_list.upper,
@@ -4734,26 +4727,26 @@ int __netdev_adjacent_dev_link(struct net_device *dev,
NULL, false);
}
-void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
- struct net_device *upper_dev,
- struct list_head *up_list,
- struct list_head *down_list)
+static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
+ struct net_device *upper_dev,
+ struct list_head *up_list,
+ struct list_head *down_list)
{
__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
__netdev_adjacent_dev_remove(upper_dev, dev, down_list);
}
-void __netdev_adjacent_dev_unlink(struct net_device *dev,
- struct net_device *upper_dev)
+static void __netdev_adjacent_dev_unlink(struct net_device *dev,
+ struct net_device *upper_dev)
{
__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
&dev->all_adj_list.upper,
&upper_dev->all_adj_list.lower);
}
-int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
- struct net_device *upper_dev,
- void *private, bool master)
+static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
+ struct net_device *upper_dev,
+ void *private, bool master)
{
int ret = __netdev_adjacent_dev_link(dev, upper_dev);
@@ -4772,8 +4765,8 @@ int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
return 0;
}
-void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
- struct net_device *upper_dev)
+static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
+ struct net_device *upper_dev)
{
__netdev_adjacent_dev_unlink(dev, upper_dev);
__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
@@ -4962,21 +4955,6 @@ void netdev_upper_dev_unlink(struct net_device *dev,
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);
-void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
- struct net_device *lower_dev)
-{
- struct netdev_adjacent *lower;
-
- if (!lower_dev)
- return NULL;
- lower = __netdev_find_adj_rcu(dev, lower_dev, &dev->adj_list.lower);
- if (!lower)
- return NULL;
-
- return lower->private;
-}
-EXPORT_SYMBOL(netdev_lower_dev_get_private_rcu);
-
void *netdev_lower_dev_get_private(struct net_device *dev,
struct net_device *lower_dev)
{
@@ -5831,13 +5809,8 @@ int register_netdevice(struct net_device *dev)
dev->features |= NETIF_F_SOFT_FEATURES;
dev->wanted_features = dev->features & dev->hw_features;
- /* Turn on no cache copy if HW is doing checksum */
if (!(dev->flags & IFF_LOOPBACK)) {
dev->hw_features |= NETIF_F_NOCACHE_COPY;
- if (dev->features & NETIF_F_ALL_CSUM) {
- dev->wanted_features |= NETIF_F_NOCACHE_COPY;
- dev->features |= NETIF_F_NOCACHE_COPY;
- }
}
/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index ec40a849fc42..bb504a919e33 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -186,47 +186,6 @@ static int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
return err;
}
-int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len, unsigned char addr_type)
-{
- int err;
- struct netdev_hw_addr *ha, *ha2;
- unsigned char type;
-
- list_for_each_entry(ha, &from_list->list, list) {
- type = addr_type ? addr_type : ha->type;
- err = __hw_addr_add(to_list, ha->addr, addr_len, type);
- if (err)
- goto unroll;
- }
- return 0;
-
-unroll:
- list_for_each_entry(ha2, &from_list->list, list) {
- if (ha2 == ha)
- break;
- type = addr_type ? addr_type : ha2->type;
- __hw_addr_del(to_list, ha2->addr, addr_len, type);
- }
- return err;
-}
-EXPORT_SYMBOL(__hw_addr_add_multiple);
-
-void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len, unsigned char addr_type)
-{
- struct netdev_hw_addr *ha;
- unsigned char type;
-
- list_for_each_entry(ha, &from_list->list, list) {
- type = addr_type ? addr_type : ha->type;
- __hw_addr_del(to_list, ha->addr, addr_len, type);
- }
-}
-EXPORT_SYMBOL(__hw_addr_del_multiple);
-
/* This function only works where there is a strict 1-1 relationship
 * between the source and destination of the sync. If you ever need to
 * sync addresses to more than 1 destination, you need to use
@@ -264,7 +223,7 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
}
EXPORT_SYMBOL(__hw_addr_unsync);
-void __hw_addr_flush(struct netdev_hw_addr_list *list)
+static void __hw_addr_flush(struct netdev_hw_addr_list *list)
{
struct netdev_hw_addr *ha, *tmp;
@@ -274,7 +233,6 @@ void __hw_addr_flush(struct netdev_hw_addr_list *list)
}
list->count = 0;
}
-EXPORT_SYMBOL(__hw_addr_flush);
void __hw_addr_init(struct netdev_hw_addr_list *list)
{
@@ -400,59 +358,6 @@ int dev_addr_del(struct net_device *dev, const unsigned char *addr,
}
EXPORT_SYMBOL(dev_addr_del);
-/**
- * dev_addr_add_multiple - Add device addresses from another device
- * @to_dev: device to which addresses will be added
- * @from_dev: device from which addresses will be added
- * @addr_type: address type - 0 means type will be used from from_dev
- *
- * Add device addresses of the one device to another.
- **
- * The caller must hold the rtnl_mutex.
- */
-int dev_addr_add_multiple(struct net_device *to_dev,
- struct net_device *from_dev,
- unsigned char addr_type)
-{
- int err;
-
- ASSERT_RTNL();
-
- if (from_dev->addr_len != to_dev->addr_len)
- return -EINVAL;
- err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
- to_dev->addr_len, addr_type);
- if (!err)
- call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
- return err;
-}
-EXPORT_SYMBOL(dev_addr_add_multiple);
-
-/**
- * dev_addr_del_multiple - Delete device addresses by another device
- * @to_dev: device where the addresses will be deleted
- * @from_dev: device supplying the addresses to be deleted
- * @addr_type: address type - 0 means type will be used from from_dev
- *
- * Deletes addresses in to device by the list of addresses in from device.
- *
- * The caller must hold the rtnl_mutex.
- */
-int dev_addr_del_multiple(struct net_device *to_dev,
- struct net_device *from_dev,
- unsigned char addr_type)
-{
- ASSERT_RTNL();
-
- if (from_dev->addr_len != to_dev->addr_len)
- return -EINVAL;
- __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
- to_dev->addr_len, addr_type);
- call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
- return 0;
-}
-EXPORT_SYMBOL(dev_addr_del_multiple);
-
/*
* Unicast list handling functions
*/
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 5b7d0e1d0664..cf999e09bcd2 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -327,6 +327,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
cmd == SIOCBRADDIF ||
cmd == SIOCBRDELIF ||
cmd == SIOCSHWTSTAMP ||
+ cmd == SIOCGHWTSTAMP ||
cmd == SIOCWANDEV) {
err = -EOPNOTSUPP;
if (ops->ndo_do_ioctl) {
@@ -546,6 +547,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
*/
default:
if (cmd == SIOCWANDEV ||
+ cmd == SIOCGHWTSTAMP ||
(cmd >= SIOCDEVPRIVATE &&
cmd <= SIOCDEVPRIVATE + 15)) {
dev_load(net, ifr.ifr_name);
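SIOCGHWTSTAMP is the read-only counterpart of SIOCSHWTSTAMP added in this series (see the hwtstamp_config utility under Documentation/networking/timestamping/), letting tools query a NIC's timestamping state without disturbing it. A hedged userspace sketch; the device name is an example:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct hwtstamp_config cfg = { 0 };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (fd < 0 || ioctl(fd, SIOCGHWTSTAMP, &ifr) < 0)
		perror("SIOCGHWTSTAMP");
	else	/* query only; nothing on the NIC changes */
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

	close(fd);
	return 0;
}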
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 95897183226e..e70301eb7a4a 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -64,7 +64,6 @@ static struct genl_family net_drop_monitor_family = {
.hdrsize = 0,
.name = "NET_DM",
.version = 2,
- .maxattr = NET_DM_CMD_MAX,
};
static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data);
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index d6ef17322500..b324bfa3485c 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -202,12 +202,12 @@ static __always_inline u32 __flow_hash_1word(u32 a)
}
/*
- * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
+ * __skb_get_hash: calculate a flow hash based on src/dst addresses
* and src/dst port numbers. Sets rxhash in skb to non-zero hash value
* on success, zero indicates no valid hash. Also, sets l4_rxhash in skb
* if hash is a canonical 4-tuple hash over transport ports.
*/
-void __skb_get_rxhash(struct sk_buff *skb)
+void __skb_get_hash(struct sk_buff *skb)
{
struct flow_keys keys;
u32 hash;
@@ -234,7 +234,7 @@ void __skb_get_rxhash(struct sk_buff *skb)
skb->rxhash = hash;
}
-EXPORT_SYMBOL(__skb_get_rxhash);
+EXPORT_SYMBOL(__skb_get_hash);
/*
* Returns a Tx hash based on the given packet descriptor a Tx queues' number
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index ca15f32821fb..ea97361f0e9b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -38,6 +38,8 @@
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
+#include <linux/inetdevice.h>
+#include <net/addrconf.h>
#define DEBUG
#define NEIGH_DEBUG 1
@@ -497,7 +499,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
goto out_neigh_release;
}
- n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
+ n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
write_lock_bh(&tbl->lock);
nht = rcu_dereference_protected(tbl->nht,
@@ -776,7 +778,7 @@ static void neigh_periodic_work(struct work_struct *work)
tbl->last_rand = jiffies;
for (p = &tbl->parms; p; p = p->next)
p->reachable_time =
- neigh_rand_reach_time(p->base_reachable_time);
+ neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
}
for (i = 0 ; i < (1 << nht->hash_shift); i++) {
@@ -799,7 +801,7 @@ static void neigh_periodic_work(struct work_struct *work)
if (atomic_read(&n->refcnt) == 1 &&
(state == NUD_FAILED ||
- time_after(jiffies, n->used + n->parms->gc_staletime))) {
+ time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
*np = n->next;
n->dead = 1;
write_unlock(&n->lock);
@@ -822,12 +824,12 @@ next_elt:
lockdep_is_held(&tbl->lock));
}
out:
- /* Cycle through all hash buckets every base_reachable_time/2 ticks.
- * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
- * base_reachable_time.
+ /* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
+ * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
+ * BASE_REACHABLE_TIME.
*/
schedule_delayed_work(&tbl->gc_work,
- tbl->parms.base_reachable_time >> 1);
+ NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
write_unlock_bh(&tbl->lock);
}
@@ -835,8 +837,9 @@ static __inline__ int neigh_max_probes(struct neighbour *n)
{
struct neigh_parms *p = n->parms;
return (n->nud_state & NUD_PROBE) ?
- p->ucast_probes :
- p->ucast_probes + p->app_probes + p->mcast_probes;
+ NEIGH_VAR(p, UCAST_PROBES) :
+ NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
+ NEIGH_VAR(p, MCAST_PROBES);
}
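From here on, direct neigh_parms field accesses become NEIGH_VAR() reads: the tunables now live in a data[] array indexed by the NEIGH_VAR_* enum, which is what the sysctl rework at the end of this file builds on. Presumably the accessors in <net/neighbour.h> reduce to something like:

/* sketch of the accessors this patch introduces (assumption: the actual
 * definitions live in include/net/neighbour.h) */
#define NEIGH_VAR(p, attr)		((p)->data[NEIGH_VAR_ ## attr])
#define NEIGH_VAR_SET(p, attr, val)	(NEIGH_VAR(p, attr) = (val))

static inline void neigh_parms_data_state_cleanall(struct neigh_parms *p)
{
	/* forget which entries were set explicitly (see p->data_state) */
	bitmap_zero(p->data_state, NEIGH_VAR_DATA_MAX);
}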
static void neigh_invalidate(struct neighbour *neigh)
@@ -901,12 +904,13 @@ static void neigh_timer_handler(unsigned long arg)
neigh_dbg(2, "neigh %p is still alive\n", neigh);
next = neigh->confirmed + neigh->parms->reachable_time;
} else if (time_before_eq(now,
- neigh->used + neigh->parms->delay_probe_time)) {
+ neigh->used +
+ NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
neigh_dbg(2, "neigh %p is delayed\n", neigh);
neigh->nud_state = NUD_DELAY;
neigh->updated = jiffies;
neigh_suspect(neigh);
- next = now + neigh->parms->delay_probe_time;
+ next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
} else {
neigh_dbg(2, "neigh %p is suspected\n", neigh);
neigh->nud_state = NUD_STALE;
@@ -916,7 +920,8 @@ static void neigh_timer_handler(unsigned long arg)
}
} else if (state & NUD_DELAY) {
if (time_before_eq(now,
- neigh->confirmed + neigh->parms->delay_probe_time)) {
+ neigh->confirmed +
+ NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
neigh_dbg(2, "neigh %p is now reachable\n", neigh);
neigh->nud_state = NUD_REACHABLE;
neigh->updated = jiffies;
@@ -928,11 +933,11 @@ static void neigh_timer_handler(unsigned long arg)
neigh->nud_state = NUD_PROBE;
neigh->updated = jiffies;
atomic_set(&neigh->probes, 0);
- next = now + neigh->parms->retrans_time;
+ next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
}
} else {
/* NUD_PROBE|NUD_INCOMPLETE */
- next = now + neigh->parms->retrans_time;
+ next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
}
if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
@@ -973,13 +978,16 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
goto out_unlock_bh;
if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
- if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
+ if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
+ NEIGH_VAR(neigh->parms, APP_PROBES)) {
unsigned long next, now = jiffies;
- atomic_set(&neigh->probes, neigh->parms->ucast_probes);
+ atomic_set(&neigh->probes,
+ NEIGH_VAR(neigh->parms, UCAST_PROBES));
neigh->nud_state = NUD_INCOMPLETE;
neigh->updated = now;
- next = now + max(neigh->parms->retrans_time, HZ/2);
+ next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
+ HZ/2);
neigh_add_timer(neigh, next);
immediate_probe = true;
} else {
@@ -994,14 +1002,14 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
neigh_dbg(2, "neigh %p is delayed\n", neigh);
neigh->nud_state = NUD_DELAY;
neigh->updated = jiffies;
- neigh_add_timer(neigh,
- jiffies + neigh->parms->delay_probe_time);
+ neigh_add_timer(neigh, jiffies +
+ NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
}
if (neigh->nud_state == NUD_INCOMPLETE) {
if (skb) {
while (neigh->arp_queue_len_bytes + skb->truesize >
- neigh->parms->queue_len_bytes) {
+ NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
struct sk_buff *buff;
buff = __skb_dequeue(&neigh->arp_queue);
@@ -1161,6 +1169,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
neigh->parms->reachable_time :
0)));
neigh->nud_state = new;
+ notify = 1;
}
if (lladdr != neigh->ha) {
@@ -1170,7 +1179,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
neigh_update_hhs(neigh);
if (!(new & NUD_CONNECTED))
neigh->confirmed = jiffies -
- (neigh->parms->base_reachable_time << 1);
+ (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
notify = 1;
}
if (new == old)
@@ -1230,6 +1239,21 @@ out:
}
EXPORT_SYMBOL(neigh_update);
+/* Update the neigh to listen temporarily for probe responses, even if it is
+ * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
+ */
+void __neigh_set_probe_once(struct neighbour *neigh)
+{
+ neigh->updated = jiffies;
+ if (!(neigh->nud_state & NUD_FAILED))
+ return;
+ neigh->nud_state = NUD_PROBE;
+ atomic_set(&neigh->probes, NEIGH_VAR(neigh->parms, UCAST_PROBES));
+ neigh_add_timer(neigh,
+ jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
+}
+EXPORT_SYMBOL(__neigh_set_probe_once);
+
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
u8 *lladdr, void *saddr,
struct net_device *dev)
@@ -1274,7 +1298,7 @@ int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
skb->len) < 0 &&
- dev->header_ops->rebuild(skb))
+ dev_rebuild_header(skb))
return 0;
return dev_queue_xmit(skb);
@@ -1391,9 +1415,10 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
struct sk_buff *skb)
{
unsigned long now = jiffies;
- unsigned long sched_next = now + (net_random() % p->proxy_delay);
+ unsigned long sched_next = now + (net_random() %
+ NEIGH_VAR(p, PROXY_DELAY));
- if (tbl->proxy_queue.qlen > p->proxy_qlen) {
+ if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
kfree_skb(skb);
return;
}
@@ -1440,7 +1465,7 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
p->tbl = tbl;
atomic_set(&p->refcnt, 1);
p->reachable_time =
- neigh_rand_reach_time(p->base_reachable_time);
+ neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
dev_hold(dev);
p->dev = dev;
write_pnet(&p->net, hold_net(net));
@@ -1457,6 +1482,8 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
p->next = tbl->parms.next;
tbl->parms.next = p;
write_unlock_bh(&tbl->lock);
+
+ neigh_parms_data_state_cleanall(p);
}
return p;
}
@@ -1509,7 +1536,7 @@ static void neigh_table_init_no_netlink(struct neigh_table *tbl)
write_pnet(&tbl->parms.net, &init_net);
atomic_set(&tbl->parms.refcnt, 1);
tbl->parms.reachable_time =
- neigh_rand_reach_time(tbl->parms.base_reachable_time);
+ neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
tbl->stats = alloc_percpu(struct neigh_statistics);
if (!tbl->stats)
@@ -1777,24 +1804,32 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
if ((parms->dev &&
nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
- nla_put_u32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes) ||
+ nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
+ NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
/* approximative value for deprecated QUEUE_LEN (in packets) */
nla_put_u32(skb, NDTPA_QUEUE_LEN,
- parms->queue_len_bytes / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
- nla_put_u32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen) ||
- nla_put_u32(skb, NDTPA_APP_PROBES, parms->app_probes) ||
- nla_put_u32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes) ||
- nla_put_u32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes) ||
+ NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
+ nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
+ nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
+ nla_put_u32(skb, NDTPA_UCAST_PROBES,
+ NEIGH_VAR(parms, UCAST_PROBES)) ||
+ nla_put_u32(skb, NDTPA_MCAST_PROBES,
+ NEIGH_VAR(parms, MCAST_PROBES)) ||
nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
- parms->base_reachable_time) ||
- nla_put_msecs(skb, NDTPA_GC_STALETIME, parms->gc_staletime) ||
+ NEIGH_VAR(parms, BASE_REACHABLE_TIME)) ||
+ nla_put_msecs(skb, NDTPA_GC_STALETIME,
+ NEIGH_VAR(parms, GC_STALETIME)) ||
nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
- parms->delay_probe_time) ||
- nla_put_msecs(skb, NDTPA_RETRANS_TIME, parms->retrans_time) ||
- nla_put_msecs(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay) ||
- nla_put_msecs(skb, NDTPA_PROXY_DELAY, parms->proxy_delay) ||
- nla_put_msecs(skb, NDTPA_LOCKTIME, parms->locktime))
+ NEIGH_VAR(parms, DELAY_PROBE_TIME)) ||
+ nla_put_msecs(skb, NDTPA_RETRANS_TIME,
+ NEIGH_VAR(parms, RETRANS_TIME)) ||
+ nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
+ NEIGH_VAR(parms, ANYCAST_DELAY)) ||
+ nla_put_msecs(skb, NDTPA_PROXY_DELAY,
+ NEIGH_VAR(parms, PROXY_DELAY)) ||
+ nla_put_msecs(skb, NDTPA_LOCKTIME,
+ NEIGH_VAR(parms, LOCKTIME)))
goto nla_put_failure;
return nla_nest_end(skb, nest);
@@ -2010,44 +2045,54 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
switch (i) {
case NDTPA_QUEUE_LEN:
- p->queue_len_bytes = nla_get_u32(tbp[i]) *
- SKB_TRUESIZE(ETH_FRAME_LEN);
+ NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
+ nla_get_u32(tbp[i]) *
+ SKB_TRUESIZE(ETH_FRAME_LEN));
break;
case NDTPA_QUEUE_LENBYTES:
- p->queue_len_bytes = nla_get_u32(tbp[i]);
+ NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
+ nla_get_u32(tbp[i]));
break;
case NDTPA_PROXY_QLEN:
- p->proxy_qlen = nla_get_u32(tbp[i]);
+ NEIGH_VAR_SET(p, PROXY_QLEN,
+ nla_get_u32(tbp[i]));
break;
case NDTPA_APP_PROBES:
- p->app_probes = nla_get_u32(tbp[i]);
+ NEIGH_VAR_SET(p, APP_PROBES,
+ nla_get_u32(tbp[i]));
break;
case NDTPA_UCAST_PROBES:
- p->ucast_probes = nla_get_u32(tbp[i]);
+ NEIGH_VAR_SET(p, UCAST_PROBES,
+ nla_get_u32(tbp[i]));
break;
case NDTPA_MCAST_PROBES:
- p->mcast_probes = nla_get_u32(tbp[i]);
+ NEIGH_VAR_SET(p, MCAST_PROBES,
+ nla_get_u32(tbp[i]));
break;
case NDTPA_BASE_REACHABLE_TIME:
- p->base_reachable_time = nla_get_msecs(tbp[i]);
+ NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
+ nla_get_msecs(tbp[i]));
break;
case NDTPA_GC_STALETIME:
- p->gc_staletime = nla_get_msecs(tbp[i]);
+ NEIGH_VAR_SET(p, GC_STALETIME,
+ nla_get_msecs(tbp[i]));
break;
case NDTPA_DELAY_PROBE_TIME:
- p->delay_probe_time = nla_get_msecs(tbp[i]);
+ NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
+ nla_get_msecs(tbp[i]));
break;
case NDTPA_RETRANS_TIME:
- p->retrans_time = nla_get_msecs(tbp[i]);
+ NEIGH_VAR_SET(p, RETRANS_TIME,
+ nla_get_msecs(tbp[i]));
break;
case NDTPA_ANYCAST_DELAY:
- p->anycast_delay = nla_get_msecs(tbp[i]);
+ NEIGH_VAR_SET(p, ANYCAST_DELAY, nla_get_msecs(tbp[i]));
break;
case NDTPA_PROXY_DELAY:
- p->proxy_delay = nla_get_msecs(tbp[i]);
+ NEIGH_VAR_SET(p, PROXY_DELAY, nla_get_msecs(tbp[i]));
break;
case NDTPA_LOCKTIME:
- p->locktime = nla_get_msecs(tbp[i]);
+ NEIGH_VAR_SET(p, LOCKTIME, nla_get_msecs(tbp[i]));
break;
}
}
@@ -2788,133 +2833,167 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
return ret;
}
-enum {
- NEIGH_VAR_MCAST_PROBE,
- NEIGH_VAR_UCAST_PROBE,
- NEIGH_VAR_APP_PROBE,
- NEIGH_VAR_RETRANS_TIME,
- NEIGH_VAR_BASE_REACHABLE_TIME,
- NEIGH_VAR_DELAY_PROBE_TIME,
- NEIGH_VAR_GC_STALETIME,
- NEIGH_VAR_QUEUE_LEN,
- NEIGH_VAR_QUEUE_LEN_BYTES,
- NEIGH_VAR_PROXY_QLEN,
- NEIGH_VAR_ANYCAST_DELAY,
- NEIGH_VAR_PROXY_DELAY,
- NEIGH_VAR_LOCKTIME,
- NEIGH_VAR_RETRANS_TIME_MS,
- NEIGH_VAR_BASE_REACHABLE_TIME_MS,
- NEIGH_VAR_GC_INTERVAL,
- NEIGH_VAR_GC_THRESH1,
- NEIGH_VAR_GC_THRESH2,
- NEIGH_VAR_GC_THRESH3,
- NEIGH_VAR_MAX
-};
+static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
+ int family)
+{
+ switch (family) {
+ case AF_INET:
+ return __in_dev_arp_parms_get_rcu(dev);
+ case AF_INET6:
+ return __in6_dev_nd_parms_get_rcu(dev);
+ }
+ return NULL;
+}
+
+static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
+ int index)
+{
+ struct net_device *dev;
+ int family = neigh_parms_family(p);
+
+ rcu_read_lock();
+ for_each_netdev_rcu(net, dev) {
+ struct neigh_parms *dst_p =
+ neigh_get_dev_parms_rcu(dev, family);
+
+ if (dst_p && !test_bit(index, dst_p->data_state))
+ dst_p->data[index] = p->data[index];
+ }
+ rcu_read_unlock();
+}
+
+static void neigh_proc_update(struct ctl_table *ctl, int write)
+{
+ struct net_device *dev = ctl->extra1;
+ struct neigh_parms *p = ctl->extra2;
+ struct net *net = neigh_parms_net(p);
+ int index = (int *) ctl->data - p->data;
+
+ if (!write)
+ return;
+
+ set_bit(index, p->data_state);
+ if (!dev) /* NULL dev means this is default value */
+ neigh_copy_dflt_parms(net, p, index);
+}
+
+static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ struct ctl_table tmp = *ctl;
+ int ret;
+
+ tmp.extra1 = &zero;
+ tmp.extra2 = &int_max;
+
+ ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+ neigh_proc_update(ctl, write);
+ return ret;
+}
+
+int neigh_proc_dointvec(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+
+ neigh_proc_update(ctl, write);
+ return ret;
+}
+EXPORT_SYMBOL(neigh_proc_dointvec);
+
+int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
+
+ neigh_proc_update(ctl, write);
+ return ret;
+}
+EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
+
+static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
+
+ neigh_proc_update(ctl, write);
+ return ret;
+}
+
+int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
+
+ neigh_proc_update(ctl, write);
+ return ret;
+}
+EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
+
+static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
+
+ neigh_proc_update(ctl, write);
+ return ret;
+}
+
+#define NEIGH_PARMS_DATA_OFFSET(index) \
+ (&((struct neigh_parms *) 0)->data[index])
+
+#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
+ [NEIGH_VAR_ ## attr] = { \
+ .procname = name, \
+ .data = NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
+ .maxlen = sizeof(int), \
+ .mode = mval, \
+ .proc_handler = proc, \
+ }
+
+#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
+ NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
+
+#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
+ NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
+
+#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
+ NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
+
+#define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
+ NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
+
+#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
+ NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
+
+#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
+ NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
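Each entry's .data field initially holds only the offset of the tunable inside struct neigh_parms (NEIGH_PARMS_DATA_OFFSET computes it through a null-pointer cast); neigh_sysctl_register() below rebases it with data += (long) p, and neigh_proc_update() recovers the enum index by pointer subtraction. The first table entry, for example, expands roughly to:

[NEIGH_VAR_MCAST_PROBES] = {
	.procname	= "mcast_solicit",
	.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_MCAST_PROBES),
	.maxlen		= sizeof(int),
	.mode		= 0644,
	.proc_handler	= neigh_proc_dointvec_zero_intmax,
},
/* later, per registered parms instance:
 *	t->neigh_vars[i].data += (long) p;	 offset -> real address
 * and back in neigh_proc_update():
 *	index = (int *) ctl->data - p->data;	 address -> enum index
 */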
static struct neigh_sysctl_table {
struct ctl_table_header *sysctl_header;
struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
} neigh_sysctl_template __read_mostly = {
.neigh_vars = {
- [NEIGH_VAR_MCAST_PROBE] = {
- .procname = "mcast_solicit",
- .maxlen = sizeof(int),
- .mode = 0644,
- .extra1 = &zero,
- .extra2 = &int_max,
- .proc_handler = proc_dointvec_minmax,
- },
- [NEIGH_VAR_UCAST_PROBE] = {
- .procname = "ucast_solicit",
- .maxlen = sizeof(int),
- .mode = 0644,
- .extra1 = &zero,
- .extra2 = &int_max,
- .proc_handler = proc_dointvec_minmax,
- },
- [NEIGH_VAR_APP_PROBE] = {
- .procname = "app_solicit",
- .maxlen = sizeof(int),
- .mode = 0644,
- .extra1 = &zero,
- .extra2 = &int_max,
- .proc_handler = proc_dointvec_minmax,
- },
- [NEIGH_VAR_RETRANS_TIME] = {
- .procname = "retrans_time",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_userhz_jiffies,
- },
- [NEIGH_VAR_BASE_REACHABLE_TIME] = {
- .procname = "base_reachable_time",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- [NEIGH_VAR_DELAY_PROBE_TIME] = {
- .procname = "delay_first_probe_time",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- [NEIGH_VAR_GC_STALETIME] = {
- .procname = "gc_stale_time",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- [NEIGH_VAR_QUEUE_LEN] = {
- .procname = "unres_qlen",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_unres_qlen,
- },
- [NEIGH_VAR_QUEUE_LEN_BYTES] = {
- .procname = "unres_qlen_bytes",
- .maxlen = sizeof(int),
- .mode = 0644,
- .extra1 = &zero,
- .proc_handler = proc_dointvec_minmax,
- },
- [NEIGH_VAR_PROXY_QLEN] = {
- .procname = "proxy_qlen",
- .maxlen = sizeof(int),
- .mode = 0644,
- .extra1 = &zero,
- .extra2 = &int_max,
- .proc_handler = proc_dointvec_minmax,
- },
- [NEIGH_VAR_ANYCAST_DELAY] = {
- .procname = "anycast_delay",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_userhz_jiffies,
- },
- [NEIGH_VAR_PROXY_DELAY] = {
- .procname = "proxy_delay",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_userhz_jiffies,
- },
- [NEIGH_VAR_LOCKTIME] = {
- .procname = "locktime",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_userhz_jiffies,
- },
- [NEIGH_VAR_RETRANS_TIME_MS] = {
- .procname = "retrans_time_ms",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_ms_jiffies,
- },
- [NEIGH_VAR_BASE_REACHABLE_TIME_MS] = {
- .procname = "base_reachable_time_ms",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_ms_jiffies,
- },
+ NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
+ NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
+ NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
+ NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
+ NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
+ NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
+ NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
+ NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
+ NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
+ NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
+ NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
+ NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
+ NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
+ NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
+ NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
[NEIGH_VAR_GC_INTERVAL] = {
.procname = "gc_interval",
.maxlen = sizeof(int),
@@ -2950,31 +3029,23 @@ static struct neigh_sysctl_table {
};
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
- char *p_name, proc_handler *handler)
+ proc_handler *handler)
{
+ int i;
struct neigh_sysctl_table *t;
- const char *dev_name_source = NULL;
+ const char *dev_name_source;
char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
+ char *p_name;
t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
if (!t)
goto err;
- t->neigh_vars[NEIGH_VAR_MCAST_PROBE].data = &p->mcast_probes;
- t->neigh_vars[NEIGH_VAR_UCAST_PROBE].data = &p->ucast_probes;
- t->neigh_vars[NEIGH_VAR_APP_PROBE].data = &p->app_probes;
- t->neigh_vars[NEIGH_VAR_RETRANS_TIME].data = &p->retrans_time;
- t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].data = &p->base_reachable_time;
- t->neigh_vars[NEIGH_VAR_DELAY_PROBE_TIME].data = &p->delay_probe_time;
- t->neigh_vars[NEIGH_VAR_GC_STALETIME].data = &p->gc_staletime;
- t->neigh_vars[NEIGH_VAR_QUEUE_LEN].data = &p->queue_len_bytes;
- t->neigh_vars[NEIGH_VAR_QUEUE_LEN_BYTES].data = &p->queue_len_bytes;
- t->neigh_vars[NEIGH_VAR_PROXY_QLEN].data = &p->proxy_qlen;
- t->neigh_vars[NEIGH_VAR_ANYCAST_DELAY].data = &p->anycast_delay;
- t->neigh_vars[NEIGH_VAR_PROXY_DELAY].data = &p->proxy_delay;
- t->neigh_vars[NEIGH_VAR_LOCKTIME].data = &p->locktime;
- t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].data = &p->retrans_time;
- t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].data = &p->base_reachable_time;
+ for (i = 0; i < ARRAY_SIZE(t->neigh_vars); i++) {
+ t->neigh_vars[i].data += (long) p;
+ t->neigh_vars[i].extra1 = dev;
+ t->neigh_vars[i].extra2 = p;
+ }
if (dev) {
dev_name_source = dev->name;
@@ -2989,26 +3060,32 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
}
-
if (handler) {
/* RetransTime */
t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
- t->neigh_vars[NEIGH_VAR_RETRANS_TIME].extra1 = dev;
/* ReachableTime */
t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
- t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].extra1 = dev;
/* RetransTime (in milliseconds)*/
t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
- t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].extra1 = dev;
/* ReachableTime (in milliseconds) */
t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
- t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev;
}
/* Don't export sysctls to unprivileged users */
if (neigh_parms_net(p)->user_ns != &init_user_ns)
t->neigh_vars[0].procname = NULL;
+ switch (neigh_parms_family(p)) {
+ case AF_INET:
+ p_name = "ipv4";
+ break;
+ case AF_INET6:
+ p_name = "ipv6";
+ break;
+ default:
+ BUG();
+ }
+
snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
p_name, dev_name_source);
t->sysctl_header =
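The rebasing loop above only works if every NEIGH_SYSCTL_*_ENTRY macro initializes .data to the offset of its slot within struct neigh_parms rather than to a real address; adding the parms pointer then turns each stored offset into the per-device field address. A freestanding sketch of that pattern, with hypothetical names (struct parms, struct entry) standing in for the kernel types:

#include <stddef.h>
#include <stdio.h>

struct parms {
        int data[3];                    /* per-parms tunables */
};

enum { VAR_A, VAR_B, VAR_C };

struct entry {
        const char *name;
        void *data;                     /* an offset until rebased */
};

/* Entries carry offsets into struct parms, not addresses, just as
 * the NEIGH_SYSCTL_*_ENTRY macros are presumed to do.
 */
static struct entry table[] = {
        { "a", (void *)offsetof(struct parms, data[VAR_A]) },
        { "b", (void *)offsetof(struct parms, data[VAR_B]) },
        { "c", (void *)offsetof(struct parms, data[VAR_C]) },
};

int main(void)
{
        struct parms p = { .data = { 10, 20, 30 } };
        size_t i;

        /* The rebase step: offset + base pointer = field address,
         * mirroring t->neigh_vars[i].data += (long) p above.
         */
        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                table[i].data = (char *)&p + (size_t)table[i].data;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                printf("%s = %d\n", table[i].name, *(int *)table[i].data);
        return 0;
}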
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index f3edf9635e02..49843bf7e43e 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -676,8 +676,8 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
while ((mask | (mask >> 1)) != mask)
mask |= (mask >> 1);
/* On 64 bit arches, must check mask fits in table->mask (u32),
- * and on 32bit arches, must check RPS_DEV_FLOW_TABLE_SIZE(mask + 1)
- * doesnt overflow.
+ * and on 32bit arches, must check
+ * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
*/
#if BITS_PER_LONG > 32
if (mask > (unsigned long)(u32)mask)
@@ -1358,7 +1358,7 @@ void netdev_class_remove_file_ns(struct class_attribute *class_attr,
}
EXPORT_SYMBOL(netdev_class_remove_file_ns);
-int netdev_kobject_init(void)
+int __init netdev_kobject_init(void)
{
kobj_ns_type_register(&net_ns_type_operations);
return class_register(&net_class);
diff --git a/net/core/net-sysfs.h b/net/core/net-sysfs.h
index bd7751ec1c4d..2745a1b51e03 100644
--- a/net/core/net-sysfs.h
+++ b/net/core/net-sysfs.h
@@ -1,7 +1,7 @@
#ifndef __NET_SYSFS_H__
#define __NET_SYSFS_H__
-int netdev_kobject_init(void);
+int __init netdev_kobject_init(void);
int netdev_register_kobject(struct net_device *);
void netdev_unregister_kobject(struct net_device *);
int net_rx_queue_update_kobjects(struct net_device *, int old_num, int new_num);
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
new file mode 100644
index 000000000000..719efd541668
--- /dev/null
+++ b/net/core/netclassid_cgroup.c
@@ -0,0 +1,120 @@
+/*
+ * net/core/netclassid_cgroup.c Classid Cgroupfs Handling
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors: Thomas Graf <tgraf@suug.ch>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/cgroup.h>
+#include <linux/fdtable.h>
+#include <net/cls_cgroup.h>
+#include <net/sock.h>
+
+static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state *css)
+{
+ return css ? container_of(css, struct cgroup_cls_state, css) : NULL;
+}
+
+struct cgroup_cls_state *task_cls_state(struct task_struct *p)
+{
+ return css_cls_state(task_css(p, net_cls_subsys_id));
+}
+EXPORT_SYMBOL_GPL(task_cls_state);
+
+static struct cgroup_subsys_state *
+cgrp_css_alloc(struct cgroup_subsys_state *parent_css)
+{
+ struct cgroup_cls_state *cs;
+
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ if (!cs)
+ return ERR_PTR(-ENOMEM);
+
+ return &cs->css;
+}
+
+static int cgrp_css_online(struct cgroup_subsys_state *css)
+{
+ struct cgroup_cls_state *cs = css_cls_state(css);
+ struct cgroup_cls_state *parent = css_cls_state(css_parent(css));
+
+ if (parent)
+ cs->classid = parent->classid;
+
+ return 0;
+}
+
+static void cgrp_css_free(struct cgroup_subsys_state *css)
+{
+ kfree(css_cls_state(css));
+}
+
+static int update_classid(const void *v, struct file *file, unsigned n)
+{
+ int err;
+ struct socket *sock = sock_from_file(file, &err);
+
+ if (sock)
+ sock->sk->sk_classid = (u32)(unsigned long)v;
+
+ return 0;
+}
+
+static void cgrp_attach(struct cgroup_subsys_state *css,
+ struct cgroup_taskset *tset)
+{
+ struct cgroup_cls_state *cs = css_cls_state(css);
+ void *v = (void *)(unsigned long)cs->classid;
+ struct task_struct *p;
+
+ cgroup_taskset_for_each(p, css, tset) {
+ task_lock(p);
+ iterate_fd(p->files, 0, update_classid, v);
+ task_unlock(p);
+ }
+}
+
+static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
+{
+ return css_cls_state(css)->classid;
+}
+
+static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 value)
+{
+ css_cls_state(css)->classid = (u32) value;
+
+ return 0;
+}
+
+static struct cftype ss_files[] = {
+ {
+ .name = "classid",
+ .read_u64 = read_classid,
+ .write_u64 = write_classid,
+ },
+ { } /* terminate */
+};
+
+struct cgroup_subsys net_cls_subsys = {
+ .name = "net_cls",
+ .css_alloc = cgrp_css_alloc,
+ .css_online = cgrp_css_online,
+ .css_free = cgrp_css_free,
+ .attach = cgrp_attach,
+ .subsys_id = net_cls_subsys_id,
+ .base_cftypes = ss_files,
+ .module = THIS_MODULE,
+};
+
+static int __init init_netclassid_cgroup(void)
+{
+ return cgroup_load_subsys(&net_cls_subsys);
+}
+__initcall(init_netclassid_cgroup);
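For reference, the classid stored by this new file is what the tc cgroup classifier matches against when queuing packets. A minimal userspace sketch of tagging a group, assuming a net_cls hierarchy mounted at /sys/fs/cgroup/net_cls with a child group "mygroup" already created (both paths are assumptions, not part of this patch):

#include <stdio.h>

int main(void)
{
        /* Path assumes a mounted net_cls hierarchy; adjust as needed. */
        FILE *f = fopen("/sys/fs/cgroup/net_cls/mygroup/net_cls.classid", "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* 0x00010001 corresponds to tc handle 1:1 (major:minor). */
        fprintf(f, "%u\n", 0x10001u);
        fclose(f);
        return 0;
}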
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 8f971990677c..303097874633 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -386,8 +386,14 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
!vlan_hw_offload_capable(netif_skb_features(skb),
skb->vlan_proto)) {
skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
- if (unlikely(!skb))
- break;
+ if (unlikely(!skb)) {
+ /* This is actually a packet drop, but we
+ * don't want the code at the end of this
+ * function to try to re-queue a NULL skb.
+ */
+ status = NETDEV_TX_OK;
+ goto unlock_txq;
+ }
skb->vlan_tci = 0;
}
@@ -395,6 +401,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
if (status == NETDEV_TX_OK)
txq_trans_update(txq);
}
+ unlock_txq:
__netif_tx_unlock(txq);
if (status == NETDEV_TX_OK)
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 9b7cf6c85f82..1dda50c2e705 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -30,7 +30,7 @@
#define PRIOMAP_MIN_SZ 128
/*
- * Extend @dev->priomap so that it's large enough to accomodate
+ * Extend @dev->priomap so that it's large enough to accommodate
* @target_idx. @dev->priomap.priomap_len > @target_idx after successful
* return. Must be called under rtnl lock.
*/
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 261357a66300..a797fff7f222 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2527,6 +2527,8 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
if (x) {
int ret;
__u8 *eth;
+ struct iphdr *iph;
+
nhead = x->props.header_len - skb_headroom(skb);
if (nhead > 0) {
ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
@@ -2548,6 +2550,11 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
eth = (__u8 *) skb_push(skb, ETH_HLEN);
memcpy(eth, pkt_dev->hh, 12);
*(u16 *) &eth[12] = protocol;
+
+ /* Update IPv4 header len as well as checksum value */
+ iph = ip_hdr(skb);
+ iph->tot_len = htons(skb->len - ETH_HLEN);
+ ip_send_check(iph);
}
}
return 1;
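The fix above matters because the IPv4 header checksum covers tot_len, so rewriting the length without refreshing the checksum leaves the header invalid; ip_send_check() zeroes iph->check and recomputes it over the header words. A freestanding sketch of that computation (plain RFC 1071 one's-complement sum rather than the kernel's optimized ip_fast_csum()):

#include <stdint.h>
#include <stddef.h>

/* One's-complement checksum over an IPv4 header whose check field
 * has been zeroed; words = ihl * 2 (count of 16-bit words).
 */
static uint16_t ipv4_hdr_csum(const uint16_t *hdr, size_t words)
{
        uint32_t sum = 0;

        while (words--)
                sum += *hdr++;
        while (sum >> 16)               /* fold carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}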
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index cf67144d3e3c..e6e7d582f901 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -403,34 +403,16 @@ static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
}
/**
- * __rtnl_af_register - Register rtnl_af_ops with rtnetlink.
- * @ops: struct rtnl_af_ops * to register
- *
- * The caller must hold the rtnl_mutex.
- *
- * Returns 0 on success or a negative error code.
- */
-int __rtnl_af_register(struct rtnl_af_ops *ops)
-{
- list_add_tail(&ops->list, &rtnl_af_ops);
- return 0;
-}
-EXPORT_SYMBOL_GPL(__rtnl_af_register);
-
-/**
* rtnl_af_register - Register rtnl_af_ops with rtnetlink.
* @ops: struct rtnl_af_ops * to register
*
* Returns 0 on success or a negative error code.
*/
-int rtnl_af_register(struct rtnl_af_ops *ops)
+void rtnl_af_register(struct rtnl_af_ops *ops)
{
- int err;
-
rtnl_lock();
- err = __rtnl_af_register(ops);
+ list_add_tail(&ops->list, &rtnl_af_ops);
rtnl_unlock();
- return err;
}
EXPORT_SYMBOL_GPL(rtnl_af_register);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2718fed53d8c..1d641e781f85 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -712,9 +712,8 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
new->inner_network_header = old->inner_network_header;
new->inner_mac_header = old->inner_mac_header;
skb_dst_copy(new, old);
- new->rxhash = old->rxhash;
+ skb_copy_hash(new, old);
new->ooo_okay = old->ooo_okay;
- new->l4_rxhash = old->l4_rxhash;
new->no_fcs = old->no_fcs;
new->encapsulation = old->encapsulation;
#ifdef CONFIG_XFRM
@@ -2122,6 +2121,91 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
}
EXPORT_SYMBOL(skb_copy_and_csum_bits);
+/**
+ * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
+ * @from: source buffer
+ *
+ * Calculates the amount of linear headroom needed in the 'to' skb passed
+ * into skb_zerocopy().
+ */
+unsigned int
+skb_zerocopy_headlen(const struct sk_buff *from)
+{
+ unsigned int hlen = 0;
+
+ if (!from->head_frag ||
+ skb_headlen(from) < L1_CACHE_BYTES ||
+ skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
+ hlen = skb_headlen(from);
+
+ if (skb_has_frag_list(from))
+ hlen = from->len;
+
+ return hlen;
+}
+EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
+
+/**
+ * skb_zerocopy - Zero copy skb to skb
+ * @to: destination buffer
+ * @source: source buffer
+ * @len: number of bytes to copy from source buffer
+ * @hlen: size of linear headroom in destination buffer
+ *
+ * Copies up to @len bytes from @from to @to by creating references
+ * to the frags in the source buffer.
+ *
+ * The @hlen as calculated by skb_zerocopy_headlen() specifies the
+ * headroom in the @to buffer.
+ */
+void
+skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
+{
+ int i, j = 0;
+ int plen = 0; /* length of skb->head fragment */
+ struct page *page;
+ unsigned int offset;
+
+ BUG_ON(!from->head_frag && !hlen);
+
+ /* don't bother with small payloads */
+ if (len <= skb_tailroom(to)) {
+ skb_copy_bits(from, 0, skb_put(to, len), len);
+ return;
+ }
+
+ if (hlen) {
+ skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
+ len -= hlen;
+ } else {
+ plen = min_t(int, skb_headlen(from), len);
+ if (plen) {
+ page = virt_to_head_page(from->head);
+ offset = from->data - (unsigned char *)page_address(page);
+ __skb_fill_page_desc(to, 0, page, offset, plen);
+ get_page(page);
+ j = 1;
+ len -= plen;
+ }
+ }
+
+ to->truesize += len + plen;
+ to->len += len + plen;
+ to->data_len += len + plen;
+
+ for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
+ if (!len)
+ break;
+ skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
+ skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
+ len -= skb_shinfo(to)->frags[j].size;
+ skb_frag_ref(to, j);
+ j++;
+ }
+ skb_shinfo(to)->nr_frags = j;
+}
+EXPORT_SYMBOL_GPL(skb_zerocopy);
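A hedged sketch of how the pair just added might be called; the allocation details are an assumption (real callers size and fill the destination skb according to their own needs):

/* Sketch only: build an skb that references @from's frags instead of
 * copying them. alloc_skb(hlen, ...) provides exactly the tailroom
 * skb_zerocopy() will consume for the linear part.
 */
static struct sk_buff *zerocopy_clone(const struct sk_buff *from, int len)
{
        unsigned int hlen = skb_zerocopy_headlen(from);
        struct sk_buff *to = alloc_skb(hlen, GFP_ATOMIC);

        if (!to)
                return NULL;
        skb_zerocopy(to, from, len, hlen);
        return to;
}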
+
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
__wsum csum;
@@ -2982,10 +3066,7 @@ perform_csum_check:
return segs;
err:
- while ((skb = segs)) {
- segs = skb->next;
- kfree_skb(skb);
- }
+ kfree_skb_list(segs);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skb_segment);
@@ -3584,6 +3665,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
skb->tstamp.tv64 = 0;
skb->pkt_type = PACKET_HOST;
skb->skb_iif = 0;
+ skb->local_df = 0;
skb_dst_drop(skb);
skb->mark = 0;
secpath_reset(skb);
diff --git a/net/core/sock.c b/net/core/sock.c
index ab20ed9b0f31..85ad6f0d3898 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -882,7 +882,7 @@ set_rcvbuf:
case SO_PEEK_OFF:
if (sock->ops->set_peek_off)
- sock->ops->set_peek_off(sk, val);
+ ret = sock->ops->set_peek_off(sk, val);
else
ret = -EOPNOTSUPP;
break;
@@ -925,8 +925,8 @@ set_rcvbuf:
EXPORT_SYMBOL(sock_setsockopt);
-void cred_to_ucred(struct pid *pid, const struct cred *cred,
- struct ucred *ucred)
+static void cred_to_ucred(struct pid *pid, const struct cred *cred,
+ struct ucred *ucred)
{
ucred->pid = pid_vnr(pid);
ucred->uid = ucred->gid = -1;
@@ -937,7 +937,6 @@ void cred_to_ucred(struct pid *pid, const struct cred *cred,
ucred->gid = from_kgid_munged(current_ns, cred->egid);
}
}
-EXPORT_SYMBOL_GPL(cred_to_ucred);
int sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
@@ -1308,19 +1307,7 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
module_put(owner);
}
-#if IS_ENABLED(CONFIG_NET_CLS_CGROUP)
-void sock_update_classid(struct sock *sk)
-{
- u32 classid;
-
- classid = task_cls_classid(current);
- if (classid != sk->sk_classid)
- sk->sk_classid = classid;
-}
-EXPORT_SYMBOL(sock_update_classid);
-#endif
-
-#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
void sock_update_netprioidx(struct sock *sk)
{
if (in_interrupt())
@@ -1666,22 +1653,6 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
EXPORT_SYMBOL(sock_wmalloc);
/*
- * Allocate a skb from the socket's receive buffer.
- */
-struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
- gfp_t priority)
-{
- if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
- struct sk_buff *skb = alloc_skb(size, priority);
- if (skb) {
- skb_set_owner_r(skb, sk);
- return skb;
- }
- }
- return NULL;
-}
-
-/*
* Allocate a memory block from the socket's option memory buffer.
*/
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index cca444190907..cf9cd13509a7 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -122,7 +122,8 @@ static int flow_limit_cpu_sysctl(struct ctl_table *table, int write,
synchronize_rcu();
kfree(cur);
} else if (!cur && cpumask_test_cpu(i, mask)) {
- cur = kzalloc(len, GFP_KERNEL);
+ cur = kzalloc_node(len, GFP_KERNEL,
+ cpu_to_node(i));
if (!cur) {
/* not unwinding previous changes */
ret = -ENOMEM;
diff --git a/net/dcb/dcbevent.c b/net/dcb/dcbevent.c
index 4f72fc40bf02..a520d8004d89 100644
--- a/net/dcb/dcbevent.c
+++ b/net/dcb/dcbevent.c
@@ -11,8 +11,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*
* Author: John Fastabend <john.r.fastabend@intel.com>
*/
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 40d5829ed36a..66fbe1948fb5 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -11,8 +11,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*
* Author: Lucy Liu <lucy.liu@intel.com>
*/
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c
index 62b5828acde0..c073b81a1f3e 100644
--- a/net/dccp/ccids/lib/tfrc.c
+++ b/net/dccp/ccids/lib/tfrc.c
@@ -8,7 +8,7 @@
#include "tfrc.h"
#ifdef CONFIG_IP_DCCP_TFRC_DEBUG
-bool tfrc_debug;
+static bool tfrc_debug;
module_param(tfrc_debug, bool, 0644);
MODULE_PARM_DESC(tfrc_debug, "Enable TFRC debug messages");
#endif
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
index 40ee7d62b652..a3d8f7c76ae0 100644
--- a/net/dccp/ccids/lib/tfrc.h
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -21,7 +21,6 @@
#include "packet_history.h"
#ifdef CONFIG_IP_DCCP_TFRC_DEBUG
-extern bool tfrc_debug;
#define tfrc_pr_debug(format, a...) DCCP_PR_DEBUG(tfrc_debug, format, ##a)
#else
#define tfrc_pr_debug(format, a...)
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 30948784dd58..c67816647cce 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -479,7 +479,6 @@ void dccp_feat_list_purge(struct list_head *fn_list);
int dccp_insert_options(struct sock *sk, struct sk_buff *skb);
int dccp_insert_options_rsk(struct dccp_request_sock *, struct sk_buff *);
-int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed);
u32 dccp_timestamp(void);
void dccp_timestamping_init(void);
int dccp_insert_option(struct sk_buff *skb, unsigned char option,
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index d9f65fc66db5..88299c29101d 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -75,7 +75,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
IPPROTO_DCCP,
- orig_sport, orig_dport, sk, true);
+ orig_sport, orig_dport, sk);
if (IS_ERR(rt))
return PTR_ERR(rt);
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 4ac71ff7c2e4..4db3c2a1679c 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -141,6 +141,9 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (type == ICMPV6_PKT_TOOBIG) {
struct dst_entry *dst = NULL;
+ if (!ip6_sk_accept_pmtu(sk))
+ goto out;
+
if (sock_owned_by_user(sk))
goto out;
if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
@@ -237,7 +240,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
final_p = fl6_update_dst(&fl6, np->opt, &final);
- dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
+ dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
dst = NULL;
@@ -301,7 +304,7 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6));
/* sk = NULL, but it is safe for now. RST socket required. */
- dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
+ dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
if (!IS_ERR(dst)) {
skb_dst_set(skb, dst);
ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
@@ -512,7 +515,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
fl6.fl6_sport = htons(ireq->ir_num);
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
- dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
+ dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst))
goto out;
}
@@ -851,7 +854,6 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
- usin->sin6_addr = flowlabel->dst;
fl6_sock_release(flowlabel);
}
}
@@ -932,7 +934,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
final_p = fl6_update_dst(&fl6, np->opt, &final);
- dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
+ dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto failure;
diff --git a/net/dccp/options.c b/net/dccp/options.c
index a58e0b634050..9bce31886bda 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -343,38 +343,6 @@ static inline int dccp_elapsed_time_len(const u32 elapsed_time)
return elapsed_time == 0 ? 0 : elapsed_time <= 0xFFFF ? 2 : 4;
}
-/* FIXME: This function is currently not used anywhere */
-int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed_time)
-{
- const int elapsed_time_len = dccp_elapsed_time_len(elapsed_time);
- const int len = 2 + elapsed_time_len;
- unsigned char *to;
-
- if (elapsed_time_len == 0)
- return 0;
-
- if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN)
- return -1;
-
- DCCP_SKB_CB(skb)->dccpd_opt_len += len;
-
- to = skb_push(skb, len);
- *to++ = DCCPO_ELAPSED_TIME;
- *to++ = len;
-
- if (elapsed_time_len == 2) {
- const __be16 var16 = htons((u16)elapsed_time);
- memcpy(to, &var16, 2);
- } else {
- const __be32 var32 = htonl(elapsed_time);
- memcpy(to, &var32, 4);
- }
-
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(dccp_insert_option_elapsed_time);
-
static int dccp_insert_option_timestamp(struct sk_buff *skb)
{
__be32 now = htonl(dccp_timestamp());
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index 4c6bdf97a657..595ddf0459db 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -152,17 +152,6 @@ static const struct file_operations dccpprobe_fops = {
.llseek = noop_llseek,
};
-static __init int setup_jprobe(void)
-{
- int ret = register_jprobe(&dccp_send_probe);
-
- if (ret) {
- request_module("dccp");
- ret = register_jprobe(&dccp_send_probe);
- }
- return ret;
-}
-
static __init int dccpprobe_init(void)
{
int ret = -ENOMEM;
@@ -174,7 +163,13 @@ static __init int dccpprobe_init(void)
if (!proc_create(procname, S_IRUSR, init_net.proc_net, &dccpprobe_fops))
goto err0;
- ret = setup_jprobe();
+ ret = register_jprobe(&dccp_send_probe);
+ if (ret) {
+ ret = request_module("dccp");
+ if (!ret)
+ ret = register_jprobe(&dccp_send_probe);
+ }
+
if (ret)
goto err1;
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index dd0dfb25f4b1..a603823a3e27 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -561,6 +561,7 @@ static const struct nla_policy dn_ifa_policy[IFA_MAX+1] = {
[IFA_LOCAL] = { .type = NLA_U16 },
[IFA_LABEL] = { .type = NLA_STRING,
.len = IFNAMSIZ - 1 },
+ [IFA_FLAGS] = { .type = NLA_U32 },
};
static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
@@ -648,7 +649,8 @@ static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
ifa->ifa_local = nla_get_le16(tb[IFA_LOCAL]);
ifa->ifa_address = nla_get_le16(tb[IFA_ADDRESS]);
- ifa->ifa_flags = ifm->ifa_flags;
+ ifa->ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) :
+ ifm->ifa_flags;
ifa->ifa_scope = ifm->ifa_scope;
ifa->ifa_dev = dn_db;
@@ -669,7 +671,8 @@ static inline size_t dn_ifaddr_nlmsg_size(void)
return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
+ nla_total_size(IFNAMSIZ) /* IFA_LABEL */
+ nla_total_size(2) /* IFA_ADDRESS */
- + nla_total_size(2); /* IFA_LOCAL */
+ + nla_total_size(2) /* IFA_LOCAL */
+ + nla_total_size(4); /* IFA_FLAGS */
}
static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
@@ -677,6 +680,7 @@ static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
{
struct ifaddrmsg *ifm;
struct nlmsghdr *nlh;
+ u32 ifa_flags = ifa->ifa_flags | IFA_F_PERMANENT;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
if (nlh == NULL)
@@ -685,7 +689,7 @@ static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
ifm = nlmsg_data(nlh);
ifm->ifa_family = AF_DECnet;
ifm->ifa_prefixlen = 16;
- ifm->ifa_flags = ifa->ifa_flags | IFA_F_PERMANENT;
+ ifm->ifa_flags = ifa_flags;
ifm->ifa_scope = ifa->ifa_scope;
ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
@@ -694,7 +698,8 @@ static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
(ifa->ifa_local &&
nla_put_le16(skb, IFA_LOCAL, ifa->ifa_local)) ||
(ifa->ifa_label[0] &&
- nla_put_string(skb, IFA_LABEL, ifa->ifa_label)))
+ nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
+ nla_put_u32(skb, IFA_FLAGS, ifa_flags))
goto nla_put_failure;
return nlmsg_end(skb, nlh);
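dn_ifaddr_nlmsg_size() has to grow in lockstep with the attributes dn_nl_fill_ifaddr() emits, which is why the new IFA_FLAGS u32 adds nla_total_size(4) above. A toy model of the accounting, assuming the usual 4-byte netlink alignment (see include/net/netlink.h for the real macros):

#include <stdio.h>

struct nlattr {
        unsigned short nla_len;
        unsigned short nla_type;
};

#define NLA_ALIGNTO     4
#define NLA_ALIGN(len)  (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN      ((int)NLA_ALIGN(sizeof(struct nlattr)))

static int nla_total_size(int payload)
{
        return NLA_ALIGN(NLA_HDRLEN + payload); /* header + padded payload */
}

int main(void)
{
        /* A u32 attribute such as IFA_FLAGS: 4-byte header + 4 bytes. */
        printf("IFA_FLAGS costs %d bytes\n", nla_total_size(4));
        return 0;
}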
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index f8637f93d318..c8121ceddb9e 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -102,19 +102,21 @@ struct neigh_table dn_neigh_table = {
.id = "dn_neigh_cache",
.parms ={
.tbl = &dn_neigh_table,
- .base_reachable_time = 30 * HZ,
- .retrans_time = 1 * HZ,
- .gc_staletime = 60 * HZ,
- .reachable_time = 30 * HZ,
- .delay_probe_time = 5 * HZ,
- .queue_len_bytes = 64*1024,
- .ucast_probes = 0,
- .app_probes = 0,
- .mcast_probes = 0,
- .anycast_delay = 0,
- .proxy_delay = 0,
- .proxy_qlen = 0,
- .locktime = 1 * HZ,
+ .reachable_time = 30 * HZ,
+ .data = {
+ [NEIGH_VAR_MCAST_PROBES] = 0,
+ [NEIGH_VAR_UCAST_PROBES] = 0,
+ [NEIGH_VAR_APP_PROBES] = 0,
+ [NEIGH_VAR_RETRANS_TIME] = 1 * HZ,
+ [NEIGH_VAR_BASE_REACHABLE_TIME] = 30 * HZ,
+ [NEIGH_VAR_DELAY_PROBE_TIME] = 5 * HZ,
+ [NEIGH_VAR_GC_STALETIME] = 60 * HZ,
+ [NEIGH_VAR_QUEUE_LEN_BYTES] = 64*1024,
+ [NEIGH_VAR_PROXY_QLEN] = 0,
+ [NEIGH_VAR_ANYCAST_DELAY] = 0,
+ [NEIGH_VAR_PROXY_DELAY] = 0,
+ [NEIGH_VAR_LOCKTIME] = 1 * HZ,
+ },
},
.gc_interval = 30 * HZ,
.gc_thresh1 = 128,
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index fe32388ea24f..ad2efa5b861b 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1288,8 +1288,6 @@ int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *fl, stru
err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD);
if (err == 0 && fl->flowidn_proto) {
- if (!(flags & MSG_DONTWAIT))
- fl->flowidn_flags |= FLOWI_FLAG_CAN_SLEEP;
*pprt = xfrm_lookup(&init_net, *pprt,
flowidn_to_flowi(fl), sk, 0);
if (IS_ERR(*pprt)) {
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index f347a2ca7d7e..bf8584339048 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -19,8 +19,7 @@
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
index c32be292c7e3..e7b6d53eef88 100644
--- a/net/dns_resolver/dns_query.c
+++ b/net/dns_resolver/dns_query.c
@@ -32,8 +32,7 @@
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
diff --git a/net/dns_resolver/internal.h b/net/dns_resolver/internal.h
index 17c7886b5b3a..7af1ed39c009 100644
--- a/net/dns_resolver/internal.h
+++ b/net/dns_resolver/internal.h
@@ -15,8 +15,7 @@
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/compiler.h>
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 003f5bb3acd2..327060c6c874 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -127,11 +127,6 @@ int hsr_create_self_node(struct list_head *self_node_db,
return 0;
}
-static void node_entry_reclaim(struct rcu_head *rh)
-{
- kfree(container_of(rh, struct node_entry, rcu_head));
-}
-
/* Add/merge node to the database of nodes. 'skb' must contain an HSR
* supervision frame.
@@ -175,7 +170,7 @@ struct node_entry *hsr_merge_node(struct hsr_priv *hsr_priv,
if (node && !ether_addr_equal(node->MacAddressA, hsr_sp->MacAddressA)) {
/* Node has changed its AddrA, frame was received from SlaveB */
list_del_rcu(&node->mac_list);
- call_rcu(&node->rcu_head, node_entry_reclaim);
+ kfree_rcu(node, rcu_head);
node = NULL;
}
@@ -183,7 +178,7 @@ struct node_entry *hsr_merge_node(struct hsr_priv *hsr_priv,
!ether_addr_equal(node->MacAddressB, hsr_ethsup->ethhdr.h_source)) {
/* Cables have been swapped */
list_del_rcu(&node->mac_list);
- call_rcu(&node->rcu_head, node_entry_reclaim);
+ kfree_rcu(node, rcu_head);
node = NULL;
}
@@ -192,7 +187,7 @@ struct node_entry *hsr_merge_node(struct hsr_priv *hsr_priv,
!ether_addr_equal(node->MacAddressA, hsr_ethsup->ethhdr.h_source)) {
/* Cables have been swapped */
list_del_rcu(&node->mac_list);
- call_rcu(&node->rcu_head, node_entry_reclaim);
+ kfree_rcu(node, rcu_head);
node = NULL;
}
@@ -288,7 +283,8 @@ void hsr_addr_subst_dest(struct hsr_priv *hsr_priv, struct ethhdr *ethhdr,
static bool seq_nr_after(u16 a, u16 b)
{
/* Remove inconsistency where
- * seq_nr_after(a, b) == seq_nr_before(a, b) */
+ * seq_nr_after(a, b) == seq_nr_before(a, b)
+ */
if ((int) b - a == 32768)
return false;
@@ -416,7 +412,7 @@ void hsr_prune_nodes(struct hsr_priv *hsr_priv)
hsr_nl_nodedown(hsr_priv, node->MacAddressA);
list_del_rcu(&node->mac_list);
/* Note that we need to free this entry later: */
- call_rcu(&node->rcu_head, node_entry_reclaim);
+ kfree_rcu(node, rcu_head);
}
}
rcu_read_unlock();
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
index 5325af85eea6..01a5261ac7a5 100644
--- a/net/hsr/hsr_netlink.c
+++ b/net/hsr/hsr_netlink.c
@@ -23,6 +23,8 @@ static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
[IFLA_HSR_SLAVE1] = { .type = NLA_U32 },
[IFLA_HSR_SLAVE2] = { .type = NLA_U32 },
[IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 },
+ [IFLA_HSR_SUPERVISION_ADDR] = { .type = NLA_BINARY, .len = ETH_ALEN },
+ [IFLA_HSR_SEQ_NR] = { .type = NLA_U16 },
};
@@ -59,6 +61,31 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
return hsr_dev_finalize(dev, link, multicast_spec);
}
+static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct hsr_priv *hsr_priv;
+
+ hsr_priv = netdev_priv(dev);
+
+ if (hsr_priv->slave[0])
+ if (nla_put_u32(skb, IFLA_HSR_SLAVE1, hsr_priv->slave[0]->ifindex))
+ goto nla_put_failure;
+
+ if (hsr_priv->slave[1])
+ if (nla_put_u32(skb, IFLA_HSR_SLAVE2, hsr_priv->slave[1]->ifindex))
+ goto nla_put_failure;
+
+ if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
+ hsr_priv->sup_multicast_addr) ||
+ nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr_priv->sequence_nr))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
static struct rtnl_link_ops hsr_link_ops __read_mostly = {
.kind = "hsr",
.maxtype = IFLA_HSR_MAX,
@@ -66,6 +93,7 @@ static struct rtnl_link_ops hsr_link_ops __read_mostly = {
.priv_size = sizeof(struct hsr_priv),
.setup = hsr_dev_setup,
.newlink = hsr_newlink,
+ .fill_info = hsr_fill_info,
};
diff --git a/net/ieee802154/wpan-class.c b/net/ieee802154/wpan-class.c
index ef56ab5b35fe..4dd37615a749 100644
--- a/net/ieee802154/wpan-class.c
+++ b/net/ieee802154/wpan-class.c
@@ -46,7 +46,7 @@ MASTER_SHOW(current_channel, "%d");
MASTER_SHOW(current_page, "%d");
MASTER_SHOW_COMPLEX(transmit_power, "%d +- %d dB",
((signed char) (phy->transmit_power << 2)) >> 2,
- (phy->transmit_power >> 6) ? (phy->transmit_power >> 6) * 3 : 1 );
+ (phy->transmit_power >> 6) ? (phy->transmit_power >> 6) * 3 : 1);
MASTER_SHOW(cca_mode, "%d");
static ssize_t channels_supported_show(struct device *dev,
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 4b81e91c80fe..f8c49ce5b283 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -11,7 +11,7 @@ obj-y := route.o inetpeer.o protocol.o \
tcp_offload.o datagram.o raw.o udp.o udplite.o \
udp_offload.o arp.o icmp.o devinet.o af_inet.o igmp.o \
fib_frontend.o fib_semantics.o fib_trie.o \
- inet_fragment.o ping.o ip_tunnel_core.o
+ inet_fragment.o ping.o ip_tunnel_core.o gre_offload.o
obj-$(CONFIG_NET_IP_TUNNEL) += ip_tunnel.o
obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
@@ -19,7 +19,7 @@ obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
obj-$(CONFIG_IP_MROUTE) += ipmr.o
obj-$(CONFIG_NET_IPIP) += ipip.o
-gre-y := gre_demux.o gre_offload.o
+gre-y := gre_demux.o
obj-$(CONFIG_NET_IPGRE_DEMUX) += gre.o
obj-$(CONFIG_NET_IPGRE) += ip_gre.o
obj-$(CONFIG_NET_IPVTI) += ip_vti.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 70011e029ac1..6268a4751e64 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -126,9 +126,6 @@
static struct list_head inetsw[SOCK_MAX];
static DEFINE_SPINLOCK(inetsw_lock);
-struct ipv4_config ipv4_config;
-EXPORT_SYMBOL(ipv4_config);
-
/* New destruction routine */
void inet_sock_destruct(struct sock *sk)
@@ -342,7 +339,7 @@ lookup_protocol:
inet->hdrincl = 1;
}
- if (ipv4_config.no_pmtu_disc)
+ if (net->ipv4.sysctl_ip_no_pmtu_disc)
inet->pmtudisc = IP_PMTUDISC_DONT;
else
inet->pmtudisc = IP_PMTUDISC_WANT;
@@ -1133,7 +1130,7 @@ static int inet_sk_reselect_saddr(struct sock *sk)
fl4 = &inet->cork.fl.u.ip4;
rt = ip_route_connect(fl4, daddr, 0, RT_CONN_FLAGS(sk),
sk->sk_bound_dev_if, sk->sk_protocol,
- inet->inet_sport, inet->inet_dport, sk, false);
+ inet->inet_sport, inet->inet_dport, sk);
if (IS_ERR(rt))
return PTR_ERR(rt);
@@ -1377,8 +1374,12 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
if (!NAPI_GRO_CB(p)->same_flow)
continue;
- iph2 = ip_hdr(p);
-
+ iph2 = (struct iphdr *)(p->data + off);
+ /* The above works because, with the exception of the top
+ * (innermost) layer, we only aggregate pkts with the same
+ * hdr length so all the hdrs we'll need to verify will start
+ * at the same offset.
+ */
if ((iph->protocol ^ iph2->protocol) |
((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
@@ -1390,13 +1391,24 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
NAPI_GRO_CB(p)->flush |=
(iph->ttl ^ iph2->ttl) |
(iph->tos ^ iph2->tos) |
- (__force int)((iph->frag_off ^ iph2->frag_off) & htons(IP_DF)) |
- ((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id);
+ ((iph->frag_off ^ iph2->frag_off) & htons(IP_DF));
+ /* Save the IP ID check to be included later when we get to
+ * the transport layer so only the inner most IP ID is checked.
+ * This is because some GSO/TSO implementations do not
+ * correctly increment the IP ID for the outer hdrs.
+ */
+ NAPI_GRO_CB(p)->flush_id =
+ ((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id);
NAPI_GRO_CB(p)->flush |= flush;
}
NAPI_GRO_CB(skb)->flush |= flush;
+ skb_set_network_header(skb, off);
+ /* The above will be needed by the transport layer if there is one
+ * immediately following this IP hdr.
+ */
+
skb_gro_pull(skb, sizeof(*iph));
skb_set_transport_header(skb, skb_gro_offset(skb));
@@ -1411,10 +1423,10 @@ out:
return pp;
}
-static int inet_gro_complete(struct sk_buff *skb)
+static int inet_gro_complete(struct sk_buff *skb, int nhoff)
{
- __be16 newlen = htons(skb->len - skb_network_offset(skb));
- struct iphdr *iph = ip_hdr(skb);
+ __be16 newlen = htons(skb->len - nhoff);
+ struct iphdr *iph = (struct iphdr *)(skb->data + nhoff);
const struct net_offload *ops;
int proto = iph->protocol;
int err = -ENOSYS;
@@ -1427,7 +1439,11 @@ static int inet_gro_complete(struct sk_buff *skb)
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
goto out_unlock;
- err = ops->callbacks.gro_complete(skb);
+ /* Only need to add sizeof(*iph) to get to the next hdr below
+ * because any hdr with option will have been flushed in
+ * inet_gro_receive().
+ */
+ err = ops->callbacks.gro_complete(skb, nhoff + sizeof(*iph));
out_unlock:
rcu_read_unlock();
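The flush_id saved earlier in this hunk is zero exactly when the new packet's IP ID equals the held packet's ID plus the number of segments already merged, i.e. when IDs increment by one per segment. A toy check of the arithmetic (host-order values, ntohs() elided):

#include <stdint.h>
#include <assert.h>

int main(void)
{
        uint16_t held_id = 1000;        /* id of the first packet in the flow */
        uint16_t count = 3;             /* segments aggregated so far */

        /* In sequence: (held_id + count) ^ new_id == 0, so no flush. */
        assert(((uint16_t)(held_id + count) ^ (uint16_t)1003) == 0);

        /* Out of sequence: nonzero, recorded for the transport layer. */
        assert(((uint16_t)(held_id + count) ^ (uint16_t)1005) != 0);
        return 0;
}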
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 7808093cede6..1a9b99e04465 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -166,18 +166,20 @@ struct neigh_table arp_tbl = {
.id = "arp_cache",
.parms = {
.tbl = &arp_tbl,
- .base_reachable_time = 30 * HZ,
- .retrans_time = 1 * HZ,
- .gc_staletime = 60 * HZ,
.reachable_time = 30 * HZ,
- .delay_probe_time = 5 * HZ,
- .queue_len_bytes = 64*1024,
- .ucast_probes = 3,
- .mcast_probes = 3,
- .anycast_delay = 1 * HZ,
- .proxy_delay = (8 * HZ) / 10,
- .proxy_qlen = 64,
- .locktime = 1 * HZ,
+ .data = {
+ [NEIGH_VAR_MCAST_PROBES] = 3,
+ [NEIGH_VAR_UCAST_PROBES] = 3,
+ [NEIGH_VAR_RETRANS_TIME] = 1 * HZ,
+ [NEIGH_VAR_BASE_REACHABLE_TIME] = 30 * HZ,
+ [NEIGH_VAR_DELAY_PROBE_TIME] = 5 * HZ,
+ [NEIGH_VAR_GC_STALETIME] = 60 * HZ,
+ [NEIGH_VAR_QUEUE_LEN_BYTES] = 64 * 1024,
+ [NEIGH_VAR_PROXY_QLEN] = 64,
+ [NEIGH_VAR_ANYCAST_DELAY] = 1 * HZ,
+ [NEIGH_VAR_PROXY_DELAY] = (8 * HZ) / 10,
+ [NEIGH_VAR_LOCKTIME] = 1 * HZ,
+ },
},
.gc_interval = 30 * HZ,
.gc_thresh1 = 128,
@@ -359,14 +361,14 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
if (!saddr)
saddr = inet_select_addr(dev, target, RT_SCOPE_LINK);
- probes -= neigh->parms->ucast_probes;
+ probes -= NEIGH_VAR(neigh->parms, UCAST_PROBES);
if (probes < 0) {
if (!(neigh->nud_state & NUD_VALID))
pr_debug("trying to ucast probe in NUD_INVALID\n");
neigh_ha_snapshot(dst_ha, neigh, dev);
dst_hw = dst_ha;
} else {
- probes -= neigh->parms->app_probes;
+ probes -= NEIGH_VAR(neigh->parms, APP_PROBES);
if (probes < 0) {
neigh_app_ns(neigh);
return;
@@ -379,6 +381,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
{
+ struct net *net = dev_net(in_dev->dev);
int scope;
switch (IN_DEV_ARP_IGNORE(in_dev)) {
@@ -397,6 +400,7 @@ static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
case 3: /* Do not reply for scope host addresses */
sip = 0;
scope = RT_SCOPE_LINK;
+ in_dev = NULL;
break;
case 4: /* Reserved */
case 5:
@@ -408,7 +412,7 @@ static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
default:
return 0;
}
- return !inet_confirm_addr(in_dev, sip, tip, scope);
+ return !inet_confirm_addr(net, in_dev, sip, tip, scope);
}
static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
@@ -728,6 +732,7 @@ static int arp_process(struct sk_buff *skb)
int addr_type;
struct neighbour *n;
struct net *net = dev_net(dev);
+ bool is_garp = false;
/* arp_rcv below verifies the ARP header and verifies the device
* is ARP'able.
@@ -871,7 +876,7 @@ static int arp_process(struct sk_buff *skb)
if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED ||
skb->pkt_type == PACKET_HOST ||
- in_dev->arp_parms->proxy_delay == 0) {
+ NEIGH_VAR(in_dev->arp_parms, PROXY_DELAY) == 0) {
arp_send(ARPOP_REPLY, ETH_P_ARP, sip,
dev, tip, sha, dev->dev_addr,
sha);
@@ -894,10 +899,12 @@ static int arp_process(struct sk_buff *skb)
It is possible, that this option should be enabled for some
devices (strip is candidate)
*/
+ is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip &&
+ inet_addr_type(net, sip) == RTN_UNICAST;
+
if (n == NULL &&
- (arp->ar_op == htons(ARPOP_REPLY) ||
- (arp->ar_op == htons(ARPOP_REQUEST) && tip == sip)) &&
- inet_addr_type(net, sip) == RTN_UNICAST)
+ ((arp->ar_op == htons(ARPOP_REPLY) &&
+ inet_addr_type(net, sip) == RTN_UNICAST) || is_garp))
n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
}
@@ -910,7 +917,10 @@ static int arp_process(struct sk_buff *skb)
agents are active. Taking the first reply prevents
arp trashing and chooses the fastest router.
*/
- override = time_after(jiffies, n->updated + n->parms->locktime);
+ override = time_after(jiffies,
+ n->updated +
+ NEIGH_VAR(n->parms, LOCKTIME)) ||
+ is_garp;
/* Broadcast replies and request packets
do not assert neighbour reachability.
@@ -1107,7 +1117,7 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
return err;
}
-int arp_invalidate(struct net_device *dev, __be32 ip)
+static int arp_invalidate(struct net_device *dev, __be32 ip)
{
struct neighbour *neigh = neigh_lookup(&arp_tbl, &ip, dev);
int err = -ENXIO;
@@ -1122,7 +1132,6 @@ int arp_invalidate(struct net_device *dev, __be32 ip)
return err;
}
-EXPORT_SYMBOL(arp_invalidate);
static int arp_req_delete_public(struct net *net, struct arpreq *r,
struct net_device *dev)
@@ -1284,7 +1293,7 @@ void __init arp_init(void)
dev_add_pack(&arp_packet_type);
arp_proc_init();
#ifdef CONFIG_SYSCTL
- neigh_sysctl_register(NULL, &arp_tbl.parms, "ipv4", NULL);
+ neigh_sysctl_register(NULL, &arp_tbl.parms, NULL);
#endif
register_netdevice_notifier(&arp_netdev_notifier);
}
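The NEIGH_VAR() accessor used throughout this file presumably just indexes the per-parms data[] array that replaces the named fields above; a sketch of its likely shape (the authoritative definition lives in include/net/neighbour.h):

/* Presumed accessor shape; NEIGH_VAR_LOCKTIME etc. index data[]. */
#define NEIGH_VAR(p, attr)      ((p)->data[NEIGH_VAR_ ## attr])

/* e.g. NEIGH_VAR(in_dev->arp_parms, PROXY_DELAY) would expand to
 *      in_dev->arp_parms->data[NEIGH_VAR_PROXY_DELAY]
 */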
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 667c1d4ca984..69e77c8ff285 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -31,8 +31,7 @@
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
@@ -1336,8 +1335,7 @@ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def,
secattr->flags |= NETLBL_SECATTR_MLS_LVL;
if (tag_len > 4) {
- secattr->attr.mls.cat =
- netlbl_secattr_catmap_alloc(GFP_ATOMIC);
+ secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
if (secattr->attr.mls.cat == NULL)
return -ENOMEM;
@@ -1432,8 +1430,7 @@ static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def,
secattr->flags |= NETLBL_SECATTR_MLS_LVL;
if (tag_len > 4) {
- secattr->attr.mls.cat =
- netlbl_secattr_catmap_alloc(GFP_ATOMIC);
+ secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
if (secattr->attr.mls.cat == NULL)
return -ENOMEM;
@@ -1527,8 +1524,7 @@ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def,
secattr->flags |= NETLBL_SECATTR_MLS_LVL;
if (tag_len > 4) {
- secattr->attr.mls.cat =
- netlbl_secattr_catmap_alloc(GFP_ATOMIC);
+ secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
if (secattr->attr.mls.cat == NULL)
return -ENOMEM;
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 19e36376d2a0..8b5134c582f1 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -53,7 +53,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
rt = ip_route_connect(fl4, usin->sin_addr.s_addr, saddr,
RT_CONN_FLAGS(sk), oif,
sk->sk_protocol,
- inet->inet_sport, usin->sin_port, sk, true);
+ inet->inet_sport, usin->sin_port, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
if (err == -ENETUNREACH)
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index a1b5bcbd04ae..9809f7b69728 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -99,6 +99,7 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
[IFA_BROADCAST] = { .type = NLA_U32 },
[IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
[IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
+ [IFA_FLAGS] = { .type = NLA_U32 },
};
#define IN4_ADDR_HSIZE_SHIFT 8
@@ -500,6 +501,7 @@ static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
return -ENOBUFS;
}
ipv4_devconf_setall(in_dev);
+ neigh_parms_data_state_setall(in_dev->arp_parms);
if (ifa->ifa_dev != in_dev) {
WARN_ON(ifa->ifa_dev);
in_dev_hold(in_dev);
@@ -747,6 +749,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
goto errout;
ipv4_devconf_setall(in_dev);
+ neigh_parms_data_state_setall(in_dev->arp_parms);
in_dev_hold(in_dev);
if (tb[IFA_ADDRESS] == NULL)
@@ -755,7 +758,8 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
INIT_HLIST_NODE(&ifa->hash);
ifa->ifa_prefixlen = ifm->ifa_prefixlen;
ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
- ifa->ifa_flags = ifm->ifa_flags;
+ ifa->ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) :
+ ifm->ifa_flags;
ifa->ifa_scope = ifm->ifa_scope;
ifa->ifa_dev = in_dev;
@@ -1236,22 +1240,21 @@ static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
/*
* Confirm that local IP address exists using wildcards:
- * - in_dev: only on this interface, 0=any interface
+ * - net: netns to check, cannot be NULL
+ * - in_dev: only on this interface, NULL=any interface
* - dst: only in the same subnet as dst, 0=any dst
* - local: address, 0=autoselect the local address
* - scope: maximum allowed scope value for the local address
*/
-__be32 inet_confirm_addr(struct in_device *in_dev,
+__be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
__be32 dst, __be32 local, int scope)
{
__be32 addr = 0;
struct net_device *dev;
- struct net *net;
- if (scope != RT_SCOPE_LINK)
+ if (in_dev != NULL)
return confirm_addr_indev(in_dev, dst, local, scope);
- net = dev_net(in_dev->dev);
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
in_dev = __in_dev_get_rcu(dev);
@@ -1382,6 +1385,8 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
set_ifa_lifetime(ifa, INFINITY_LIFE_TIME,
INFINITY_LIFE_TIME);
+ ipv4_devconf_setall(in_dev);
+ neigh_parms_data_state_setall(in_dev->arp_parms);
inet_insert_ifa(ifa);
}
}
@@ -1435,7 +1440,8 @@ static size_t inet_nlmsg_size(void)
+ nla_total_size(4) /* IFA_ADDRESS */
+ nla_total_size(4) /* IFA_LOCAL */
+ nla_total_size(4) /* IFA_BROADCAST */
- + nla_total_size(IFNAMSIZ); /* IFA_LABEL */
+ + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
+ + nla_total_size(4); /* IFA_FLAGS */
}
static inline u32 cstamp_delta(unsigned long cstamp)
@@ -1503,6 +1509,7 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
nla_put_be32(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
(ifa->ifa_label[0] &&
nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
+ nla_put_u32(skb, IFA_FLAGS, ifa->ifa_flags) ||
put_cacheinfo(skb, ifa->ifa_cstamp, ifa->ifa_tstamp,
preferred, valid))
goto nla_put_failure;
@@ -1691,6 +1698,8 @@ static int inet_netconf_msgsize_devconf(int type)
size += nla_total_size(4);
if (type == -1 || type == NETCONFA_MC_FORWARDING)
size += nla_total_size(4);
+ if (type == -1 || type == NETCONFA_PROXY_NEIGH)
+ size += nla_total_size(4);
return size;
}
@@ -1727,6 +1736,10 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
nla_put_s32(skb, NETCONFA_MC_FORWARDING,
IPV4_DEVCONF(*devconf, MC_FORWARDING)) < 0)
goto nla_put_failure;
+ if ((type == -1 || type == NETCONFA_PROXY_NEIGH) &&
+ nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
+ IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
+ goto nla_put_failure;
return nlmsg_end(skb, nlh);
@@ -1764,6 +1777,7 @@ static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
[NETCONFA_IFINDEX] = { .len = sizeof(int) },
[NETCONFA_FORWARDING] = { .len = sizeof(int) },
[NETCONFA_RP_FILTER] = { .len = sizeof(int) },
+ [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
};
static int inet_netconf_get_devconf(struct sk_buff *in_skb,
@@ -1945,6 +1959,19 @@ static void inet_forward_change(struct net *net)
}
}
+static int devinet_conf_ifindex(struct net *net, struct ipv4_devconf *cnf)
+{
+ if (cnf == net->ipv4.devconf_dflt)
+ return NETCONFA_IFINDEX_DEFAULT;
+ else if (cnf == net->ipv4.devconf_all)
+ return NETCONFA_IFINDEX_ALL;
+ else {
+ struct in_device *idev
+ = container_of(cnf, struct in_device, cnf);
+ return idev->dev->ifindex;
+ }
+}
+
static int devinet_conf_proc(struct ctl_table *ctl, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos)
@@ -1957,6 +1984,7 @@ static int devinet_conf_proc(struct ctl_table *ctl, int write,
struct ipv4_devconf *cnf = ctl->extra1;
struct net *net = ctl->extra2;
int i = (int *)ctl->data - cnf->data;
+ int ifindex;
set_bit(i, cnf->state);
@@ -1966,23 +1994,19 @@ static int devinet_conf_proc(struct ctl_table *ctl, int write,
i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
if ((new_value == 0) && (old_value != 0))
rt_cache_flush(net);
+
if (i == IPV4_DEVCONF_RP_FILTER - 1 &&
new_value != old_value) {
- int ifindex;
-
- if (cnf == net->ipv4.devconf_dflt)
- ifindex = NETCONFA_IFINDEX_DEFAULT;
- else if (cnf == net->ipv4.devconf_all)
- ifindex = NETCONFA_IFINDEX_ALL;
- else {
- struct in_device *idev =
- container_of(cnf, struct in_device,
- cnf);
- ifindex = idev->dev->ifindex;
- }
+ ifindex = devinet_conf_ifindex(net, cnf);
inet_netconf_notify_devconf(net, NETCONFA_RP_FILTER,
ifindex, cnf);
}
+ if (i == IPV4_DEVCONF_PROXY_ARP - 1 &&
+ new_value != old_value) {
+ ifindex = devinet_conf_ifindex(net, cnf);
+ inet_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
+ ifindex, cnf);
+ }
}
return ret;
@@ -2160,7 +2184,7 @@ static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
static void devinet_sysctl_register(struct in_device *idev)
{
- neigh_sysctl_register(idev->dev, idev->arp_parms, "ipv4", NULL);
+ neigh_sysctl_register(idev->dev, idev->arp_parms, NULL);
__devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
&idev->cnf);
}
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index 388d113fd289..1e4f6600b31d 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -33,8 +33,6 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, u32 tb_id,
void rtmsg_fib(int event, __be32 key, struct fib_alias *fa, int dst_len,
u32 tb_id, const struct nl_info *info, unsigned int nlm_flags);
struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio);
-int fib_detect_death(struct fib_info *fi, int order,
- struct fib_info **last_resort, int *last_idx, int dflt);
static inline void fib_result_assign(struct fib_result *res,
struct fib_info *fi)
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 523be38e37de..f2e15738534d 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -104,7 +104,10 @@ errout:
static bool fib4_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg)
{
struct fib_result *result = (struct fib_result *) arg->result;
- struct net_device *dev = result->fi->fib_dev;
+ struct net_device *dev = NULL;
+
+ if (result->fi)
+ dev = result->fi->fib_dev;
/* do not accept result if the route does
* not meet the required prefix length
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index e63f47a4e651..b53f0bf84dca 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -426,8 +426,9 @@ struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio)
return NULL;
}
-int fib_detect_death(struct fib_info *fi, int order,
- struct fib_info **last_resort, int *last_idx, int dflt)
+static int fib_detect_death(struct fib_info *fi, int order,
+ struct fib_info **last_resort, int *last_idx,
+ int dflt)
{
struct neighbour *n;
int state = NUD_NONE;
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 5893e99e8299..1863422fb7d5 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -355,14 +355,7 @@ static int __init gre_init(void)
goto err_gre;
}
- if (gre_offload_init()) {
- pr_err("can't add protocol offload\n");
- goto err_gso;
- }
-
return 0;
-err_gso:
- gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
err_gre:
inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
err:
@@ -371,8 +364,6 @@ err:
static void __exit gre_exit(void)
{
- gre_offload_exit();
-
gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
}
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index e5d436188464..746a7b10d434 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -28,6 +28,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
netdev_features_t enc_features;
int ghl = GRE_HEADER_SECTION;
struct gre_base_hdr *greh;
+ u16 mac_offset = skb->mac_header;
int mac_len = skb->mac_len;
__be16 protocol = skb->protocol;
int tnl_hlen;
@@ -58,13 +59,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
} else
csum = false;
+ if (unlikely(!pskb_may_pull(skb, ghl)))
+ goto out;
+
/* setup inner skb. */
skb->protocol = greh->protocol;
skb->encapsulation = 0;
- if (unlikely(!pskb_may_pull(skb, ghl)))
- goto out;
-
__skb_pull(skb, ghl);
skb_reset_mac_header(skb);
skb_set_network_header(skb, skb_inner_network_offset(skb));
@@ -73,8 +74,10 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
/* segment inner packet. */
enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
segs = skb_mac_gso_segment(skb, enc_features);
- if (!segs || IS_ERR(segs))
+ if (!segs || IS_ERR(segs)) {
+ skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
goto out;
+ }
skb = segs;
tnl_hlen = skb_tnl_header_len(skb);
@@ -113,19 +116,182 @@ out:
return segs;
}
+/* Compute the whole skb csum in s/w and store it, then verify GRO csum
+ * starting from gro_offset.
+ */
+static __sum16 gro_skb_checksum(struct sk_buff *skb)
+{
+ __sum16 sum;
+
+ skb->csum = skb_checksum(skb, 0, skb->len, 0);
+ NAPI_GRO_CB(skb)->csum = csum_sub(skb->csum,
+ csum_partial(skb->data, skb_gro_offset(skb), 0));
+ sum = csum_fold(NAPI_GRO_CB(skb)->csum);
+ if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) {
+ if (unlikely(!sum))
+ netdev_rx_csum_fault(skb->dev);
+ } else
+ skb->ip_summed = CHECKSUM_COMPLETE;
+
+ return sum;
+}
+
+static struct sk_buff **gre_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ struct sk_buff **pp = NULL;
+ struct sk_buff *p;
+ const struct gre_base_hdr *greh;
+ unsigned int hlen, grehlen;
+ unsigned int off;
+ int flush = 1;
+ struct packet_offload *ptype;
+ __be16 type;
+
+ off = skb_gro_offset(skb);
+ hlen = off + sizeof(*greh);
+ greh = skb_gro_header_fast(skb, off);
+ if (skb_gro_header_hard(skb, hlen)) {
+ greh = skb_gro_header_slow(skb, hlen, off);
+ if (unlikely(!greh))
+ goto out;
+ }
+
+ /* Only support version 0 and K (key), C (csum) flags. Note that
+ * although the support for the S (seq#) flag can be added easily
+ * for GRO, this is problematic for GSO and hence cannot be enabled
+ * here because a GRO pkt may end up in the forwarding path, thus
+ * requiring GSO support to break it up correctly.
+ */
+ if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
+ goto out;
+
+ type = greh->protocol;
+
+ rcu_read_lock();
+ ptype = gro_find_receive_by_type(type);
+ if (ptype == NULL)
+ goto out_unlock;
+
+ grehlen = GRE_HEADER_SECTION;
+
+ if (greh->flags & GRE_KEY)
+ grehlen += GRE_HEADER_SECTION;
+
+ if (greh->flags & GRE_CSUM)
+ grehlen += GRE_HEADER_SECTION;
+
+ hlen = off + grehlen;
+ if (skb_gro_header_hard(skb, hlen)) {
+ greh = skb_gro_header_slow(skb, hlen, off);
+ if (unlikely(!greh))
+ goto out_unlock;
+ }
+ if (greh->flags & GRE_CSUM) { /* Need to verify GRE csum first */
+ __sum16 csum = 0;
+
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ csum = csum_fold(NAPI_GRO_CB(skb)->csum);
+ /* Don't trust csum error calculated/reported by h/w */
+ if (skb->ip_summed == CHECKSUM_NONE || csum != 0)
+ csum = gro_skb_checksum(skb);
+
+ /* GRE CSUM is the 1's complement of the 1's complement sum
+ * of the GRE hdr plus payload so it should add up to 0xffff
+ * (and 0 after csum_fold()) just like the IPv4 hdr csum.
+ */
+ if (csum)
+ goto out_unlock;
+ }
+ flush = 0;
+
+ for (p = *head; p; p = p->next) {
+ const struct gre_base_hdr *greh2;
+
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ /* The following checks are needed to ensure only pkts
+ * from the same tunnel are considered for aggregation.
+ * The criteria for "the same tunnel" includes:
+ * 1) same version (we only support version 0 here)
+ * 2) same protocol (we only support ETH_P_IP for now)
+ * 3) same set of flags
+ * 4) same key if the key field is present.
+ */
+ greh2 = (struct gre_base_hdr *)(p->data + off);
+
+ if (greh2->flags != greh->flags ||
+ greh2->protocol != greh->protocol) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+ if (greh->flags & GRE_KEY) {
+ /* compare keys */
+ if (*(__be32 *)(greh2+1) != *(__be32 *)(greh+1)) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+ }
+ }
+
+ skb_gro_pull(skb, grehlen);
+
+ /* Adjust NAPI_GRO_CB(skb)->csum after skb_gro_pull() */
+ skb_gro_postpull_rcsum(skb, greh, grehlen);
+
+ pp = ptype->callbacks.gro_receive(head, skb);
+
+out_unlock:
+ rcu_read_unlock();
+out:
+ NAPI_GRO_CB(skb)->flush |= flush;
+
+ return pp;
+}
+
+int gre_gro_complete(struct sk_buff *skb, int nhoff)
+{
+ struct gre_base_hdr *greh = (struct gre_base_hdr *)(skb->data + nhoff);
+ struct packet_offload *ptype;
+ unsigned int grehlen = sizeof(*greh);
+ int err = -ENOENT;
+ __be16 type;
+
+ type = greh->protocol;
+ if (greh->flags & GRE_KEY)
+ grehlen += GRE_HEADER_SECTION;
+
+ if (greh->flags & GRE_CSUM)
+ grehlen += GRE_HEADER_SECTION;
+
+ rcu_read_lock();
+ ptype = gro_find_complete_by_type(type);
+ if (ptype != NULL)
+ err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);
+
+ rcu_read_unlock();
+ return err;
+}
+
static const struct net_offload gre_offload = {
.callbacks = {
.gso_send_check = gre_gso_send_check,
.gso_segment = gre_gso_segment,
+ .gro_receive = gre_gro_receive,
+ .gro_complete = gre_gro_complete,
},
};
-int __init gre_offload_init(void)
+static int __init gre_offload_init(void)
{
return inet_add_offload(&gre_offload, IPPROTO_GRE);
}
-void __exit gre_offload_exit(void)
+static void __exit gre_offload_exit(void)
{
inet_del_offload(&gre_offload, IPPROTO_GRE);
}
+
+module_init(gre_offload_init);
+module_exit(gre_offload_exit);
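Both gre_gro_receive() and gre_gro_complete() recompute the header length from the flag bits, one 4-byte section per optional field. A freestanding sketch of that sizing (the flag values are assumptions modeled on include/uapi/linux/if_tunnel.h, taken here in host order):

#include <stdint.h>

#define GRE_HEADER_SECTION      4
#define GRE_FLAG_CSUM           0x8000  /* assumed host-order values */
#define GRE_FLAG_KEY            0x2000

/* flags+protocol always occupy one section; csum and key add one each */
static unsigned int gre_hdr_len(uint16_t flags)
{
        unsigned int len = GRE_HEADER_SECTION;

        if (flags & GRE_FLAG_CSUM)
                len += GRE_HEADER_SECTION;
        if (flags & GRE_FLAG_KEY)
                len += GRE_HEADER_SECTION;
        return len;
}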
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 5c0e8bc6e5ba..fb3c5637199d 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -705,7 +705,9 @@ static void icmp_unreach(struct sk_buff *skb)
case ICMP_PORT_UNREACH:
break;
case ICMP_FRAG_NEEDED:
- if (ipv4_config.no_pmtu_disc) {
+ if (net->ipv4.sysctl_ip_no_pmtu_disc == 2) {
+ goto out;
+ } else if (net->ipv4.sysctl_ip_no_pmtu_disc) {
LIMIT_NETDEBUG(KERN_INFO pr_fmt("%pI4: fragmentation needed and DF set\n"),
&iph->daddr);
} else {
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 7defdc9ba167..84c4329cbd30 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -310,7 +310,7 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
struct ip_sf_list *psf;
int scount = 0;
- for (psf=pmc->sources; psf; psf=psf->sf_next) {
+ for (psf = pmc->sources; psf; psf = psf->sf_next) {
if (!is_in(pmc, psf, type, gdeleted, sdeleted))
continue;
scount++;
@@ -463,7 +463,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
}
first = 1;
psf_prev = NULL;
- for (psf=*psf_list; psf; psf=psf_next) {
+ for (psf = *psf_list; psf; psf = psf_next) {
__be32 *psrc;
psf_next = psf->sf_next;
@@ -520,7 +520,7 @@ empty_source:
return skb;
if (pmc->crcount || isquery) {
/* make sure we have room for group header */
- if (skb && AVAILABLE(skb)<sizeof(struct igmpv3_grec)) {
+ if (skb && AVAILABLE(skb) < sizeof(struct igmpv3_grec)) {
igmpv3_sendpack(skb);
skb = NULL; /* add_grhead will get a new one */
}
@@ -576,7 +576,7 @@ static void igmpv3_clear_zeros(struct ip_sf_list **ppsf)
struct ip_sf_list *psf_prev, *psf_next, *psf;
psf_prev = NULL;
- for (psf=*ppsf; psf; psf = psf_next) {
+ for (psf = *ppsf; psf; psf = psf_next) {
psf_next = psf->sf_next;
if (psf->sf_crcount == 0) {
if (psf_prev)
@@ -600,7 +600,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
/* deleted MCA's */
pmc_prev = NULL;
- for (pmc=in_dev->mc_tomb; pmc; pmc=pmc_next) {
+ for (pmc = in_dev->mc_tomb; pmc; pmc = pmc_next) {
pmc_next = pmc->next;
if (pmc->sfmode == MCAST_INCLUDE) {
type = IGMPV3_BLOCK_OLD_SOURCES;
@@ -764,7 +764,7 @@ static void igmp_ifc_event(struct in_device *in_dev)
static void igmp_timer_expire(unsigned long data)
{
- struct ip_mc_list *im=(struct ip_mc_list *)data;
+ struct ip_mc_list *im = (struct ip_mc_list *)data;
struct in_device *in_dev = im->interface;
spin_lock(&im->lock);
@@ -794,10 +794,10 @@ static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
int i, scount;
scount = 0;
- for (psf=pmc->sources; psf; psf=psf->sf_next) {
+ for (psf = pmc->sources; psf; psf = psf->sf_next) {
if (scount == nsrcs)
break;
- for (i=0; i<nsrcs; i++) {
+ for (i = 0; i < nsrcs; i++) {
/* skip inactive filters */
if (psf->sf_count[MCAST_INCLUDE] ||
pmc->sfcount[MCAST_EXCLUDE] !=
@@ -825,10 +825,10 @@ static int igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
/* mark INCLUDE-mode sources */
scount = 0;
- for (psf=pmc->sources; psf; psf=psf->sf_next) {
+ for (psf = pmc->sources; psf; psf = psf->sf_next) {
if (scount == nsrcs)
break;
- for (i=0; i<nsrcs; i++)
+ for (i = 0; i < nsrcs; i++)
if (srcs[i] == psf->sf_inaddr) {
psf->sf_gsresp = 1;
scount++;
@@ -1103,7 +1103,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
pmc->tomb = im->tomb;
pmc->sources = im->sources;
im->tomb = im->sources = NULL;
- for (psf=pmc->sources; psf; psf=psf->sf_next)
+ for (psf = pmc->sources; psf; psf = psf->sf_next)
psf->sf_crcount = pmc->crcount;
}
spin_unlock_bh(&im->lock);
@@ -1121,7 +1121,7 @@ static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr)
spin_lock_bh(&in_dev->mc_tomb_lock);
pmc_prev = NULL;
- for (pmc=in_dev->mc_tomb; pmc; pmc=pmc->next) {
+ for (pmc = in_dev->mc_tomb; pmc; pmc = pmc->next) {
if (pmc->multiaddr == multiaddr)
break;
pmc_prev = pmc;
@@ -1134,7 +1134,7 @@ static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr)
}
spin_unlock_bh(&in_dev->mc_tomb_lock);
if (pmc) {
- for (psf=pmc->tomb; psf; psf=psf_next) {
+ for (psf = pmc->tomb; psf; psf = psf_next) {
psf_next = psf->sf_next;
kfree(psf);
}
@@ -1167,7 +1167,7 @@ static void igmpv3_clear_delrec(struct in_device *in_dev)
psf = pmc->tomb;
pmc->tomb = NULL;
spin_unlock_bh(&pmc->lock);
- for (; psf; psf=psf_next) {
+ for (; psf; psf = psf_next) {
psf_next = psf->sf_next;
kfree(psf);
}
@@ -1557,7 +1557,7 @@ static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
int rv = 0;
psf_prev = NULL;
- for (psf=pmc->sources; psf; psf=psf->sf_next) {
+ for (psf = pmc->sources; psf; psf = psf->sf_next) {
if (psf->sf_inaddr == *psfsrc)
break;
psf_prev = psf;
@@ -1630,7 +1630,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
pmc->sfcount[sfmode]--;
}
err = 0;
- for (i=0; i<sfcount; i++) {
+ for (i = 0; i < sfcount; i++) {
int rv = ip_mc_del1_src(pmc, sfmode, &psfsrc[i]);
changerec |= rv > 0;
@@ -1650,7 +1650,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
IGMP_Unsolicited_Report_Count;
in_dev->mr_ifc_count = pmc->crcount;
- for (psf=pmc->sources; psf; psf = psf->sf_next)
+ for (psf = pmc->sources; psf; psf = psf->sf_next)
psf->sf_crcount = 0;
igmp_ifc_event(pmc->interface);
} else if (sf_setstate(pmc) || changerec) {
@@ -1671,7 +1671,7 @@ static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
struct ip_sf_list *psf, *psf_prev;
psf_prev = NULL;
- for (psf=pmc->sources; psf; psf=psf->sf_next) {
+ for (psf = pmc->sources; psf; psf = psf->sf_next) {
if (psf->sf_inaddr == *psfsrc)
break;
psf_prev = psf;
@@ -1699,7 +1699,7 @@ static void sf_markstate(struct ip_mc_list *pmc)
struct ip_sf_list *psf;
int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];
- for (psf=pmc->sources; psf; psf=psf->sf_next)
+ for (psf = pmc->sources; psf; psf = psf->sf_next)
if (pmc->sfcount[MCAST_EXCLUDE]) {
psf->sf_oldin = mca_xcount ==
psf->sf_count[MCAST_EXCLUDE] &&
@@ -1716,7 +1716,7 @@ static int sf_setstate(struct ip_mc_list *pmc)
int new_in, rv;
rv = 0;
- for (psf=pmc->sources; psf; psf=psf->sf_next) {
+ for (psf = pmc->sources; psf; psf = psf->sf_next) {
if (pmc->sfcount[MCAST_EXCLUDE]) {
new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
!psf->sf_count[MCAST_INCLUDE];
@@ -1726,7 +1726,7 @@ static int sf_setstate(struct ip_mc_list *pmc)
if (!psf->sf_oldin) {
struct ip_sf_list *prev = NULL;
- for (dpsf=pmc->tomb; dpsf; dpsf=dpsf->sf_next) {
+ for (dpsf = pmc->tomb; dpsf; dpsf = dpsf->sf_next) {
if (dpsf->sf_inaddr == psf->sf_inaddr)
break;
prev = dpsf;
@@ -1748,7 +1748,7 @@ static int sf_setstate(struct ip_mc_list *pmc)
* add or update "delete" records if an active filter
* is now inactive
*/
- for (dpsf=pmc->tomb; dpsf; dpsf=dpsf->sf_next)
+ for (dpsf = pmc->tomb; dpsf; dpsf = dpsf->sf_next)
if (dpsf->sf_inaddr == psf->sf_inaddr)
break;
if (!dpsf) {
@@ -1800,7 +1800,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
if (!delta)
pmc->sfcount[sfmode]++;
err = 0;
- for (i=0; i<sfcount; i++) {
+ for (i = 0; i < sfcount; i++) {
err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i]);
if (err)
break;
@@ -1810,7 +1810,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
if (!delta)
pmc->sfcount[sfmode]--;
- for (j=0; j<i; j++)
+ for (j = 0; j < i; j++)
(void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
} else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
#ifdef CONFIG_IP_MULTICAST
@@ -1829,7 +1829,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
IGMP_Unsolicited_Report_Count;
in_dev->mr_ifc_count = pmc->crcount;
- for (psf=pmc->sources; psf; psf = psf->sf_next)
+ for (psf = pmc->sources; psf; psf = psf->sf_next)
psf->sf_crcount = 0;
igmp_ifc_event(in_dev);
} else if (sf_setstate(pmc)) {
@@ -1844,12 +1844,12 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc)
{
struct ip_sf_list *psf, *nextpsf;
- for (psf=pmc->tomb; psf; psf=nextpsf) {
+ for (psf = pmc->tomb; psf; psf = nextpsf) {
nextpsf = psf->sf_next;
kfree(psf);
}
pmc->tomb = NULL;
- for (psf=pmc->sources; psf; psf=nextpsf) {
+ for (psf = pmc->sources; psf; psf = nextpsf) {
nextpsf = psf->sf_next;
kfree(psf);
}
@@ -2043,7 +2043,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
if (!psl)
goto done; /* err = -EADDRNOTAVAIL */
rv = !0;
- for (i=0; i<psl->sl_count; i++) {
+ for (i = 0; i < psl->sl_count; i++) {
rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
sizeof(__be32));
if (rv == 0)
@@ -2062,7 +2062,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
&mreqs->imr_sourceaddr, 1);
- for (j=i+1; j<psl->sl_count; j++)
+ for (j = i+1; j < psl->sl_count; j++)
psl->sl_addr[j-1] = psl->sl_addr[j];
psl->sl_count--;
err = 0;
@@ -2088,7 +2088,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
newpsl->sl_max = count;
newpsl->sl_count = count - IP_SFBLOCK;
if (psl) {
- for (i=0; i<psl->sl_count; i++)
+ for (i = 0; i < psl->sl_count; i++)
newpsl->sl_addr[i] = psl->sl_addr[i];
/* decrease mem now to avoid the memleak warning */
atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
@@ -2098,7 +2098,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
psl = newpsl;
}
rv = 1; /* > 0 for insert logic below if sl_count is 0 */
- for (i=0; i<psl->sl_count; i++) {
+ for (i = 0; i < psl->sl_count; i++) {
rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
sizeof(__be32));
if (rv == 0)
@@ -2106,7 +2106,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
}
if (rv == 0) /* address already there is an error */
goto done;
- for (j=psl->sl_count-1; j>=i; j--)
+ for (j = psl->sl_count-1; j >= i; j--)
psl->sl_addr[j+1] = psl->sl_addr[j];
psl->sl_addr[i] = mreqs->imr_sourceaddr;
psl->sl_count++;
@@ -2305,7 +2305,7 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) {
return -EFAULT;
}
- for (i=0; i<copycount; i++) {
+ for (i = 0; i < copycount; i++) {
struct sockaddr_storage ss;
psin = (struct sockaddr_in *)&ss;
@@ -2350,7 +2350,7 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
if (!psl)
goto unlock;
- for (i=0; i<psl->sl_count; i++) {
+ for (i = 0; i < psl->sl_count; i++) {
if (psl->sl_addr[i] == rmt_addr)
break;
}
@@ -2423,7 +2423,7 @@ int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u
rv = 1;
} else if (im) {
if (src_addr) {
- for (psf=im->sources; psf; psf=psf->sf_next) {
+ for (psf = im->sources; psf; psf = psf->sf_next) {
if (psf->sf_inaddr == src_addr)
break;
}
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 56a964a553d2..a0f52dac8940 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -106,6 +106,10 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
r->id.idiag_sport = inet->inet_sport;
r->id.idiag_dport = inet->inet_dport;
+
+ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+
r->id.idiag_src[0] = inet->inet_rcv_saddr;
r->id.idiag_dst[0] = inet->inet_daddr;
@@ -240,12 +244,19 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
r->idiag_family = tw->tw_family;
r->idiag_retrans = 0;
+
r->id.idiag_if = tw->tw_bound_dev_if;
sock_diag_save_cookie(tw, r->id.idiag_cookie);
+
r->id.idiag_sport = tw->tw_sport;
r->id.idiag_dport = tw->tw_dport;
+
+ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+
r->id.idiag_src[0] = tw->tw_rcv_saddr;
r->id.idiag_dst[0] = tw->tw_daddr;
+
r->idiag_state = tw->tw_substate;
r->idiag_timer = 3;
r->idiag_expires = jiffies_to_msecs(tmo);
@@ -726,8 +737,13 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
r->id.idiag_sport = inet->inet_sport;
r->id.idiag_dport = ireq->ir_rmt_port;
+
+ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+
r->id.idiag_src[0] = ireq->ir_loc_addr;
r->id.idiag_dst[0] = ireq->ir_rmt_addr;
+
r->idiag_expires = jiffies_to_msecs(tmo);
r->idiag_rqueue = 0;
r->idiag_wqueue = 0;
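
The memset() additions matter because idiag_src and idiag_dst are 16-byte fields sized for IPv6, of which an IPv4 socket fills only the first word; without zeroing first, the remaining twelve bytes of each field would carry stale kernel memory out to the netlink reader. A minimal illustration of the pattern, with a simplified stand-in for the struct layout:

#include <string.h>
#include <stdint.h>

struct diag_id_example {
	uint32_t src[4];   /* sized for IPv6; IPv4 uses src[0] only */
	uint32_t dst[4];
};

static void fill_ipv4(struct diag_id_example *id,
		      uint32_t saddr, uint32_t daddr)
{
	/* Zero the whole fields first so the unused 12 bytes of each
	 * cannot leak whatever the buffer previously contained. */
	memset(id->src, 0, sizeof(id->src));
	memset(id->dst, 0, sizeof(id->dst));

	id->src[0] = saddr;
	id->dst[0] = daddr;
}
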
diff --git a/net/ipv4/inet_lro.c b/net/ipv4/inet_lro.c
index 1975f52933c5..f17ea49b28fb 100644
--- a/net/ipv4/inet_lro.c
+++ b/net/ipv4/inet_lro.c
@@ -230,29 +230,6 @@ static void lro_add_packet(struct net_lro_desc *lro_desc, struct sk_buff *skb,
lro_desc->last_skb = skb;
}
-static void lro_add_frags(struct net_lro_desc *lro_desc,
- int len, int hlen, int truesize,
- struct skb_frag_struct *skb_frags,
- struct iphdr *iph, struct tcphdr *tcph)
-{
- struct sk_buff *skb = lro_desc->parent;
- int tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);
-
- lro_add_common(lro_desc, iph, tcph, tcp_data_len);
-
- skb->truesize += truesize;
-
- skb_frags[0].page_offset += hlen;
- skb_frag_size_sub(&skb_frags[0], hlen);
-
- while (tcp_data_len > 0) {
- *(lro_desc->next_frag) = *skb_frags;
- tcp_data_len -= skb_frag_size(skb_frags);
- lro_desc->next_frag++;
- skb_frags++;
- skb_shinfo(skb)->nr_frags++;
- }
-}
static int lro_check_tcp_conn(struct net_lro_desc *lro_desc,
struct iphdr *iph,
@@ -371,128 +348,6 @@ out:
return 1;
}
-
-static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr,
- struct skb_frag_struct *frags,
- int len, int true_size,
- void *mac_hdr,
- int hlen, __wsum sum,
- u32 ip_summed)
-{
- struct sk_buff *skb;
- struct skb_frag_struct *skb_frags;
- int data_len = len;
- int hdr_len = min(len, hlen);
-
- skb = netdev_alloc_skb(lro_mgr->dev, hlen + lro_mgr->frag_align_pad);
- if (!skb)
- return NULL;
-
- skb_reserve(skb, lro_mgr->frag_align_pad);
- skb->len = len;
- skb->data_len = len - hdr_len;
- skb->truesize += true_size;
- skb->tail += hdr_len;
-
- memcpy(skb->data, mac_hdr, hdr_len);
-
- skb_frags = skb_shinfo(skb)->frags;
- while (data_len > 0) {
- *skb_frags = *frags;
- data_len -= skb_frag_size(frags);
- skb_frags++;
- frags++;
- skb_shinfo(skb)->nr_frags++;
- }
-
- skb_shinfo(skb)->frags[0].page_offset += hdr_len;
- skb_frag_size_sub(&skb_shinfo(skb)->frags[0], hdr_len);
-
- skb->ip_summed = ip_summed;
- skb->csum = sum;
- skb->protocol = eth_type_trans(skb, lro_mgr->dev);
- return skb;
-}
-
-static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr,
- struct skb_frag_struct *frags,
- int len, int true_size,
- void *priv, __wsum sum)
-{
- struct net_lro_desc *lro_desc;
- struct iphdr *iph;
- struct tcphdr *tcph;
- struct sk_buff *skb;
- u64 flags;
- void *mac_hdr;
- int mac_hdr_len;
- int hdr_len = LRO_MAX_PG_HLEN;
- int vlan_hdr_len = 0;
-
- if (!lro_mgr->get_frag_header ||
- lro_mgr->get_frag_header(frags, (void *)&mac_hdr, (void *)&iph,
- (void *)&tcph, &flags, priv)) {
- mac_hdr = skb_frag_address(frags);
- goto out1;
- }
-
- if (!(flags & LRO_IPV4) || !(flags & LRO_TCP))
- goto out1;
-
- hdr_len = (int)((void *)(tcph) + TCP_HDR_LEN(tcph) - mac_hdr);
- mac_hdr_len = (int)((void *)(iph) - mac_hdr);
-
- lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
- if (!lro_desc)
- goto out1;
-
- if (!lro_desc->active) { /* start new lro session */
- if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, NULL))
- goto out1;
-
- skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr,
- hdr_len, 0, lro_mgr->ip_summed_aggr);
- if (!skb)
- goto out;
-
- if ((skb->protocol == htons(ETH_P_8021Q)) &&
- !(lro_mgr->features & LRO_F_EXTRACT_VLAN_ID))
- vlan_hdr_len = VLAN_HLEN;
-
- iph = (void *)(skb->data + vlan_hdr_len);
- tcph = (void *)((u8 *)skb->data + vlan_hdr_len
- + IP_HDR_LEN(iph));
-
- lro_init_desc(lro_desc, skb, iph, tcph);
- LRO_INC_STATS(lro_mgr, aggregated);
- return NULL;
- }
-
- if (lro_desc->tcp_next_seq != ntohl(tcph->seq))
- goto out2;
-
- if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, lro_desc))
- goto out2;
-
- lro_add_frags(lro_desc, len, hdr_len, true_size, frags, iph, tcph);
- LRO_INC_STATS(lro_mgr, aggregated);
-
- if ((skb_shinfo(lro_desc->parent)->nr_frags >= lro_mgr->max_aggr) ||
- lro_desc->parent->len > (0xFFFF - lro_mgr->dev->mtu))
- lro_flush(lro_mgr, lro_desc);
-
- return NULL;
-
-out2: /* send aggregated packets to the stack */
- lro_flush(lro_mgr, lro_desc);
-
-out1: /* Original packet has to be posted to the stack */
- skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr,
- hdr_len, sum, lro_mgr->ip_summed);
-out:
- return skb;
-}
-
void lro_receive_skb(struct net_lro_mgr *lro_mgr,
struct sk_buff *skb,
void *priv)
@@ -506,23 +361,6 @@ void lro_receive_skb(struct net_lro_mgr *lro_mgr,
}
EXPORT_SYMBOL(lro_receive_skb);
-void lro_receive_frags(struct net_lro_mgr *lro_mgr,
- struct skb_frag_struct *frags,
- int len, int true_size, void *priv, __wsum sum)
-{
- struct sk_buff *skb;
-
- skb = __lro_proc_segment(lro_mgr, frags, len, true_size, priv, sum);
- if (!skb)
- return;
-
- if (lro_mgr->features & LRO_F_NAPI)
- netif_receive_skb(skb);
- else
- netif_rx(skb);
-}
-EXPORT_SYMBOL(lro_receive_frags);
-
void lro_flush_all(struct net_lro_mgr *lro_mgr)
{
int i;
@@ -534,14 +372,3 @@ void lro_flush_all(struct net_lro_mgr *lro_mgr)
}
}
EXPORT_SYMBOL(lro_flush_all);
-
-void lro_flush_pkt(struct net_lro_mgr *lro_mgr,
- struct iphdr *iph, struct tcphdr *tcph)
-{
- struct net_lro_desc *lro_desc;
-
- lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
- if (lro_desc->active)
- lro_flush(lro_mgr, lro_desc);
-}
-EXPORT_SYMBOL(lro_flush_pkt);
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 33d5537881ed..48f424465112 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -109,13 +109,6 @@ static inline void flush_check(struct inet_peer_base *base, int family)
}
}
-void inetpeer_invalidate_family(int family)
-{
- atomic_t *fp = inetpeer_seq_ptr(family);
-
- atomic_inc(fp);
-}
-
#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
/* Exported for sysctl_net_ipv4. */
@@ -227,7 +220,7 @@ static int addr_compare(const struct inetpeer_addr *a,
stackptr = _stack; \
*stackptr++ = &_base->root; \
for (u = rcu_deref_locked(_base->root, _base); \
- u != peer_avl_empty; ) { \
+ u != peer_avl_empty;) { \
int cmp = addr_compare(_daddr, &u->daddr); \
if (cmp == 0) \
break; \
@@ -282,7 +275,7 @@ static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
*stackptr++ = &start->avl_left; \
v = &start->avl_left; \
for (u = rcu_deref_locked(*v, base); \
- u->avl_right != peer_avl_empty_rcu; ) { \
+ u->avl_right != peer_avl_empty_rcu;) { \
v = &u->avl_right; \
*stackptr++ = v; \
u = rcu_deref_locked(*v, base); \
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 2481993a4970..c10a3ce5cbff 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -704,7 +704,7 @@ struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
if (ip_defrag(skb, user))
return NULL;
- skb->rxhash = 0;
+ skb_clear_hash(skb);
}
}
return skb;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d7aea4c5b940..e560ef34cf4b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -217,6 +217,7 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
iph->saddr, iph->daddr, tpi->key);
if (tunnel) {
+ skb_pop_mac_header(skb);
ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error);
return PACKET_RCVD;
}
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index ec7264514a82..f4ab72e19af9 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -167,7 +167,7 @@ int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
soffset -= 4;
if (soffset > 3) {
memcpy(&faddr, &start[soffset-1], 4);
- for (soffset-=4, doffset=4; soffset > 3; soffset-=4, doffset+=4)
+ for (soffset -= 4, doffset = 4; soffset > 3; soffset -= 4, doffset += 4)
memcpy(&dptr[doffset-1], &start[soffset-1], 4);
/*
* RFC1812 requires to fix illegal source routes.
@@ -227,7 +227,7 @@ void ip_options_fragment(struct sk_buff *skb)
continue;
}
optlen = optptr[1];
- if (optlen<2 || optlen>l)
+ if (optlen < 2 || optlen > l)
return;
if (!IPOPT_COPIED(*optptr))
memset(optptr, IPOPT_NOOP, optlen);
@@ -275,27 +275,27 @@ int ip_options_compile(struct net *net,
for (l = opt->optlen; l > 0; ) {
switch (*optptr) {
- case IPOPT_END:
- for (optptr++, l--; l>0; optptr++, l--) {
+ case IPOPT_END:
+ for (optptr++, l--; l > 0; optptr++, l--) {
if (*optptr != IPOPT_END) {
*optptr = IPOPT_END;
opt->is_changed = 1;
}
}
goto eol;
- case IPOPT_NOOP:
+ case IPOPT_NOOP:
l--;
optptr++;
continue;
}
optlen = optptr[1];
- if (optlen<2 || optlen>l) {
+ if (optlen < 2 || optlen > l) {
pp_ptr = optptr;
goto error;
}
switch (*optptr) {
- case IPOPT_SSRR:
- case IPOPT_LSRR:
+ case IPOPT_SSRR:
+ case IPOPT_LSRR:
if (optlen < 3) {
pp_ptr = optptr + 1;
goto error;
@@ -321,7 +321,7 @@ int ip_options_compile(struct net *net,
opt->is_strictroute = (optptr[0] == IPOPT_SSRR);
opt->srr = optptr - iph;
break;
- case IPOPT_RR:
+ case IPOPT_RR:
if (opt->rr) {
pp_ptr = optptr;
goto error;
@@ -349,7 +349,7 @@ int ip_options_compile(struct net *net,
}
opt->rr = optptr - iph;
break;
- case IPOPT_TIMESTAMP:
+ case IPOPT_TIMESTAMP:
if (opt->ts) {
pp_ptr = optptr;
goto error;
@@ -369,13 +369,13 @@ int ip_options_compile(struct net *net,
goto error;
}
switch (optptr[3]&0xF) {
- case IPOPT_TS_TSONLY:
+ case IPOPT_TS_TSONLY:
if (skb)
timeptr = &optptr[optptr[2]-1];
opt->ts_needtime = 1;
optptr[2] += 4;
break;
- case IPOPT_TS_TSANDADDR:
+ case IPOPT_TS_TSANDADDR:
if (optptr[2]+7 > optptr[1]) {
pp_ptr = optptr + 2;
goto error;
@@ -389,7 +389,7 @@ int ip_options_compile(struct net *net,
opt->ts_needtime = 1;
optptr[2] += 8;
break;
- case IPOPT_TS_PRESPEC:
+ case IPOPT_TS_PRESPEC:
if (optptr[2]+7 > optptr[1]) {
pp_ptr = optptr + 2;
goto error;
@@ -405,7 +405,7 @@ int ip_options_compile(struct net *net,
opt->ts_needtime = 1;
optptr[2] += 8;
break;
- default:
+ default:
if (!skb && !ns_capable(net->user_ns, CAP_NET_RAW)) {
pp_ptr = optptr + 3;
goto error;
@@ -433,7 +433,7 @@ int ip_options_compile(struct net *net,
}
opt->ts = optptr - iph;
break;
- case IPOPT_RA:
+ case IPOPT_RA:
if (optlen < 4) {
pp_ptr = optptr + 1;
goto error;
@@ -441,7 +441,7 @@ int ip_options_compile(struct net *net,
if (optptr[2] == 0 && optptr[3] == 0)
opt->router_alert = optptr - iph;
break;
- case IPOPT_CIPSO:
+ case IPOPT_CIPSO:
if ((!skb && !ns_capable(net->user_ns, CAP_NET_RAW)) || opt->cipso) {
pp_ptr = optptr;
goto error;
@@ -452,9 +452,9 @@ int ip_options_compile(struct net *net,
goto error;
}
break;
- case IPOPT_SEC:
- case IPOPT_SID:
- default:
+ case IPOPT_SEC:
+ case IPOPT_SID:
+ default:
if (!skb && !ns_capable(net->user_ns, CAP_NET_RAW)) {
pp_ptr = optptr;
goto error;
@@ -572,7 +572,7 @@ void ip_forward_options(struct sk_buff *skb)
optptr = raw + opt->srr;
- for ( srrptr=optptr[2], srrspace = optptr[1];
+	for (srrptr = optptr[2], srrspace = optptr[1];
srrptr <= srrspace;
srrptr += 4
) {
@@ -628,7 +628,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
if (rt->rt_type != RTN_LOCAL)
return -EINVAL;
- for (srrptr=optptr[2], srrspace = optptr[1]; srrptr <= srrspace; srrptr += 4) {
+ for (srrptr = optptr[2], srrspace = optptr[1]; srrptr <= srrspace; srrptr += 4) {
if (srrptr + 3 > srrspace) {
icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((opt->srr+2)<<24));
return -EINVAL;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 912402752f2f..df184616493f 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -828,7 +828,7 @@ static int __ip_append_data(struct sock *sk,
if (cork->length + length > maxnonfragsize - fragheaderlen) {
ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
- mtu-exthdrlen);
+ mtu - (opt ? opt->optlen : 0));
return -EMSGSIZE;
}
@@ -1151,7 +1151,8 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
mtu : 0xFFFF;
if (cork->length + size > maxnonfragsize - fragheaderlen) {
- ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu);
+ ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
+ mtu - (opt ? opt->optlen : 0));
return -EMSGSIZE;
}
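
Both EMSGSIZE paths now report the MTU with the IP options length subtracted, since options consume part of every packet's header budget and the old values overstated what the caller could actually send. The arithmetic, spelled out as a back-of-the-envelope sketch:

/* What userspace receives as the "MTU" in the queued error: the path
 * MTU less whatever IP options this socket attaches to each packet. */
static unsigned int reported_mtu(unsigned int mtu, unsigned int optlen)
{
	return mtu - optlen;       /* optlen == 0 without IP options */
}
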
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 3f858266fa7e..a9fc435dc89f 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -386,7 +386,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
/*
* Handle MSG_ERRQUEUE
*/
-int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
+int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{
struct sock_exterr_skb *serr;
struct sk_buff *skb, *skb2;
@@ -423,6 +423,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
serr->addr_offset);
sin->sin_port = serr->port;
memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
+ *addr_len = sizeof(*sin);
}
memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
@@ -1050,7 +1051,7 @@ e_inval:
*
* To support IP_CMSG_PKTINFO option, we store rt_iif and specific
* destination in skb->cb[] before dst drop.
- * This way, receiver doesnt make cache line misses to read rtable.
+ * This way, receiver doesn't make cache line misses to read rtable.
*/
void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
{
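
The new addr_len output parameter exists because MSG_ERRQUEUE reads return the offending address in msg_name, and the reported address length was previously left unset. A sketch of the userspace side that exercises this path (the socket is assumed to have IP_RECVERR enabled via setsockopt(); error handling trimmed):

#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <linux/errqueue.h>

static void drain_icmp_errors(int fd)
{
	for (;;) {
		char cbuf[512], dbuf[512];
		struct sockaddr_in addr;
		struct iovec iov = { .iov_base = dbuf, .iov_len = sizeof(dbuf) };
		struct msghdr msg = {
			.msg_name = &addr, .msg_namelen = sizeof(addr),
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
		};
		struct cmsghdr *cm;

		if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) < 0)
			break;	/* queue drained (EAGAIN) or real error */

		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
			if (cm->cmsg_level == IPPROTO_IP &&
			    cm->cmsg_type == IP_RECVERR) {
				struct sock_extended_err *ee =
					(struct sock_extended_err *)CMSG_DATA(cm);
				/* msg.msg_namelen now reports a valid length
				 * thanks to the *addr_len fix above */
				(void)ee;
			}
		}
	}
}
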
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 90ff9570d7d4..d3929a69f008 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -68,6 +68,63 @@ static unsigned int ip_tunnel_hash(struct ip_tunnel_net *itn,
IP_TNL_HASH_BITS);
}
+static inline void __tunnel_dst_set(struct ip_tunnel_dst *idst,
+ struct dst_entry *dst)
+{
+ struct dst_entry *old_dst;
+
+ if (dst && (dst->flags & DST_NOCACHE))
+ dst = NULL;
+
+ spin_lock_bh(&idst->lock);
+ old_dst = rcu_dereference(idst->dst);
+ rcu_assign_pointer(idst->dst, dst);
+ dst_release(old_dst);
+ spin_unlock_bh(&idst->lock);
+}
+
+static inline void tunnel_dst_set(struct ip_tunnel *t, struct dst_entry *dst)
+{
+ __tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst);
+}
+
+static inline void tunnel_dst_reset(struct ip_tunnel *t)
+{
+ tunnel_dst_set(t, NULL);
+}
+
+static void tunnel_dst_reset_all(struct ip_tunnel *t)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
+}
+
+static inline struct dst_entry *tunnel_dst_get(struct ip_tunnel *t)
+{
+ struct dst_entry *dst;
+
+ rcu_read_lock();
+ dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
+ if (dst)
+ dst_hold(dst);
+ rcu_read_unlock();
+ return dst;
+}
+
+static struct dst_entry *tunnel_dst_check(struct ip_tunnel *t, u32 cookie)
+{
+ struct dst_entry *dst = tunnel_dst_get(t);
+
+ if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+ tunnel_dst_reset(t);
+ return NULL;
+ }
+
+ return dst;
+}
+
/* Often modified stats are per cpu, other are shared (netdev->stats) */
struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *tot)
@@ -75,7 +132,8 @@ struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
int i;
for_each_possible_cpu(i) {
- const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+ const struct pcpu_sw_netstats *tstats =
+ per_cpu_ptr(dev->tstats, i);
u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
unsigned int start;
@@ -318,11 +376,10 @@ failed:
return ERR_PTR(err);
}
-static inline struct rtable *ip_route_output_tunnel(struct net *net,
- struct flowi4 *fl4,
- int proto,
- __be32 daddr, __be32 saddr,
- __be32 key, __u8 tos, int oif)
+static inline void init_tunnel_flow(struct flowi4 *fl4,
+ int proto,
+ __be32 daddr, __be32 saddr,
+ __be32 key, __u8 tos, int oif)
{
memset(fl4, 0, sizeof(*fl4));
fl4->flowi4_oif = oif;
@@ -331,7 +388,6 @@ static inline struct rtable *ip_route_output_tunnel(struct net *net,
fl4->flowi4_tos = tos;
fl4->flowi4_proto = proto;
fl4->fl4_gre_key = key;
- return ip_route_output_key(net, fl4);
}
static int ip_tunnel_bind_dev(struct net_device *dev)
@@ -350,14 +406,14 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
struct flowi4 fl4;
struct rtable *rt;
- rt = ip_route_output_tunnel(tunnel->net, &fl4,
- tunnel->parms.iph.protocol,
- iph->daddr, iph->saddr,
- tunnel->parms.o_key,
- RT_TOS(iph->tos),
- tunnel->parms.link);
+ init_tunnel_flow(&fl4, iph->protocol, iph->daddr,
+ iph->saddr, tunnel->parms.o_key,
+ RT_TOS(iph->tos), tunnel->parms.link);
+ rt = ip_route_output_key(tunnel->net, &fl4);
+
if (!IS_ERR(rt)) {
tdev = rt->dst.dev;
+ tunnel_dst_set(tunnel, dst_clone(&rt->dst));
ip_rt_put(rt);
}
if (dev->type != ARPHRD_ETHER)
@@ -405,7 +461,7 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
const struct tnl_ptk_info *tpi, bool log_ecn_error)
{
- struct pcpu_tstats *tstats;
+ struct pcpu_sw_netstats *tstats;
const struct iphdr *iph = ip_hdr(skb);
int err;
@@ -528,10 +584,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
struct flowi4 fl4;
u8 tos, ttl;
__be16 df;
- struct rtable *rt; /* Route to the other host */
+ struct rtable *rt = NULL; /* Route to the other host */
unsigned int max_headroom; /* The extra header space needed */
__be32 dst;
int err;
+ bool connected = true;
inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
@@ -581,27 +638,39 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
#endif
else
goto tx_error;
+
+ connected = false;
}
tos = tnl_params->tos;
if (tos & 0x1) {
tos &= ~0x1;
- if (skb->protocol == htons(ETH_P_IP))
+ if (skb->protocol == htons(ETH_P_IP)) {
tos = inner_iph->tos;
- else if (skb->protocol == htons(ETH_P_IPV6))
+ connected = false;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
+ connected = false;
+ }
}
- rt = ip_route_output_tunnel(tunnel->net, &fl4,
- protocol,
- dst, tnl_params->saddr,
- tunnel->parms.o_key,
- RT_TOS(tos),
- tunnel->parms.link);
- if (IS_ERR(rt)) {
- dev->stats.tx_carrier_errors++;
- goto tx_error;
+ init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
+ tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);
+
+ if (connected)
+ rt = (struct rtable *)tunnel_dst_check(tunnel, 0);
+
+ if (!rt) {
+ rt = ip_route_output_key(tunnel->net, &fl4);
+
+ if (IS_ERR(rt)) {
+ dev->stats.tx_carrier_errors++;
+ goto tx_error;
+ }
+ if (connected)
+ tunnel_dst_set(tunnel, dst_clone(&rt->dst));
}
+
if (rt->dst.dev == dev) {
ip_rt_put(rt);
dev->stats.collisions++;
@@ -696,6 +765,7 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn,
if (set_mtu)
dev->mtu = mtu;
}
+ tunnel_dst_reset_all(t);
netdev_state_change(dev);
}
@@ -811,6 +881,7 @@ static void ip_tunnel_dev_free(struct net_device *dev)
struct ip_tunnel *tunnel = netdev_priv(dev);
gro_cells_destroy(&tunnel->gro_cells);
+ free_percpu(tunnel->dst_cache);
free_percpu(dev->tstats);
free_netdev(dev);
}
@@ -979,18 +1050,31 @@ int ip_tunnel_init(struct net_device *dev)
int i, err;
dev->destructor = ip_tunnel_dev_free;
- dev->tstats = alloc_percpu(struct pcpu_tstats);
+ dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
for_each_possible_cpu(i) {
- struct pcpu_tstats *ipt_stats;
+ struct pcpu_sw_netstats *ipt_stats;
ipt_stats = per_cpu_ptr(dev->tstats, i);
u64_stats_init(&ipt_stats->syncp);
}
+ tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
+ if (!tunnel->dst_cache) {
+ free_percpu(dev->tstats);
+ return -ENOMEM;
+ }
+
+ for_each_possible_cpu(i) {
+ struct ip_tunnel_dst *idst = per_cpu_ptr(tunnel->dst_cache, i);
+		idst->dst = NULL;
+ spin_lock_init(&idst->lock);
+ }
+
err = gro_cells_init(&tunnel->gro_cells, dev);
if (err) {
+ free_percpu(tunnel->dst_cache);
free_percpu(dev->tstats);
return err;
}
@@ -1015,6 +1099,8 @@ void ip_tunnel_uninit(struct net_device *dev)
/* fb_tunnel_dev will be unregistered in net-exit call. */
if (itn->fb_tunnel_dev != dev)
ip_tunnel_del(netdev_priv(dev));
+
+ tunnel_dst_reset_all(tunnel);
}
EXPORT_SYMBOL_GPL(ip_tunnel_uninit);
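
The per-cpu dst cache introduced here pairs a spinlock for writers with RCU for readers: tunnel_dst_set() swaps the pointer and releases the old entry, tunnel_dst_get() takes a reference under rcu_read_lock(), and tunnel_dst_reset_all() wipes every CPU's slot whenever tunnel parameters change. A simplified userspace analogue of the writer side, using one mutex per slot where the kernel uses spinlock+RCU (structure and names local to this sketch):

#include <pthread.h>
#include <stdlib.h>

#define NSLOTS 4                   /* stand-in for one slot per CPU */

struct cached_route { int data; }; /* placeholder for the dst entry */

struct route_slot {
	pthread_mutex_t lock;
	struct cached_route *rt;
};

static struct route_slot slots[NSLOTS];

static void slots_init(void)
{
	for (int i = 0; i < NSLOTS; i++) {
		pthread_mutex_init(&slots[i].lock, NULL);
		slots[i].rt = NULL;
	}
}

static void slot_set(struct route_slot *s, struct cached_route *rt)
{
	struct cached_route *old;

	pthread_mutex_lock(&s->lock);
	old = s->rt;
	s->rt = rt;                /* publish the new entry */
	pthread_mutex_unlock(&s->lock);
	free(old);                 /* each slot owns its entry exclusively;
				    * the kernel drops a dst refcount here */
}

static void reset_all(void)        /* mirrors tunnel_dst_reset_all() */
{
	for (int i = 0; i < NSLOTS; i++)
		slot_set(&slots[i], NULL);
}
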
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 42ffbc8d65c6..6156f4ef5e91 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -56,7 +56,7 @@ int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
skb_scrub_packet(skb, xnet);
- skb->rxhash = 0;
+ skb_clear_hash(skb);
skb_dst_set(skb, &rt->dst);
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
@@ -107,8 +107,7 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
nf_reset(skb);
secpath_reset(skb);
- if (!skb->l4_rxhash)
- skb->rxhash = 0;
+ skb_clear_hash_if_not_l4(skb);
skb_dst_drop(skb);
skb->vlan_tci = 0;
skb_set_queue_mapping(skb, 0);
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 52b802a0cd8c..0783200ad8d2 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -60,7 +60,7 @@ static int vti_rcv(struct sk_buff *skb)
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
iph->saddr, iph->daddr, 0);
if (tunnel != NULL) {
- struct pcpu_tstats *tstats;
+ struct pcpu_sw_netstats *tstats;
u32 oldmark = skb->mark;
int ret;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 62212c772a4b..421a24934ffd 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -425,6 +425,7 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
goto failure;
ipv4_devconf_setall(in_dev);
+ neigh_parms_data_state_setall(in_dev->arp_parms);
IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
if (dev_open(dev))
@@ -517,6 +518,7 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
}
ipv4_devconf_setall(in_dev);
+ neigh_parms_data_state_setall(in_dev->arp_parms);
IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
rcu_read_unlock();
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 40d56073cd19..81c6910cfa92 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -39,23 +39,33 @@ config NF_CONNTRACK_PROC_COMPAT
config NF_TABLES_IPV4
depends on NF_TABLES
tristate "IPv4 nf_tables support"
-
-config NFT_REJECT_IPV4
- depends on NF_TABLES_IPV4
- tristate "nf_tables IPv4 reject support"
+ help
+ This option enables the IPv4 support for nf_tables.
config NFT_CHAIN_ROUTE_IPV4
depends on NF_TABLES_IPV4
tristate "IPv4 nf_tables route chain support"
+ help
+ This option enables the "route" chain for IPv4 in nf_tables. This
+ chain type is used to force packet re-routing after mangling header
+ fields such as the source, destination, type of service and
+ the packet mark.
config NFT_CHAIN_NAT_IPV4
depends on NF_TABLES_IPV4
depends on NF_NAT_IPV4 && NFT_NAT
tristate "IPv4 nf_tables nat chain support"
+ help
+ This option enables the "nat" chain for IPv4 in nf_tables. This
+ chain type is used to perform Network Address Translation (NAT)
+	  packet transformations such as rewriting the source and destination
+	  addresses and ports.
config NF_TABLES_ARP
depends on NF_TABLES
tristate "ARP nf_tables support"
+ help
+ This option enables the ARP support for nf_tables.
config IP_NF_IPTABLES
tristate "IP tables support (required for filtering/masq/NAT)"
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 19df72b7ba88..c16be9d58420 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -28,7 +28,6 @@ obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o
obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o
obj-$(CONFIG_NF_TABLES_IPV4) += nf_tables_ipv4.o
-obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV4) += nft_chain_route_ipv4.o
obj-$(CONFIG_NFT_CHAIN_NAT_IPV4) += nft_chain_nat_ipv4.o
obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index b969131ad1c1..5b6e0df4ccff 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -17,10 +17,6 @@
#include <linux/udp.h>
#include <linux/icmp.h>
#include <net/icmp.h>
-#include <net/ip.h>
-#include <net/tcp.h>
-#include <net/route.h>
-#include <net/dst.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv4/ipt_REJECT.h>
@@ -28,128 +24,12 @@
#include <linux/netfilter_bridge.h>
#endif
+#include <net/netfilter/ipv4/nf_reject.h>
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("Xtables: packet \"rejection\" target for IPv4");
-/* Send RST reply */
-static void send_reset(struct sk_buff *oldskb, int hook)
-{
- struct sk_buff *nskb;
- const struct iphdr *oiph;
- struct iphdr *niph;
- const struct tcphdr *oth;
- struct tcphdr _otcph, *tcph;
-
- /* IP header checks: fragment. */
- if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
- return;
-
- oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
- sizeof(_otcph), &_otcph);
- if (oth == NULL)
- return;
-
- /* No RST for RST. */
- if (oth->rst)
- return;
-
- if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
- return;
-
- /* Check checksum */
- if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
- return;
- oiph = ip_hdr(oldskb);
-
- nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
- LL_MAX_HEADER, GFP_ATOMIC);
- if (!nskb)
- return;
-
- skb_reserve(nskb, LL_MAX_HEADER);
-
- skb_reset_network_header(nskb);
- niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
- niph->version = 4;
- niph->ihl = sizeof(struct iphdr) / 4;
- niph->tos = 0;
- niph->id = 0;
- niph->frag_off = htons(IP_DF);
- niph->protocol = IPPROTO_TCP;
- niph->check = 0;
- niph->saddr = oiph->daddr;
- niph->daddr = oiph->saddr;
-
- skb_reset_transport_header(nskb);
- tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
- memset(tcph, 0, sizeof(*tcph));
- tcph->source = oth->dest;
- tcph->dest = oth->source;
- tcph->doff = sizeof(struct tcphdr) / 4;
-
- if (oth->ack)
- tcph->seq = oth->ack_seq;
- else {
- tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
- oldskb->len - ip_hdrlen(oldskb) -
- (oth->doff << 2));
- tcph->ack = 1;
- }
-
- tcph->rst = 1;
- tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr,
- niph->daddr, 0);
- nskb->ip_summed = CHECKSUM_PARTIAL;
- nskb->csum_start = (unsigned char *)tcph - nskb->head;
- nskb->csum_offset = offsetof(struct tcphdr, check);
-
- /* ip_route_me_harder expects skb->dst to be set */
- skb_dst_set_noref(nskb, skb_dst(oldskb));
-
- nskb->protocol = htons(ETH_P_IP);
- if (ip_route_me_harder(nskb, RTN_UNSPEC))
- goto free_nskb;
-
- niph->ttl = ip4_dst_hoplimit(skb_dst(nskb));
-
- /* "Never happens" */
- if (nskb->len > dst_mtu(skb_dst(nskb)))
- goto free_nskb;
-
- nf_ct_attach(nskb, oldskb);
-
-#ifdef CONFIG_BRIDGE_NETFILTER
- /* If we use ip_local_out for bridged traffic, the MAC source on
- * the RST will be ours, instead of the destination's. This confuses
- * some routers/firewalls, and they drop the packet. So we need to
- * build the eth header using the original destination's MAC as the
- * source, and send the RST packet directly.
- */
- if (oldskb->nf_bridge) {
- struct ethhdr *oeth = eth_hdr(oldskb);
- nskb->dev = oldskb->nf_bridge->physindev;
- niph->tot_len = htons(nskb->len);
- ip_send_check(niph);
- if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
- oeth->h_source, oeth->h_dest, nskb->len) < 0)
- goto free_nskb;
- dev_queue_xmit(nskb);
- } else
-#endif
- ip_local_out(nskb);
-
- return;
-
- free_nskb:
- kfree_skb(nskb);
-}
-
-static inline void send_unreach(struct sk_buff *skb_in, int code)
-{
- icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
-}
-
static unsigned int
reject_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
@@ -157,28 +37,28 @@ reject_tg(struct sk_buff *skb, const struct xt_action_param *par)
switch (reject->with) {
case IPT_ICMP_NET_UNREACHABLE:
- send_unreach(skb, ICMP_NET_UNREACH);
+ nf_send_unreach(skb, ICMP_NET_UNREACH);
break;
case IPT_ICMP_HOST_UNREACHABLE:
- send_unreach(skb, ICMP_HOST_UNREACH);
+ nf_send_unreach(skb, ICMP_HOST_UNREACH);
break;
case IPT_ICMP_PROT_UNREACHABLE:
- send_unreach(skb, ICMP_PROT_UNREACH);
+ nf_send_unreach(skb, ICMP_PROT_UNREACH);
break;
case IPT_ICMP_PORT_UNREACHABLE:
- send_unreach(skb, ICMP_PORT_UNREACH);
+ nf_send_unreach(skb, ICMP_PORT_UNREACH);
break;
case IPT_ICMP_NET_PROHIBITED:
- send_unreach(skb, ICMP_NET_ANO);
+ nf_send_unreach(skb, ICMP_NET_ANO);
break;
case IPT_ICMP_HOST_PROHIBITED:
- send_unreach(skb, ICMP_HOST_ANO);
+ nf_send_unreach(skb, ICMP_HOST_ANO);
break;
case IPT_ICMP_ADMIN_PROHIBITED:
- send_unreach(skb, ICMP_PKT_FILTERED);
+ nf_send_unreach(skb, ICMP_PKT_FILTERED);
break;
case IPT_TCP_RESET:
- send_reset(skb, par->hooknum);
+ nf_send_reset(skb, par->hooknum);
case IPT_ICMP_ECHOREPLY:
/* Doesn't happen. */
break;
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index f13bd91d9a56..a313c3fbeb46 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -423,6 +423,7 @@ static void synproxy_tg4_destroy(const struct xt_tgdtor_param *par)
static struct xt_target synproxy_tg4_reg __read_mostly = {
.name = "SYNPROXY",
.family = NFPROTO_IPV4,
+ .hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD),
.target = synproxy_tg4,
.targetsize = sizeof(struct xt_synproxy_info),
.checkentry = synproxy_tg4_check,
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index ecd8bec411c9..8127dc802865 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -548,9 +548,3 @@ static void __exit nf_conntrack_l3proto_ipv4_fini(void)
module_init(nf_conntrack_l3proto_ipv4_init);
module_exit(nf_conntrack_l3proto_ipv4_fini);
-
-void need_ipv4_conntrack(void)
-{
- return;
-}
-EXPORT_SYMBOL_GPL(need_ipv4_conntrack);
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 5f011cc89cd9..d551e31b416e 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -34,8 +34,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Author: James Morris <jmorris@intercode.com.au>
*
@@ -462,14 +461,14 @@ static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
}
if (subid < 40) {
- optr [0] = 0;
- optr [1] = subid;
+ optr[0] = 0;
+ optr[1] = subid;
} else if (subid < 80) {
- optr [0] = 1;
- optr [1] = subid - 40;
+ optr[0] = 1;
+ optr[1] = subid - 40;
} else {
- optr [0] = 2;
- optr [1] = subid - 80;
+ optr[0] = 2;
+ optr[1] = subid - 80;
}
*len = 2;
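
The cleaned-up branches implement the standard BER object-identifier rule: the first two sub-identifiers X.Y are packed into a single value as X*40 + Y, so decoding splits on the 40/80 boundaries seen above. For contrast, the encoding direction in a tiny standalone sketch:

#include <assert.h>

/* First two OID arcs X.Y pack into one value: X*40 + Y, where X is
 * 0, 1, or 2 and Y < 40 unless X == 2. Decoding inverts this, which
 * is exactly the <40 / <80 split in the hunk above. */
static unsigned int oid_first_subid(unsigned int x, unsigned int y)
{
	assert(x <= 2 && (x == 2 || y < 40));
	return x * 40 + y;
}

static void oid_split(unsigned int subid, unsigned int *x, unsigned int *y)
{
	if (subid < 40)      { *x = 0; *y = subid; }
	else if (subid < 80) { *x = 1; *y = subid - 40; }
	else                 { *x = 2; *y = subid - 80; }
}
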
diff --git a/net/ipv4/netfilter/nf_tables_arp.c b/net/ipv4/netfilter/nf_tables_arp.c
index 3e67ef1c676f..19412a4063fb 100644
--- a/net/ipv4/netfilter/nf_tables_arp.c
+++ b/net/ipv4/netfilter/nf_tables_arp.c
@@ -14,10 +14,30 @@
#include <linux/netfilter_arp.h>
#include <net/netfilter/nf_tables.h>
+static unsigned int
+nft_do_chain_arp(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct nft_pktinfo pkt;
+
+ nft_set_pktinfo(&pkt, ops, skb, in, out);
+
+ return nft_do_chain(&pkt, ops);
+}
+
static struct nft_af_info nft_af_arp __read_mostly = {
.family = NFPROTO_ARP,
.nhooks = NF_ARP_NUMHOOKS,
.owner = THIS_MODULE,
+ .nops = 1,
+ .hooks = {
+ [NF_ARP_IN] = nft_do_chain_arp,
+ [NF_ARP_OUT] = nft_do_chain_arp,
+ [NF_ARP_FORWARD] = nft_do_chain_arp,
+ },
};
static int nf_tables_arp_init_net(struct net *net)
@@ -48,32 +68,14 @@ static struct pernet_operations nf_tables_arp_net_ops = {
.exit = nf_tables_arp_exit_net,
};
-static unsigned int
-nft_do_chain_arp(const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- struct nft_pktinfo pkt;
-
- nft_set_pktinfo(&pkt, ops, skb, in, out);
-
- return nft_do_chain_pktinfo(&pkt, ops);
-}
-
-static struct nf_chain_type filter_arp = {
- .family = NFPROTO_ARP,
+static const struct nf_chain_type filter_arp = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
+ .family = NFPROTO_ARP,
+ .owner = THIS_MODULE,
.hook_mask = (1 << NF_ARP_IN) |
(1 << NF_ARP_OUT) |
(1 << NF_ARP_FORWARD),
- .fn = {
- [NF_ARP_IN] = nft_do_chain_arp,
- [NF_ARP_OUT] = nft_do_chain_arp,
- [NF_ARP_FORWARD] = nft_do_chain_arp,
- },
};
static int __init nf_tables_arp_init(void)
diff --git a/net/ipv4/netfilter/nf_tables_ipv4.c b/net/ipv4/netfilter/nf_tables_ipv4.c
index 0f4cbfeb19bd..6820c8c40842 100644
--- a/net/ipv4/netfilter/nf_tables_ipv4.c
+++ b/net/ipv4/netfilter/nf_tables_ipv4.c
@@ -18,14 +18,25 @@
#include <net/ip.h>
#include <net/netfilter/nf_tables_ipv4.h>
+static unsigned int nft_do_chain_ipv4(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct nft_pktinfo pkt;
+
+ nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+
+ return nft_do_chain(&pkt, ops);
+}
+
static unsigned int nft_ipv4_output(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- struct nft_pktinfo pkt;
-
if (unlikely(skb->len < sizeof(struct iphdr) ||
ip_hdr(skb)->ihl < sizeof(struct iphdr) / 4)) {
if (net_ratelimit())
@@ -33,19 +44,24 @@ static unsigned int nft_ipv4_output(const struct nf_hook_ops *ops,
"packet\n");
return NF_ACCEPT;
}
- nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
- return nft_do_chain_pktinfo(&pkt, ops);
+ return nft_do_chain_ipv4(ops, skb, in, out, okfn);
}
-static struct nft_af_info nft_af_ipv4 __read_mostly = {
+struct nft_af_info nft_af_ipv4 __read_mostly = {
.family = NFPROTO_IPV4,
.nhooks = NF_INET_NUMHOOKS,
.owner = THIS_MODULE,
+ .nops = 1,
.hooks = {
+ [NF_INET_LOCAL_IN] = nft_do_chain_ipv4,
[NF_INET_LOCAL_OUT] = nft_ipv4_output,
+ [NF_INET_FORWARD] = nft_do_chain_ipv4,
+ [NF_INET_PRE_ROUTING] = nft_do_chain_ipv4,
+ [NF_INET_POST_ROUTING] = nft_do_chain_ipv4,
},
};
+EXPORT_SYMBOL_GPL(nft_af_ipv4);
static int nf_tables_ipv4_init_net(struct net *net)
{
@@ -75,42 +91,28 @@ static struct pernet_operations nf_tables_ipv4_net_ops = {
.exit = nf_tables_ipv4_exit_net,
};
-static unsigned int
-nft_do_chain_ipv4(const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- struct nft_pktinfo pkt;
-
- nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
-
- return nft_do_chain_pktinfo(&pkt, ops);
-}
-
-static struct nf_chain_type filter_ipv4 = {
- .family = NFPROTO_IPV4,
+static const struct nf_chain_type filter_ipv4 = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
+ .family = NFPROTO_IPV4,
+ .owner = THIS_MODULE,
.hook_mask = (1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_FORWARD) |
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING),
- .fn = {
- [NF_INET_LOCAL_IN] = nft_do_chain_ipv4,
- [NF_INET_LOCAL_OUT] = nft_ipv4_output,
- [NF_INET_FORWARD] = nft_do_chain_ipv4,
- [NF_INET_PRE_ROUTING] = nft_do_chain_ipv4,
- [NF_INET_POST_ROUTING] = nft_do_chain_ipv4,
- },
};
static int __init nf_tables_ipv4_init(void)
{
+ int ret;
+
nft_register_chain_type(&filter_ipv4);
- return register_pernet_subsys(&nf_tables_ipv4_net_ops);
+ ret = register_pernet_subsys(&nf_tables_ipv4_net_ops);
+ if (ret < 0)
+ nft_unregister_chain_type(&filter_ipv4);
+
+ return ret;
}
static void __exit nf_tables_ipv4_exit(void)
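
The init-path fix above follows the usual kernel idiom: when a later registration step fails, the earlier steps must be unwound before returning the error, otherwise a failed module load leaves the chain type registered with no owner. The shape of the pattern, reduced to its essentials with placeholder stubs:

#include <stdio.h>

static int register_step_a(void) { return 0; }    /* stub: succeeds */
static int register_step_b(void) { return -1; }   /* stub: fails */
static void unregister_step_a(void) { puts("unwound step A"); }

static int example_init(void)
{
	int ret = register_step_a();      /* e.g. the chain type */
	if (ret < 0)
		return ret;

	ret = register_step_b();          /* e.g. the pernet subsystem */
	if (ret < 0)
		unregister_step_a();      /* unwind A before failing */
	return ret;
}

int main(void) { return example_init() < 0 ? 1 : 0; }
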
diff --git a/net/ipv4/netfilter/nft_chain_nat_ipv4.c b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
index cf2c792cd971..b5b256d45e67 100644
--- a/net/ipv4/netfilter/nft_chain_nat_ipv4.c
+++ b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
@@ -75,7 +75,7 @@ static unsigned int nf_nat_fn(const struct nf_hook_ops *ops,
nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
- ret = nft_do_chain_pktinfo(&pkt, ops);
+ ret = nft_do_chain(&pkt, ops);
if (ret != NF_ACCEPT)
return ret;
if (!nf_nat_initialized(ct, maniptype)) {
@@ -164,21 +164,21 @@ static unsigned int nf_nat_output(const struct nf_hook_ops *ops,
return ret;
}
-static struct nf_chain_type nft_chain_nat_ipv4 = {
- .family = NFPROTO_IPV4,
+static const struct nf_chain_type nft_chain_nat_ipv4 = {
.name = "nat",
.type = NFT_CHAIN_T_NAT,
+ .family = NFPROTO_IPV4,
+ .owner = THIS_MODULE,
.hook_mask = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_LOCAL_IN),
- .fn = {
+ .hooks = {
[NF_INET_PRE_ROUTING] = nf_nat_prerouting,
[NF_INET_POST_ROUTING] = nf_nat_postrouting,
[NF_INET_LOCAL_OUT] = nf_nat_output,
[NF_INET_LOCAL_IN] = nf_nat_fn,
},
- .me = THIS_MODULE,
};
static int __init nft_chain_nat_init(void)
diff --git a/net/ipv4/netfilter/nft_chain_route_ipv4.c b/net/ipv4/netfilter/nft_chain_route_ipv4.c
index 4e6bf9a3d7aa..125b66766c0a 100644
--- a/net/ipv4/netfilter/nft_chain_route_ipv4.c
+++ b/net/ipv4/netfilter/nft_chain_route_ipv4.c
@@ -47,7 +47,7 @@ static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
daddr = iph->daddr;
tos = iph->tos;
- ret = nft_do_chain_pktinfo(&pkt, ops);
+ ret = nft_do_chain(&pkt, ops);
if (ret != NF_DROP && ret != NF_QUEUE) {
iph = ip_hdr(skb);
@@ -61,15 +61,15 @@ static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
return ret;
}
-static struct nf_chain_type nft_chain_route_ipv4 = {
- .family = NFPROTO_IPV4,
+static const struct nf_chain_type nft_chain_route_ipv4 = {
.name = "route",
.type = NFT_CHAIN_T_ROUTE,
+ .family = NFPROTO_IPV4,
+ .owner = THIS_MODULE,
.hook_mask = (1 << NF_INET_LOCAL_OUT),
- .fn = {
+ .hooks = {
[NF_INET_LOCAL_OUT] = nf_route_table_hook,
},
- .me = THIS_MODULE,
};
static int __init nft_chain_route_init(void)
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 876c6ca2d8f9..cae5262a337c 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -53,8 +53,12 @@
#include <net/transp_v6.h>
#endif
+struct ping_table {
+ struct hlist_nulls_head hash[PING_HTABLE_SIZE];
+ rwlock_t lock;
+};
-struct ping_table ping_table;
+static struct ping_table ping_table;
struct pingv6_ops pingv6_ops;
EXPORT_SYMBOL_GPL(pingv6_ops);
@@ -668,8 +672,8 @@ int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
}
EXPORT_SYMBOL_GPL(ping_common_sendmsg);
-int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len)
+static int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ size_t len)
{
struct net *net = sock_net(sk);
struct flowi4 fl4;
@@ -772,7 +776,7 @@ int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
err = PTR_ERR(rt);
rt = NULL;
if (err == -ENETUNREACH)
- IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
+ IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
goto out;
}
@@ -841,10 +845,11 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (flags & MSG_ERRQUEUE) {
if (family == AF_INET) {
- return ip_recv_error(sk, msg, len);
+ return ip_recv_error(sk, msg, len, addr_len);
#if IS_ENABLED(CONFIG_IPV6)
} else if (family == AF_INET6) {
- return pingv6_ops.ipv6_recv_error(sk, msg, len);
+ return pingv6_ops.ipv6_recv_error(sk, msg, len,
+ addr_len);
#endif
}
}
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 4a0335854b89..a6c8a80ec9d6 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -279,6 +279,7 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD),
SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES),
SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS),
+ SNMP_MIB_ITEM("TCPAutoCorking", LINUX_MIB_TCPAUTOCORKING),
SNMP_MIB_SENTINEL
};
@@ -332,22 +333,22 @@ static void icmp_put(struct seq_file *seq)
atomic_long_t *ptr = net->mib.icmpmsg_statistics->mibs;
seq_puts(seq, "\nIcmp: InMsgs InErrors InCsumErrors");
- for (i=0; icmpmibmap[i].name != NULL; i++)
+ for (i = 0; icmpmibmap[i].name != NULL; i++)
seq_printf(seq, " In%s", icmpmibmap[i].name);
seq_printf(seq, " OutMsgs OutErrors");
- for (i=0; icmpmibmap[i].name != NULL; i++)
+ for (i = 0; icmpmibmap[i].name != NULL; i++)
seq_printf(seq, " Out%s", icmpmibmap[i].name);
seq_printf(seq, "\nIcmp: %lu %lu %lu",
snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INMSGS),
snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INERRORS),
snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_CSUMERRORS));
- for (i=0; icmpmibmap[i].name != NULL; i++)
+ for (i = 0; icmpmibmap[i].name != NULL; i++)
seq_printf(seq, " %lu",
atomic_long_read(ptr + icmpmibmap[i].index));
seq_printf(seq, " %lu %lu",
snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
- for (i=0; icmpmibmap[i].name != NULL; i++)
+ for (i = 0; icmpmibmap[i].name != NULL; i++)
seq_printf(seq, " %lu",
atomic_long_read(ptr + (icmpmibmap[i].index | 0x100)));
}
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index ce848461acbb..46d6a1c923a8 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -31,10 +31,6 @@
const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly;
const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly;
-/*
- * Add a protocol handler to the hash tables
- */
-
int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
{
if (!prot->netns_ok) {
@@ -55,10 +51,6 @@ int inet_add_offload(const struct net_offload *prot, unsigned char protocol)
}
EXPORT_SYMBOL(inet_add_offload);
-/*
- * Remove a protocol from the hash tables.
- */
-
int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol)
{
int ret;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 5cb8ddb505ee..81e6cfd5a365 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -575,7 +575,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE,
inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
- inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP |
+ inet_sk_flowi_flags(sk) |
(inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
daddr, saddr, 0, 0);
@@ -697,7 +697,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
goto out;
if (flags & MSG_ERRQUEUE) {
- err = ip_recv_error(sk, msg, len);
+ err = ip_recv_error(sk, msg, len, addr_len);
goto out;
}
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index b95331e6c077..f2ed13c2125f 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -121,7 +121,7 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;
/* Cookie is now reduced to (count * 2^24) ^ (hash % 2^24) */
- diff = (count - (cookie >> COOKIEBITS)) & ((__u32) - 1 >> COOKIEBITS);
+ diff = (count - (cookie >> COOKIEBITS)) & ((__u32) -1 >> COOKIEBITS);
if (diff >= MAX_SYNCOOKIE_AGE)
return (__u32)-1;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 3d69ec8dac57..1d2480ac2bb6 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -286,13 +286,6 @@ static struct ctl_table ipv4_table[] = {
.extra2 = &ip_ttl_max,
},
{
- .procname = "ip_no_pmtu_disc",
- .data = &ipv4_config.no_pmtu_disc,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec
- },
- {
.procname = "ip_nonlocal_bind",
.data = &sysctl_ip_nonlocal_bind,
.maxlen = sizeof(int),
@@ -707,7 +700,7 @@ static struct ctl_table ipv4_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
- {
+ {
.procname = "tcp_thin_dupack",
.data = &sysctl_tcp_thin_dupack,
.maxlen = sizeof(int),
@@ -733,6 +726,15 @@ static struct ctl_table ipv4_table[] = {
.extra2 = &gso_max_segs,
},
{
+ .procname = "tcp_autocorking",
+ .data = &sysctl_tcp_autocorking,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ {
.procname = "udp_mem",
.data = &sysctl_udp_mem,
.maxlen = sizeof(sysctl_udp_mem),
@@ -822,6 +824,13 @@ static struct ctl_table ipv4_net_table[] = {
.mode = 0644,
.proc_handler = ipv4_local_port_range,
},
+ {
+ .procname = "ip_no_pmtu_disc",
+ .data = &init_net.ipv4.sysctl_ip_no_pmtu_disc,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
{ }
};
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3dc0c6cf02a8..e2a40515e665 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -285,6 +285,8 @@ int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
int sysctl_tcp_min_tso_segs __read_mostly = 2;
+int sysctl_tcp_autocorking __read_mostly = 1;
+
struct percpu_counter tcp_orphan_count;
EXPORT_SYMBOL_GPL(tcp_orphan_count);
@@ -379,7 +381,7 @@ void tcp_init_sock(struct sock *sk)
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- skb_queue_head_init(&tp->out_of_order_queue);
+ __skb_queue_head_init(&tp->out_of_order_queue);
tcp_init_xmit_timers(sk);
tcp_prequeue_init(tp);
INIT_LIST_HEAD(&tp->tsq_node);
@@ -619,19 +621,58 @@ static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
tp->snd_up = tp->write_seq;
}
-static inline void tcp_push(struct sock *sk, int flags, int mss_now,
- int nonagle)
+/* If a not-yet-filled skb is pushed, do not send it if
+ * we have data packets in Qdisc or NIC queues:
+ * because TX completion will happen shortly, it gives a chance
+ * to coalesce future sendmsg() payload into this skb, without
+ * the need for a timer, and with no latency trade-off.
+ * As packets containing data payload have a bigger truesize
+ * than pure ACK (dataless) packets, the last checks prevent
+ * autocorking if we only have an ACK in the Qdisc/NIC queues,
+ * or if TX completion was delayed after we processed the ACK packet.
+ */
+static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
+ int size_goal)
{
- if (tcp_send_head(sk)) {
- struct tcp_sock *tp = tcp_sk(sk);
+ return skb->len < size_goal &&
+ sysctl_tcp_autocorking &&
+ skb != tcp_write_queue_head(sk) &&
+ atomic_read(&sk->sk_wmem_alloc) > skb->truesize;
+}
+
+static void tcp_push(struct sock *sk, int flags, int mss_now,
+ int nonagle, int size_goal)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *skb;
- if (!(flags & MSG_MORE) || forced_push(tp))
- tcp_mark_push(tp, tcp_write_queue_tail(sk));
+ if (!tcp_send_head(sk))
+ return;
+
+ skb = tcp_write_queue_tail(sk);
+ if (!(flags & MSG_MORE) || forced_push(tp))
+ tcp_mark_push(tp, skb);
+
+ tcp_mark_urg(tp, flags);
+
+ if (tcp_should_autocork(sk, skb, size_goal)) {
- tcp_mark_urg(tp, flags);
- __tcp_push_pending_frames(sk, mss_now,
- (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
+ /* avoid atomic op if TSQ_THROTTLED bit is already set */
+ if (!test_bit(TSQ_THROTTLED, &tp->tsq_flags)) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
+ set_bit(TSQ_THROTTLED, &tp->tsq_flags);
+ }
+ /* It is possible TX completion already happened
+ * before we set TSQ_THROTTLED.
+ */
+ if (atomic_read(&sk->sk_wmem_alloc) > skb->truesize)
+ return;
}
+
+ if (flags & MSG_MORE)
+ nonagle = TCP_NAGLE_CORK;
+
+ __tcp_push_pending_frames(sk, mss_now, nonagle);
}
static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
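
Taken together, the new tcp_push() defers the actual transmit whenever tcp_should_autocork() fires and a TX completion has not already drained the queues below. A compact userspace restatement of that predicate follows; the struct fields are stand-ins for the kernel's sock/sk_buff members, not the real types:

    #include <stdbool.h>
    #include <stdint.h>

    static int sysctl_tcp_autocorking = 1;    /* new knob, default on */

    struct fake_skb  { uint32_t len, truesize; };
    struct fake_sock {
        uint32_t wmem_alloc;                  /* stands in for sk_wmem_alloc */
        const struct fake_skb *write_queue_head;
    };

    /* Defer the push only if: the skb still has room, autocorking is on,
     * it is not the sole skb in the write queue, and more than this skb's
     * own truesize is still charged to the socket -- i.e. real payload
     * (not just an ACK) sits in the Qdisc/NIC below us. */
    static bool should_autocork(const struct fake_sock *sk,
                                const struct fake_skb *skb, uint32_t size_goal)
    {
        return skb->len < size_goal &&
               sysctl_tcp_autocorking &&
               skb != sk->write_queue_head &&
               sk->wmem_alloc > skb->truesize;
    }

    int main(void)
    {
        struct fake_skb skb = { .len = 1000, .truesize = 2048 };
        struct fake_sock sk = { .wmem_alloc = 6144, .write_queue_head = 0 };

        return should_autocork(&sk, &skb, 64 * 1024) ? 0 : 1;  /* corks: 0 */
    }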
@@ -934,7 +975,8 @@ new_segment:
wait_for_sndbuf:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
- tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
+ tcp_push(sk, flags & ~MSG_MORE, mss_now,
+ TCP_NAGLE_PUSH, size_goal);
if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
goto do_error;
@@ -944,7 +986,7 @@ wait_for_memory:
out:
if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
- tcp_push(sk, flags, mss_now, tp->nonagle);
+ tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
return copied;
do_error:
@@ -1225,7 +1267,8 @@ wait_for_sndbuf:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
if (copied)
- tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
+ tcp_push(sk, flags & ~MSG_MORE, mss_now,
+ TCP_NAGLE_PUSH, size_goal);
if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
goto do_error;
@@ -1236,7 +1279,7 @@ wait_for_memory:
out:
if (copied)
- tcp_push(sk, flags, mss_now, tp->nonagle);
+ tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
release_sock(sk);
return copied + copied_syn;
@@ -1425,7 +1468,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
do {
if (dma_async_is_tx_complete(tp->ucopy.dma_chan,
last_issued, &done,
- &used) == DMA_SUCCESS) {
+ &used) == DMA_COMPLETE) {
/* Safe to free early-copied skbs now */
__skb_queue_purge(&sk->sk_async_wait_queue);
break;
@@ -1433,7 +1476,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
struct sk_buff *skb;
while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
(dma_async_is_complete(skb->dma_cookie, done,
- used) == DMA_SUCCESS)) {
+ used) == DMA_COMPLETE)) {
__skb_dequeue(&sk->sk_async_wait_queue);
kfree_skb(skb);
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c53b7f35c51d..65cf90e063d5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -766,7 +766,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
/* Calculate rto without backoff. This is the second half of Van Jacobson's
* routine referred to above.
*/
-void tcp_set_rto(struct sock *sk)
+static void tcp_set_rto(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
/* Old crap is replaced with new one. 8)
@@ -3686,7 +3686,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
int opcode = *ptr++;
int opsize;
- switch(opcode) {
+ switch (opcode) {
case TCPOPT_EOL:
return NULL;
case TCPOPT_NOP:
@@ -4046,7 +4046,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
WARN_ON(before(tp->rcv_nxt, sp->end_seq));
/* Zap this SACK, by moving forward any other SACKS. */
- for (i=this_sack+1; i < num_sacks; i++)
+ for (i = this_sack+1; i < num_sacks; i++)
tp->selective_acks[i-1] = tp->selective_acks[i];
num_sacks--;
continue;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 59a6f8b90cd9..7297b56c28c7 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -173,11 +173,11 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
IPPROTO_TCP,
- orig_sport, orig_dport, sk, true);
+ orig_sport, orig_dport, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
if (err == -ENETUNREACH)
- IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
+ IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
return err;
}
@@ -827,7 +827,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
const struct inet_request_sock *ireq = inet_rsk(req);
struct flowi4 fl4;
int err = -1;
- struct sk_buff * skb;
+ struct sk_buff *skb;
/* First, grab a route. */
if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index 03e9154f7e68..f7e522c558ba 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -6,13 +6,6 @@
#include <linux/memcontrol.h>
#include <linux/module.h>
-static void memcg_tcp_enter_memory_pressure(struct sock *sk)
-{
- if (sk->sk_cgrp->memory_pressure)
- sk->sk_cgrp->memory_pressure = 1;
-}
-EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure);
-
int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
/*
@@ -60,7 +53,6 @@ EXPORT_SYMBOL(tcp_destroy_cgroup);
static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
{
struct cg_proto *cg_proto;
- u64 old_lim;
int i;
int ret;
@@ -71,7 +63,6 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
if (val > RES_COUNTER_MAX)
val = RES_COUNTER_MAX;
- old_lim = res_counter_read_u64(&cg_proto->memory_allocated, RES_LIMIT);
ret = res_counter_set_limit(&cg_proto->memory_allocated, val);
if (ret)
return ret;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 97b684159861..3aa9e3247991 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -425,7 +425,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
tcp_set_ca_state(newsk, TCP_CA_Open);
tcp_init_xmit_timers(newsk);
- skb_queue_head_init(&newtp->out_of_order_queue);
+ __skb_queue_head_init(&newtp->out_of_order_queue);
newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
newtp->rx_opt.saw_tstamp = 0;
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index a2b68a108eae..771a3950d87a 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -197,7 +197,8 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
goto out_check_final;
found:
- flush = NAPI_GRO_CB(p)->flush;
+	/* Include the IP ID check below from the innermost IP hdr */
+ flush = NAPI_GRO_CB(p)->flush | NAPI_GRO_CB(p)->flush_id;
flush |= (__force int)(flags & TCP_FLAG_CWR);
flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
@@ -230,7 +231,7 @@ out_check_final:
pp = head;
out:
- NAPI_GRO_CB(skb)->flush |= flush;
+ NAPI_GRO_CB(skb)->flush |= (flush != 0);
return pp;
}
@@ -240,7 +241,7 @@ int tcp_gro_complete(struct sk_buff *skb)
{
struct tcphdr *th = tcp_hdr(skb);
- skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_start = (unsigned char *)th - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
skb->ip_summed = CHECKSUM_PARTIAL;
@@ -272,45 +273,45 @@ static int tcp_v4_gso_send_check(struct sk_buff *skb)
static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
+	/* Use the IP hdr immediately preceding this transport hdr */
const struct iphdr *iph = skb_gro_network_header(skb);
__wsum wsum;
- __sum16 sum;
+
+ /* Don't bother verifying checksum if we're going to flush anyway. */
+ if (NAPI_GRO_CB(skb)->flush)
+ goto skip_csum;
+
+ wsum = NAPI_GRO_CB(skb)->csum;
switch (skb->ip_summed) {
+ case CHECKSUM_NONE:
+ wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb),
+ 0);
+
+ /* fall through */
+
case CHECKSUM_COMPLETE:
if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
- skb->csum)) {
+ wsum)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
break;
}
-flush:
+
NAPI_GRO_CB(skb)->flush = 1;
return NULL;
-
- case CHECKSUM_NONE:
- wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
- skb_gro_len(skb), IPPROTO_TCP, 0);
- sum = csum_fold(skb_checksum(skb,
- skb_gro_offset(skb),
- skb_gro_len(skb),
- wsum));
- if (sum)
- goto flush;
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- break;
}
+skip_csum:
return tcp_gro_receive(head, skb);
}
-static int tcp4_gro_complete(struct sk_buff *skb)
+static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
const struct iphdr *iph = ip_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
- th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
- iph->saddr, iph->daddr, 0);
+ th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
+ iph->daddr, 0);
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
return tcp_gro_complete(skb);
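
The rewrite above folds the old CHECKSUM_NONE branch into the CHECKSUM_COMPLETE path: if the hardware left no sum, skb_checksum() first accumulates one over the segment, and either way the pseudo-header check decides whether GRO may proceed. A hedged userspace sketch of that one's-complement verification, with a simplified byte-wise accumulator standing in for skb_checksum():

    #include <stdint.h>
    #include <stddef.h>

    /* Simplified 16-bit one's-complement accumulation (big-endian pairing),
     * standing in for the kernel's csum_partial()/skb_checksum(). */
    static uint32_t csum_partial(const uint8_t *p, size_t len, uint32_t sum)
    {
        while (len > 1) { sum += (uint32_t)(p[0] << 8 | p[1]); p += 2; len -= 2; }
        if (len) sum += (uint32_t)p[0] << 8;
        while (sum >> 16) sum = (sum & 0xffff) + (sum >> 16);
        return sum;
    }

    /* Verify segment + pseudo-header: a valid TCP checksum makes the total
     * fold to 0xffff, so the folded complement is zero. */
    static int csum_ok(uint32_t pseudo_hdr_sum, uint32_t segment_sum)
    {
        uint32_t sum = pseudo_hdr_sum + segment_sum;
        while (sum >> 16) sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum == 0;
    }

    int main(void)
    {
        /* Toy 4-byte "segment" whose words sum to 0xffff with a zero
         * pseudo-header constant. */
        const uint8_t seg[4] = { 0x12, 0x34, 0xed, 0xcb };
        return csum_ok(0x0000, csum_partial(seg, 4, 0)) ? 0 : 1;
    }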
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7820f3a7dd70..03d26b85eab8 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -363,15 +363,17 @@ static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
*/
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
TCP_SKB_CB(skb)->tcp_flags = flags;
TCP_SKB_CB(skb)->sacked = 0;
- skb_shinfo(skb)->gso_segs = 1;
- skb_shinfo(skb)->gso_size = 0;
- skb_shinfo(skb)->gso_type = 0;
+ shinfo->gso_segs = 1;
+ shinfo->gso_size = 0;
+ shinfo->gso_type = 0;
TCP_SKB_CB(skb)->seq = seq;
if (flags & (TCPHDR_SYN | TCPHDR_FIN))
@@ -406,7 +408,7 @@ struct tcp_out_options {
* Beware: Something in the Internet is very sensitive to the ordering of
* TCP options, we learned this through the hard way, so be careful here.
* Luckily we can at least blame others for their non-compliance but from
- * inter-operatibility perspective it seems that we're somewhat stuck with
+ * inter-operability perspective it seems that we're somewhat stuck with
* the ordering which we have been using if we want to keep working with
* those broken things (not that it currently hurts anybody as there isn't
* particular reason why the ordering would need to be changed).
@@ -679,7 +681,7 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
*
* Its important tcp_wfree() can be replaced by sock_wfree() in the event skb
* needs to be reallocated in a driver.
- * The invariant being skb->truesize substracted from sk->sk_wmem_alloc
+ * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc
*
* Since transmit from skb destructor is forbidden, we use a tasklet
* to process all sockets that eventually need to send more skbs.
@@ -699,9 +701,9 @@ static void tcp_tsq_handler(struct sock *sk)
tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
}
/*
- * One tasklest per cpu tries to send more skbs.
+ * One tasklet per cpu tries to send more skbs.
* We run in tasklet context but need to disable irqs when
- * transfering tsq->head because tcp_wfree() might
+ * transferring tsq->head because tcp_wfree() might
* interrupt us (non NAPI drivers)
*/
static void tcp_tasklet_func(unsigned long data)
@@ -795,7 +797,7 @@ void __init tcp_tasklet_init(void)
/*
* Write buffer destructor automatically called from kfree_skb.
- * We cant xmit new skbs from this context, as we might already
+ * We can't xmit new skbs from this context, as we might already
* hold qdisc lock.
*/
void tcp_wfree(struct sk_buff *skb)
@@ -986,6 +988,8 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
unsigned int mss_now)
{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
/* Make sure we own this skb before messing gso_size/gso_segs */
WARN_ON_ONCE(skb_cloned(skb));
@@ -993,13 +997,13 @@ static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
/* Avoid the costly divide in the normal
* non-TSO case.
*/
- skb_shinfo(skb)->gso_segs = 1;
- skb_shinfo(skb)->gso_size = 0;
- skb_shinfo(skb)->gso_type = 0;
+ shinfo->gso_segs = 1;
+ shinfo->gso_size = 0;
+ shinfo->gso_type = 0;
} else {
- skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
- skb_shinfo(skb)->gso_size = mss_now;
- skb_shinfo(skb)->gso_type = sk->sk_gso_type;
+ shinfo->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
+ shinfo->gso_size = mss_now;
+ shinfo->gso_type = sk->sk_gso_type;
}
}
@@ -1146,6 +1150,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
*/
static void __pskb_trim_head(struct sk_buff *skb, int len)
{
+ struct skb_shared_info *shinfo;
int i, k, eat;
eat = min_t(int, len, skb_headlen(skb));
@@ -1157,23 +1162,24 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
}
eat = len;
k = 0;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ shinfo = skb_shinfo(skb);
+ for (i = 0; i < shinfo->nr_frags; i++) {
+ int size = skb_frag_size(&shinfo->frags[i]);
if (size <= eat) {
skb_frag_unref(skb, i);
eat -= size;
} else {
- skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
+ shinfo->frags[k] = shinfo->frags[i];
if (eat) {
- skb_shinfo(skb)->frags[k].page_offset += eat;
- skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
+ shinfo->frags[k].page_offset += eat;
+ skb_frag_size_sub(&shinfo->frags[k], eat);
eat = 0;
}
k++;
}
}
- skb_shinfo(skb)->nr_frags = k;
+ shinfo->nr_frags = k;
skb_reset_tail_pointer(skb);
skb->data_len -= len;
@@ -1378,23 +1384,51 @@ static void tcp_cwnd_validate(struct sock *sk)
}
}
-/* Returns the portion of skb which can be sent right away without
- * introducing MSS oddities to segment boundaries. In rare cases where
- * mss_now != mss_cache, we will request caller to create a small skb
- * per input skb which could be mostly avoided here (if desired).
- *
- * We explicitly want to create a request for splitting write queue tail
- * to a small skb for Nagle purposes while avoiding unnecessary modulos,
- * thus all the complexity (cwnd_len is always MSS multiple which we
- * return whenever allowed by the other factors). Basically we need the
- * modulo only when the receiver window alone is the limiting factor or
- * when we would be allowed to send the split-due-to-Nagle skb fully.
+/* Minshall's variant of the Nagle send check. */
+static bool tcp_minshall_check(const struct tcp_sock *tp)
+{
+ return after(tp->snd_sml, tp->snd_una) &&
+ !after(tp->snd_sml, tp->snd_nxt);
+}
+
+/* Update snd_sml if this skb is under mss.
+ * Note that a TSO packet might end with a sub-mss segment.
+ * The test is really:
+ *   if ((skb->len % mss) != 0)
+ *           tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
+ * But we can avoid doing the divide again given we already have
+ * skb_pcount = skb->len / mss_now.
+ */
+static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
+ const struct sk_buff *skb)
+{
+ if (skb->len < tcp_skb_pcount(skb) * mss_now)
+ tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
+}
+
+/* Return false if the packet can be sent now without violating Nagle's rules:
+ * 1. It is full sized. (provided by caller in %partial bool)
+ * 2. Or it contains FIN. (already checked by caller)
+ * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
+ * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
+ * With Minshall's modification: all sent small packets are ACKed.
*/
-static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
- unsigned int mss_now, unsigned int max_segs)
+static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
+ unsigned int mss_now, int nonagle)
+{
+ return partial &&
+ ((nonagle & TCP_NAGLE_CORK) ||
+ (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
+}
+
+/* Returns the portion of skb which can be sent right away */
+static unsigned int tcp_mss_split_point(const struct sock *sk,
+ const struct sk_buff *skb,
+ unsigned int mss_now,
+ unsigned int max_segs,
+ int nonagle)
{
const struct tcp_sock *tp = tcp_sk(sk);
- u32 needed, window, max_len;
+ u32 partial, needed, window, max_len;
window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
max_len = mss_now * max_segs;
@@ -1407,7 +1441,15 @@ static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_b
if (max_len <= needed)
return max_len;
- return needed - needed % mss_now;
+ partial = needed % mss_now;
+ /* If last segment is not a full MSS, check if Nagle rules allow us
+ * to include this last segment in this skb.
+	 * Otherwise, we'll split the skb at the last MSS boundary.
+ */
+ if (tcp_nagle_check(partial != 0, tp, mss_now, nonagle))
+ return needed - partial;
+
+ return needed;
}
/* Can at least one segment of SKB be sent right now, according to the
@@ -1447,28 +1489,6 @@ static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
return tso_segs;
}
-/* Minshall's variant of the Nagle send check. */
-static inline bool tcp_minshall_check(const struct tcp_sock *tp)
-{
- return after(tp->snd_sml, tp->snd_una) &&
- !after(tp->snd_sml, tp->snd_nxt);
-}
-
-/* Return false, if packet can be sent now without violation Nagle's rules:
- * 1. It is full sized.
- * 2. Or it contains FIN. (already checked by caller)
- * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
- * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
- * With Minshall's modification: all sent small packets are ACKed.
- */
-static inline bool tcp_nagle_check(const struct tcp_sock *tp,
- const struct sk_buff *skb,
- unsigned int mss_now, int nonagle)
-{
- return skb->len < mss_now &&
- ((nonagle & TCP_NAGLE_CORK) ||
- (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
-}
/* Return true if the Nagle test allows this packet to be
* sent now.
@@ -1489,7 +1509,7 @@ static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buf
if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
return true;
- if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
+ if (!tcp_nagle_check(skb->len < cur_mss, tp, cur_mss, nonagle))
return true;
return false;
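
The refactor threads a precomputed "partial" flag through tcp_nagle_check() so that tcp_mss_split_point() can apply the same test to its trailing sub-MSS remainder. A self-contained restatement of the decision, with the 32-bit sequence-space helper written out; the tp fields are stand-ins for the kernel's tcp_sock members:

    #include <stdbool.h>
    #include <stdint.h>

    #define TCP_NAGLE_CORK 2    /* socket is corked */

    /* true if seq a is later than b in 32-bit wrap-around sequence space */
    static bool seq_after(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) > 0;
    }

    struct tp { uint32_t snd_sml, snd_una, snd_nxt, packets_out; };

    /* Minshall: a previously sent small packet is still unacknowledged. */
    static bool minshall_check(const struct tp *tp)
    {
        return seq_after(tp->snd_sml, tp->snd_una) &&
               !seq_after(tp->snd_sml, tp->snd_nxt);
    }

    /* partial: the candidate transmission ends in a sub-MSS segment.
     * Nagle forbids sending it while corked, or while an earlier small
     * packet is in flight (Minshall's refinement of "any data in flight"). */
    static bool nagle_check(bool partial, const struct tp *tp, int nonagle)
    {
        return partial &&
               ((nonagle & TCP_NAGLE_CORK) ||
                (!nonagle && tp->packets_out && minshall_check(tp)));
    }

    int main(void)
    {
        struct tp tp = { .snd_sml = 105, .snd_una = 100, .snd_nxt = 110,
                         .packets_out = 1 };
        return nagle_check(true, &tp, 0) ? 0 : 1;   /* blocked: returns 0 */
    }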
@@ -1892,7 +1912,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
limit = tcp_mss_split_point(sk, skb, mss_now,
min_t(unsigned int,
cwnd_quota,
- sk->sk_gso_max_segs));
+ sk->sk_gso_max_segs),
+ nonagle);
if (skb->len > limit &&
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
@@ -2756,7 +2777,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
EXPORT_SYMBOL(tcp_make_synack);
/* Do all connect socket setups that can be done AF independent. */
-void tcp_connect_init(struct sock *sk)
+static void tcp_connect_init(struct sock *sk)
{
const struct dst_entry *dst = __sk_dst_get(sk);
struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 8b97d71e193b..1f2d37613c9e 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -38,7 +38,7 @@ MODULE_DESCRIPTION("TCP cwnd snooper");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.1");
-static int port __read_mostly = 0;
+static int port __read_mostly;
MODULE_PARM_DESC(port, "Port to match (0=all)");
module_param(port, int, 0);
@@ -46,7 +46,7 @@ static unsigned int bufsize __read_mostly = 4096;
MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)");
module_param(bufsize, uint, 0);
-static unsigned int fwmark __read_mostly = 0;
+static unsigned int fwmark __read_mostly;
MODULE_PARM_DESC(fwmark, "skb mark to match (0=no mark)");
module_param(fwmark, uint, 0);
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index a347a078ee07..1a8d271f994d 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -3,7 +3,7 @@
* YeAH TCP
*
* For further details look at:
- * http://wil.cs.caltech.edu/pfldnet2007/paper/YeAH_TCP.pdf
+ * https://web.archive.org/web/20080316215752/http://wil.cs.caltech.edu/pfldnet2007/paper/YeAH_TCP.pdf
*
*/
#include <linux/mm.h>
@@ -15,13 +15,13 @@
#include "tcp_vegas.h"
-#define TCP_YEAH_ALPHA 80 //lin number of packets queued at the bottleneck
-#define TCP_YEAH_GAMMA 1 //lin fraction of queue to be removed per rtt
-#define TCP_YEAH_DELTA 3 //log minimum fraction of cwnd to be removed on loss
-#define TCP_YEAH_EPSILON 1 //log maximum fraction to be removed on early decongestion
-#define TCP_YEAH_PHY 8 //lin maximum delta from base
-#define TCP_YEAH_RHO 16 //lin minimum number of consecutive rtt to consider competition on loss
-#define TCP_YEAH_ZETA 50 //lin minimum number of state switchs to reset reno_count
+#define TCP_YEAH_ALPHA 80 /* number of packets queued at the bottleneck */
+#define TCP_YEAH_GAMMA 1 /* fraction of queue to be removed per rtt */
+#define TCP_YEAH_DELTA 3 /* log minimum fraction of cwnd to be removed on loss */
+#define TCP_YEAH_EPSILON 1 /* log maximum fraction to be removed on early decongestion */
+#define TCP_YEAH_PHY 8 /* maximum delta from base */
+#define TCP_YEAH_RHO 16 /* minimum number of consecutive rtt to consider competition on loss */
+#define TCP_YEAH_ZETA 50 /* minimum number of state switches to reset reno_count */
#define TCP_SCALABLE_AI_CNT 100U
@@ -214,9 +214,9 @@ static u32 tcp_yeah_ssthresh(struct sock *sk) {
if (yeah->doing_reno_now < TCP_YEAH_RHO) {
reduction = yeah->lastQ;
- reduction = min( reduction, max(tp->snd_cwnd>>1, 2U) );
+ reduction = min(reduction, max(tp->snd_cwnd>>1, 2U));
- reduction = max( reduction, tp->snd_cwnd >> TCP_YEAH_DELTA);
+ reduction = max(reduction, tp->snd_cwnd >> TCP_YEAH_DELTA);
} else
reduction = max(tp->snd_cwnd>>1, 2U);
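
The cleanup above is whitespace only, but the clamp it tidies is easy to restate: on loss, YeAH reduces by the measured queue backlog, bounded above by half the window (never below two segments) and below by cwnd >> TCP_YEAH_DELTA. A small sketch of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    #define TCP_YEAH_DELTA 3

    static uint32_t umin(uint32_t a, uint32_t b) { return a < b ? a : b; }
    static uint32_t umax(uint32_t a, uint32_t b) { return a > b ? a : b; }

    /* reduction = clamp(lastQ, cwnd >> TCP_YEAH_DELTA, max(cwnd/2, 2)) */
    static uint32_t yeah_reduction(uint32_t lastQ, uint32_t snd_cwnd)
    {
        uint32_t r = umin(lastQ, umax(snd_cwnd >> 1, 2U));
        return umax(r, snd_cwnd >> TCP_YEAH_DELTA);
    }

    int main(void)
    {
        printf("%u\n", yeah_reduction(3, 80));   /* 10: floor cwnd>>3 applies */
        printf("%u\n", yeah_reduction(90, 80));  /* 40: capped at cwnd/2 */
        return 0;
    }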
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5944d7d668dd..80f649fbee63 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -560,15 +560,11 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
__be16 sport, __be16 dport,
struct udp_table *udptable)
{
- struct sock *sk;
const struct iphdr *iph = ip_hdr(skb);
- if (unlikely(sk = skb_steal_sock(skb)))
- return sk;
- else
- return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport,
- iph->daddr, dport, inet_iif(skb),
- udptable);
+ return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport,
+ iph->daddr, dport, inet_iif(skb),
+ udptable);
}
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
@@ -990,7 +986,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
fl4 = &fl4_stack;
flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE, sk->sk_protocol,
- inet_sk_flowi_flags(sk)|FLOWI_FLAG_CAN_SLEEP,
+ inet_sk_flowi_flags(sk),
faddr, saddr, dport, inet->inet_sport);
security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
@@ -999,7 +995,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
err = PTR_ERR(rt);
rt = NULL;
if (err == -ENETUNREACH)
- IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
+ IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
goto out;
}
@@ -1098,6 +1094,9 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
struct udp_sock *up = udp_sk(sk);
int ret;
+ if (flags & MSG_SENDPAGE_NOTLAST)
+ flags |= MSG_MORE;
+
if (!up->pending) {
struct msghdr msg = { .msg_flags = flags|MSG_MORE };
@@ -1236,7 +1235,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
bool slow;
if (flags & MSG_ERRQUEUE)
- return ip_recv_error(sk, msg, len);
+ return ip_recv_error(sk, msg, len, addr_len);
try_again:
skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
@@ -1600,12 +1599,16 @@ static void flush_stack(struct sock **stack, unsigned int count,
kfree_skb(skb1);
}
-static void udp_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+/* For TCP sockets, sk_rx_dst is protected by the socket lock.
+ * For UDP, we use xchg() to guard against concurrent changes.
+ */
+static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
- struct dst_entry *dst = skb_dst(skb);
+ struct dst_entry *old;
dst_hold(dst);
- sk->sk_rx_dst = dst;
+ old = xchg(&sk->sk_rx_dst, dst);
+ dst_release(old);
}
/*
@@ -1736,15 +1739,16 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
if (udp4_csum_init(skb, uh, proto))
goto csum_error;
- if (skb->sk) {
+ sk = skb_steal_sock(skb);
+ if (sk) {
+ struct dst_entry *dst = skb_dst(skb);
int ret;
- sk = skb->sk;
- if (unlikely(sk->sk_rx_dst == NULL))
- udp_sk_rx_dst_set(sk, skb);
+ if (unlikely(sk->sk_rx_dst != dst))
+ udp_sk_rx_dst_set(sk, dst);
ret = udp_queue_rcv_skb(sk, skb);
-
+ sock_put(sk);
/* a return value > 0 means to resubmit the input, but
* it wants the return to be -protocol, or 0
*/
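
With skb_steal_sock() now taking a reference, the cached route must be swapped rather than overwritten: udp_sk_rx_dst_set() above holds the new dst, publishes it with xchg(), and releases whatever was there before, so two softirqs racing on the same socket cannot leak a route. A userspace sketch using C11 atomic_exchange in place of the kernel's xchg(); the dst refcounting here is simplified to plain counters:

    #include <stdatomic.h>
    #include <stddef.h>

    struct dst { int refcnt; };                      /* toy dst_entry */

    static void dst_hold(struct dst *d)    { if (d) d->refcnt++; }
    static void dst_release(struct dst *d) { if (d) d->refcnt--; }

    struct sock { _Atomic(struct dst *) rx_dst; };   /* toy socket */

    /* Grab a reference on the new route, publish it atomically, then drop
     * the reference the old cached route was holding. Concurrent callers
     * each release exactly the pointer they displaced, so nothing leaks. */
    static void rx_dst_set(struct sock *sk, struct dst *dst)
    {
        dst_hold(dst);
        dst_release(atomic_exchange(&sk->rx_dst, dst));
    }

    int main(void)
    {
        struct dst a = { 1 }, b = { 1 };
        struct sock sk = { NULL };

        rx_dst_set(&sk, &a);   /* a.refcnt == 2 */
        rx_dst_set(&sk, &b);   /* a.refcnt back to 1, b.refcnt == 2 */
        return 0;
    }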
@@ -1910,17 +1914,20 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net,
void udp_v4_early_demux(struct sk_buff *skb)
{
- const struct iphdr *iph = ip_hdr(skb);
- const struct udphdr *uh = udp_hdr(skb);
+ struct net *net = dev_net(skb->dev);
+ const struct iphdr *iph;
+ const struct udphdr *uh;
struct sock *sk;
struct dst_entry *dst;
- struct net *net = dev_net(skb->dev);
int dif = skb->dev->ifindex;
/* validate the packet */
if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
return;
+ iph = ip_hdr(skb);
+ uh = udp_hdr(skb);
+
if (skb->pkt_type == PACKET_BROADCAST ||
skb->pkt_type == PACKET_MULTICAST)
sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
@@ -2471,6 +2478,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
+ u16 mac_offset = skb->mac_header;
int mac_len = skb->mac_len;
int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
__be16 protocol = skb->protocol;
@@ -2490,8 +2498,11 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
/* segment inner packet. */
enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
segs = skb_mac_gso_segment(skb, enc_features);
- if (!segs || IS_ERR(segs))
+ if (!segs || IS_ERR(segs)) {
+ skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
+ mac_len);
goto out;
+ }
outer_hlen = skb_tnl_header_len(skb);
skb = segs;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 83206de2bc76..79c62bdcd3c5 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -41,6 +41,14 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
+ int offset;
+ __wsum csum;
+
+ if (skb->encapsulation &&
+ skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) {
+ segs = skb_udp_tunnel_segment(skb, features);
+ goto out;
+ }
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
@@ -63,27 +71,20 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
goto out;
}
+	/* Do software UFO. Complete and fill in the UDP checksum as
+	 * HW cannot checksum UDP packets sent as multiple
+	 * IP fragments.
+	 */
+ offset = skb_checksum_start_offset(skb);
+ csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ offset += skb->csum_offset;
+ *(__sum16 *)(skb->data + offset) = csum_fold(csum);
+ skb->ip_summed = CHECKSUM_NONE;
+
/* Fragment the skb. IP headers of the fragments are updated in
* inet_gso_segment()
*/
- if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL)
- segs = skb_udp_tunnel_segment(skb, features);
- else {
- int offset;
- __wsum csum;
-
- /* Do software UFO. Complete and fill in the UDP checksum as
- * HW cannot do checksum of UDP packets sent as multiple
- * IP fragments.
- */
- offset = skb_checksum_start_offset(skb);
- csum = skb_checksum(skb, offset, skb->len - offset, 0);
- offset += skb->csum_offset;
- *(__sum16 *)(skb->data + offset) = csum_fold(csum);
- skb->ip_summed = CHECKSUM_NONE;
-
- segs = skb_segment(skb, features);
- }
+ segs = skb_segment(skb, features);
out:
return segs;
}
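
Hoisting the checksum completion out of the else-branch makes the intent plain: before the skb is cut into IP fragments, the full UDP checksum is computed in software, folded, and written back at csum_offset, because no NIC can checksum across fragments. The fold step, sketched in userspace; csum_fold here mirrors the kernel helper of the same name:

    #include <stdint.h>
    #include <stddef.h>

    /* Fold a 32-bit one's-complement accumulator to 16 bits and invert,
     * as the kernel's csum_fold() does. */
    static uint16_t csum_fold(uint32_t sum)
    {
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }

    /* Write the folded sum into the packet at the checksum field, like
     * "*(__sum16 *)(skb->data + offset) = csum_fold(csum)" above. */
    static void store_udp_csum(uint8_t *pkt, size_t csum_off, uint32_t csum)
    {
        uint16_t folded = csum_fold(csum);

        pkt[csum_off]     = (uint8_t)(folded >> 8);
        pkt[csum_off + 1] = (uint8_t)folded;
    }

    int main(void)
    {
        uint8_t pkt[8] = { 0 };
        store_udp_csum(pkt, 6, 0x1FFFE);  /* folds to 0xffff, then inverts */
        return pkt[6] | pkt[7];           /* 0: field is 0x0000 */
    }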
diff --git a/net/ipv4/xfrm4_mode_beet.c b/net/ipv4/xfrm4_mode_beet.c
index e3db3f915114..71acd0014f2d 100644
--- a/net/ipv4/xfrm4_mode_beet.c
+++ b/net/ipv4/xfrm4_mode_beet.c
@@ -48,7 +48,7 @@ static int xfrm4_beet_output(struct xfrm_state *x, struct sk_buff *skb)
hdrlen += IPV4_BEET_PHMAXLEN - (optlen & 4);
skb_set_network_header(skb, -x->props.header_len -
- hdrlen + (XFRM_MODE_SKB_CB(skb)->ihl - sizeof(*top_iph)));
+ hdrlen + (XFRM_MODE_SKB_CB(skb)->ihl - sizeof(*top_iph)));
if (x->sel.family != AF_INET6)
skb->network_header += IPV4_BEET_PHMAXLEN;
skb->mac_header = skb->network_header +
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 0b2a0641526a..542074c00c78 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -16,7 +16,7 @@
static int xfrm4_init_flags(struct xfrm_state *x)
{
- if (ipv4_config.no_pmtu_disc)
+ if (xs_net(x)->ipv4.sysctl_ip_no_pmtu_disc)
x->props.flags |= XFRM_STATE_NOPMTUDISC;
return 0;
}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d125fcdefb74..a9fa6c1feed5 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -442,6 +442,8 @@ static int inet6_netconf_msgsize_devconf(int type)
if (type == -1 || type == NETCONFA_MC_FORWARDING)
size += nla_total_size(4);
#endif
+ if (type == -1 || type == NETCONFA_PROXY_NEIGH)
+ size += nla_total_size(4);
return size;
}
@@ -475,6 +477,10 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
devconf->mc_forwarding) < 0)
goto nla_put_failure;
#endif
+ if ((type == -1 || type == NETCONFA_PROXY_NEIGH) &&
+ nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
+ goto nla_put_failure;
+
return nlmsg_end(skb, nlh);
nla_put_failure:
@@ -509,6 +515,7 @@ errout:
static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
[NETCONFA_IFINDEX] = { .len = sizeof(int) },
[NETCONFA_FORWARDING] = { .len = sizeof(int) },
+ [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
};
static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
@@ -834,6 +841,8 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
goto out;
}
+ neigh_parms_data_state_setall(idev->nd_parms);
+
ifa->addr = *addr;
if (peer_addr)
ifa->peer_addr = *peer_addr;
@@ -986,12 +995,9 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
* --yoshfuji
*/
if ((ifp->flags & IFA_F_PERMANENT) && onlink < 1) {
- struct in6_addr prefix;
struct rt6_info *rt;
- ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len);
-
- rt = addrconf_get_prefix_route(&prefix,
+ rt = addrconf_get_prefix_route(&ifp->addr,
ifp->prefix_len,
ifp->idev->dev,
0, RTF_GATEWAY | RTF_DEFAULT);
@@ -1024,7 +1030,7 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
u32 addr_flags;
unsigned long now = jiffies;
- write_lock(&idev->lock);
+ write_lock_bh(&idev->lock);
if (ift) {
spin_lock_bh(&ift->lock);
memcpy(&addr.s6_addr[8], &ift->addr.s6_addr[8], 8);
@@ -1036,7 +1042,7 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
retry:
in6_dev_hold(idev);
if (idev->cnf.use_tempaddr <= 0) {
- write_unlock(&idev->lock);
+ write_unlock_bh(&idev->lock);
pr_info("%s: use_tempaddr is disabled\n", __func__);
in6_dev_put(idev);
ret = -1;
@@ -1046,7 +1052,7 @@ retry:
if (ifp->regen_count++ >= idev->cnf.regen_max_retry) {
idev->cnf.use_tempaddr = -1; /*XXX*/
spin_unlock_bh(&ifp->lock);
- write_unlock(&idev->lock);
+ write_unlock_bh(&idev->lock);
pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
__func__);
in6_dev_put(idev);
@@ -1071,8 +1077,8 @@ retry:
regen_advance = idev->cnf.regen_max_retry *
idev->cnf.dad_transmits *
- idev->nd_parms->retrans_time / HZ;
- write_unlock(&idev->lock);
+ NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
+ write_unlock_bh(&idev->lock);
/* A temporary address is created only if this calculated Preferred
* Lifetime is greater than REGEN_ADVANCE time units. In particular,
@@ -1099,7 +1105,7 @@ retry:
in6_dev_put(idev);
pr_info("%s: retry temporary address regeneration\n", __func__);
tmpaddr = &addr;
- write_lock(&idev->lock);
+ write_lock_bh(&idev->lock);
goto retry;
}
@@ -1407,7 +1413,7 @@ try_nextdev:
EXPORT_SYMBOL(ipv6_dev_get_saddr);
int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
- unsigned char banned_flags)
+ u32 banned_flags)
{
struct inet6_ifaddr *ifp;
int err = -EADDRNOTAVAIL;
@@ -1424,7 +1430,7 @@ int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
}
int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
- unsigned char banned_flags)
+ u32 banned_flags)
{
struct inet6_dev *idev;
int err = -EADDRNOTAVAIL;
@@ -1671,7 +1677,7 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;
- if (ifp->prefix_len == 127) /* RFC 6164 */
+ if (ifp->prefix_len >= 127) /* RFC 6164 */
return;
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
if (ipv6_addr_any(&addr))
@@ -1682,7 +1688,7 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;
- if (ifp->prefix_len == 127) /* RFC 6164 */
+ if (ifp->prefix_len >= 127) /* RFC 6164 */
return;
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
if (ipv6_addr_any(&addr))
@@ -1889,7 +1895,8 @@ static void ipv6_regen_rndid(unsigned long data)
expires = jiffies +
idev->cnf.temp_prefered_lft * HZ -
- idev->cnf.regen_max_retry * idev->cnf.dad_transmits * idev->nd_parms->retrans_time -
+ idev->cnf.regen_max_retry * idev->cnf.dad_transmits *
+ NEIGH_VAR(idev->nd_parms, RETRANS_TIME) -
idev->cnf.max_desync_factor * HZ;
if (time_before(expires, jiffies)) {
pr_warn("%s: too short regeneration interval; timer disabled for %s\n",
@@ -2017,6 +2024,73 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
return idev;
}
+static void manage_tempaddrs(struct inet6_dev *idev,
+ struct inet6_ifaddr *ifp,
+ __u32 valid_lft, __u32 prefered_lft,
+ bool create, unsigned long now)
+{
+ u32 flags;
+ struct inet6_ifaddr *ift;
+
+ read_lock_bh(&idev->lock);
+ /* update all temporary addresses in the list */
+ list_for_each_entry(ift, &idev->tempaddr_list, tmp_list) {
+ int age, max_valid, max_prefered;
+
+ if (ifp != ift->ifpub)
+ continue;
+
+ /* RFC 4941 section 3.3:
+ * If a received option will extend the lifetime of a public
+ * address, the lifetimes of temporary addresses should
+ * be extended, subject to the overall constraint that no
+ * temporary addresses should ever remain "valid" or "preferred"
+ * for a time longer than (TEMP_VALID_LIFETIME) or
+ * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
+ */
+ age = (now - ift->cstamp) / HZ;
+ max_valid = idev->cnf.temp_valid_lft - age;
+ if (max_valid < 0)
+ max_valid = 0;
+
+ max_prefered = idev->cnf.temp_prefered_lft -
+ idev->cnf.max_desync_factor - age;
+ if (max_prefered < 0)
+ max_prefered = 0;
+
+ if (valid_lft > max_valid)
+ valid_lft = max_valid;
+
+ if (prefered_lft > max_prefered)
+ prefered_lft = max_prefered;
+
+ spin_lock(&ift->lock);
+ flags = ift->flags;
+ ift->valid_lft = valid_lft;
+ ift->prefered_lft = prefered_lft;
+ ift->tstamp = now;
+ if (prefered_lft > 0)
+ ift->flags &= ~IFA_F_DEPRECATED;
+
+ spin_unlock(&ift->lock);
+ if (!(flags&IFA_F_TENTATIVE))
+ ipv6_ifa_notify(0, ift);
+ }
+
+ if ((create || list_empty(&idev->tempaddr_list)) &&
+ idev->cnf.use_tempaddr > 0) {
+ /* When a new public address is created as described
+ * in [ADDRCONF], also create a new temporary address.
+ * Also create a temporary address if it's enabled but
+ * no temporary address currently exists.
+ */
+ read_unlock_bh(&idev->lock);
+ ipv6_create_tempaddr(ifp, NULL);
+ } else {
+ read_unlock_bh(&idev->lock);
+ }
+}
+
void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
{
struct prefix_info *pinfo;
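
Factoring this loop out into manage_tempaddrs() also lets manually configured addresses (the new IFA_F_MANAGETEMPADDR flag) share it. The clamp itself is plain arithmetic over the address's age; restated as a helper below, with the cnf fields standing in for the kernel's idev->cnf values and all lifetimes in seconds:

    #include <stdint.h>

    struct cnf { int temp_valid_lft, temp_prefered_lft, max_desync_factor; };

    /* Per RFC 4941 section 3.3: a temporary address may never stay valid
     * longer than TEMP_VALID_LIFETIME, nor preferred longer than
     * TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR, counted from its creation.
     * age is seconds since the temporary address was created. */
    static void clamp_temp_lifetimes(const struct cnf *c, int age,
                                     uint32_t *valid, uint32_t *prefered)
    {
        int max_valid    = c->temp_valid_lft - age;
        int max_prefered = c->temp_prefered_lft - c->max_desync_factor - age;

        if (max_valid < 0)
            max_valid = 0;
        if (max_prefered < 0)
            max_prefered = 0;

        if (*valid > (uint32_t)max_valid)
            *valid = (uint32_t)max_valid;
        if (*prefered > (uint32_t)max_prefered)
            *prefered = (uint32_t)max_prefered;
    }

    int main(void)
    {
        struct cnf c = { 604800, 86400, 600 };  /* 7d, 1d, 10min defaults */
        uint32_t valid = 2592000, prefered = 604800;

        clamp_temp_lifetimes(&c, 3600, &valid, &prefered);
        return !(valid == 601200 && prefered == 82200);
    }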
@@ -2171,6 +2245,7 @@ ok:
return;
}
+ ifp->flags |= IFA_F_MANAGETEMPADDR;
update_lft = 0;
create = 1;
ifp->cstamp = jiffies;
@@ -2179,9 +2254,8 @@ ok:
}
if (ifp) {
- int flags;
+ u32 flags;
unsigned long now;
- struct inet6_ifaddr *ift;
u32 stored_lft;
/* update lifetime (RFC2462 5.5.3 e) */
@@ -2222,70 +2296,8 @@ ok:
} else
spin_unlock(&ifp->lock);
- read_lock_bh(&in6_dev->lock);
- /* update all temporary addresses in the list */
- list_for_each_entry(ift, &in6_dev->tempaddr_list,
- tmp_list) {
- int age, max_valid, max_prefered;
-
- if (ifp != ift->ifpub)
- continue;
-
- /*
- * RFC 4941 section 3.3:
- * If a received option will extend the lifetime
- * of a public address, the lifetimes of
- * temporary addresses should be extended,
- * subject to the overall constraint that no
- * temporary addresses should ever remain
- * "valid" or "preferred" for a time longer than
- * (TEMP_VALID_LIFETIME) or
- * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR),
- * respectively.
- */
- age = (now - ift->cstamp) / HZ;
- max_valid = in6_dev->cnf.temp_valid_lft - age;
- if (max_valid < 0)
- max_valid = 0;
-
- max_prefered = in6_dev->cnf.temp_prefered_lft -
- in6_dev->cnf.max_desync_factor -
- age;
- if (max_prefered < 0)
- max_prefered = 0;
-
- if (valid_lft > max_valid)
- valid_lft = max_valid;
-
- if (prefered_lft > max_prefered)
- prefered_lft = max_prefered;
-
- spin_lock(&ift->lock);
- flags = ift->flags;
- ift->valid_lft = valid_lft;
- ift->prefered_lft = prefered_lft;
- ift->tstamp = now;
- if (prefered_lft > 0)
- ift->flags &= ~IFA_F_DEPRECATED;
-
- spin_unlock(&ift->lock);
- if (!(flags&IFA_F_TENTATIVE))
- ipv6_ifa_notify(0, ift);
- }
-
- if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) {
- /*
- * When a new public address is created as
- * described in [ADDRCONF], also create a new
- * temporary address. Also create a temporary
- * address if it's enabled but no temporary
- * address currently exists.
- */
- read_unlock_bh(&in6_dev->lock);
- ipv6_create_tempaddr(ifp, NULL);
- } else {
- read_unlock_bh(&in6_dev->lock);
- }
+ manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
+ create, now);
in6_ifa_put(ifp);
addrconf_verify(0);
@@ -2364,10 +2376,11 @@ err_exit:
/*
* Manual configuration of address on an interface
*/
-static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *pfx,
+static int inet6_addr_add(struct net *net, int ifindex,
+ const struct in6_addr *pfx,
const struct in6_addr *peer_pfx,
- unsigned int plen, __u8 ifa_flags, __u32 prefered_lft,
- __u32 valid_lft)
+ unsigned int plen, __u32 ifa_flags,
+ __u32 prefered_lft, __u32 valid_lft)
{
struct inet6_ifaddr *ifp;
struct inet6_dev *idev;
@@ -2386,6 +2399,9 @@ static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *p
if (!valid_lft || prefered_lft > valid_lft)
return -EINVAL;
+ if (ifa_flags & IFA_F_MANAGETEMPADDR && plen != 64)
+ return -EINVAL;
+
dev = __dev_get_by_index(net, ifindex);
if (!dev)
return -ENODEV;
@@ -2426,6 +2442,9 @@ static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *p
* manually configured addresses
*/
addrconf_dad_start(ifp);
+ if (ifa_flags & IFA_F_MANAGETEMPADDR)
+ manage_tempaddrs(idev, ifp, valid_lft, prefered_lft,
+ true, jiffies);
in6_ifa_put(ifp);
addrconf_verify(0);
return 0;
@@ -2614,7 +2633,7 @@ static void init_loopback(struct net_device *dev)
if (sp_ifa->rt)
continue;
- sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
+ sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, false);
/* Failure cases are ignored */
if (!IS_ERR(sp_rt)) {
@@ -3178,7 +3197,8 @@ static void addrconf_dad_timer(unsigned long data)
}
ifp->dad_probes--;
- addrconf_mod_dad_timer(ifp, ifp->idev->nd_parms->retrans_time);
+ addrconf_mod_dad_timer(ifp,
+ NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
spin_unlock(&ifp->lock);
write_unlock(&idev->lock);
@@ -3358,7 +3378,7 @@ static int if6_seq_show(struct seq_file *seq, void *v)
ifp->idev->dev->ifindex,
ifp->prefix_len,
ifp->scope,
- ifp->flags,
+ (u8) ifp->flags,
ifp->idev->dev->name);
return 0;
}
@@ -3458,7 +3478,12 @@ restart:
&inet6_addr_lst[i], addr_lst) {
unsigned long age;
- if (ifp->flags & IFA_F_PERMANENT)
+			/* When preferred_lft is set to a value that is neither
+			 * zero nor infinity while valid_lft is infinity, an
+			 * IFA_F_PERMANENT address still has a finite lifetime.
+			 */
+ if ((ifp->flags & IFA_F_PERMANENT) &&
+ (ifp->prefered_lft == INFINITY_LIFE_TIME))
continue;
spin_lock(&ifp->lock);
@@ -3483,7 +3508,8 @@ restart:
ifp->flags |= IFA_F_DEPRECATED;
}
- if (time_before(ifp->tstamp + ifp->valid_lft * HZ, next))
+ if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
+ (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
next = ifp->tstamp + ifp->valid_lft * HZ;
spin_unlock(&ifp->lock);
@@ -3499,7 +3525,7 @@ restart:
!(ifp->flags&IFA_F_TENTATIVE)) {
unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
ifp->idev->cnf.dad_transmits *
- ifp->idev->nd_parms->retrans_time / HZ;
+ NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME) / HZ;
if (age >= ifp->prefered_lft - regen_advance) {
struct inet6_ifaddr *ifpub = ifp->ifpub;
@@ -3574,6 +3600,7 @@ static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = {
[IFA_ADDRESS] = { .len = sizeof(struct in6_addr) },
[IFA_LOCAL] = { .len = sizeof(struct in6_addr) },
[IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
+ [IFA_FLAGS] = { .len = sizeof(u32) },
};
static int
@@ -3597,16 +3624,21 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
return inet6_addr_del(net, ifm->ifa_index, pfx, ifm->ifa_prefixlen);
}
-static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags,
+static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags,
u32 prefered_lft, u32 valid_lft)
{
u32 flags;
clock_t expires;
unsigned long timeout;
+ bool was_managetempaddr;
if (!valid_lft || (prefered_lft > valid_lft))
return -EINVAL;
+ if (ifa_flags & IFA_F_MANAGETEMPADDR &&
+ (ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64))
+ return -EINVAL;
+
timeout = addrconf_timeout_fixup(valid_lft, HZ);
if (addrconf_finite_timeout(timeout)) {
expires = jiffies_to_clock_t(timeout * HZ);
@@ -3626,7 +3658,10 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags,
}
spin_lock_bh(&ifp->lock);
- ifp->flags = (ifp->flags & ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD | IFA_F_HOMEADDRESS)) | ifa_flags;
+ was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
+ ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD |
+ IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR);
+ ifp->flags |= ifa_flags;
ifp->tstamp = jiffies;
ifp->valid_lft = valid_lft;
ifp->prefered_lft = prefered_lft;
@@ -3637,6 +3672,14 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags,
addrconf_prefix_route(&ifp->addr, ifp->prefix_len, ifp->idev->dev,
expires, flags);
+
+ if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) {
+ if (was_managetempaddr && !(ifp->flags & IFA_F_MANAGETEMPADDR))
+ valid_lft = prefered_lft = 0;
+ manage_tempaddrs(ifp->idev, ifp, valid_lft, prefered_lft,
+ !was_managetempaddr, jiffies);
+ }
+
addrconf_verify(0);
return 0;
@@ -3652,7 +3695,7 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
struct inet6_ifaddr *ifa;
struct net_device *dev;
u32 valid_lft = INFINITY_LIFE_TIME, preferred_lft = INFINITY_LIFE_TIME;
- u8 ifa_flags;
+ u32 ifa_flags;
int err;
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
@@ -3679,8 +3722,10 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
if (dev == NULL)
return -ENODEV;
+ ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
+
/* We ignore other flags so far. */
- ifa_flags = ifm->ifa_flags & (IFA_F_NODAD | IFA_F_HOMEADDRESS);
+ ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR;
ifa = ipv6_get_ifaddr(net, pfx, dev, 1);
if (ifa == NULL) {
@@ -3704,7 +3749,7 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
return err;
}
-static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u8 flags,
+static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u32 flags,
u8 scope, int ifindex)
{
struct ifaddrmsg *ifm;
@@ -3747,7 +3792,8 @@ static inline int inet6_ifaddr_msgsize(void)
return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
+ nla_total_size(16) /* IFA_LOCAL */
+ nla_total_size(16) /* IFA_ADDRESS */
- + nla_total_size(sizeof(struct ifa_cacheinfo));
+ + nla_total_size(sizeof(struct ifa_cacheinfo))
+ + nla_total_size(4) /* IFA_FLAGS */;
}
static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
@@ -3763,7 +3809,8 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
ifa->idev->dev->ifindex);
- if (!(ifa->flags&IFA_F_PERMANENT)) {
+ if (!((ifa->flags&IFA_F_PERMANENT) &&
+ (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
preferred = ifa->prefered_lft;
valid = ifa->valid_lft;
if (preferred != INFINITY_LIFE_TIME) {
@@ -3795,6 +3842,9 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
goto error;
+ if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
+ goto error;
+
return nlmsg_end(skb, nlh);
error:
@@ -4198,7 +4248,7 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev)
ci.max_reasm_len = IPV6_MAXPLEN;
ci.tstamp = cstamp_delta(idev->tstamp);
ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
- ci.retrans_time = jiffies_to_msecs(idev->nd_parms->retrans_time);
+ ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
goto nla_put_failure;
nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
@@ -4691,6 +4741,46 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
return ret;
}
+static
+int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int *valp = ctl->data;
+ int ret;
+ int old, new;
+
+ old = *valp;
+ ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+ new = *valp;
+
+ if (write && old != new) {
+ struct net *net = ctl->extra2;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ if (valp == &net->ipv6.devconf_dflt->proxy_ndp)
+ inet6_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
+ NETCONFA_IFINDEX_DEFAULT,
+ net->ipv6.devconf_dflt);
+ else if (valp == &net->ipv6.devconf_all->proxy_ndp)
+ inet6_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
+ NETCONFA_IFINDEX_ALL,
+ net->ipv6.devconf_all);
+ else {
+ struct inet6_dev *idev = ctl->extra1;
+
+ inet6_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
+ idev->dev->ifindex,
+ &idev->cnf);
+ }
+ rtnl_unlock();
+ }
+
+ return ret;
+}
+
+
static struct addrconf_sysctl_table
{
struct ctl_table_header *sysctl_header;
@@ -4877,7 +4967,7 @@ static struct addrconf_sysctl_table
.data = &ipv6_devconf.proxy_ndp,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = addrconf_sysctl_proxy_ndp,
},
{
.procname = "accept_source_route",
@@ -4993,7 +5083,7 @@ static void __addrconf_sysctl_unregister(struct ipv6_devconf *p)
static void addrconf_sysctl_register(struct inet6_dev *idev)
{
- neigh_sysctl_register(idev->dev, idev->nd_parms, "ipv6",
+ neigh_sysctl_register(idev->dev, idev->nd_parms,
&ndisc_ifinfo_sysctl_change);
__addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
idev, &idev->cnf);
@@ -5126,9 +5216,7 @@ int __init addrconf_init(void)
addrconf_verify(0);
- err = rtnl_af_register(&inet6_ops);
- if (err < 0)
- goto errout_af;
+ rtnl_af_register(&inet6_ops);
err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo,
NULL);
@@ -5152,7 +5240,6 @@ int __init addrconf_init(void)
return 0;
errout:
rtnl_af_unregister(&inet6_ops);
-errout_af:
unregister_netdevice_notifier(&ipv6_dev_notf);
errlo:
unregister_pernet_subsys(&addrconf_ops);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 4fbdb7046d28..c921d5d38831 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -213,7 +213,7 @@ lookup_protocol:
inet->mc_list = NULL;
inet->rcv_tos = 0;
- if (ipv4_config.no_pmtu_disc)
+ if (net->ipv4.sysctl_ip_no_pmtu_disc)
inet->pmtudisc = IP_PMTUDISC_DONT;
else
inet->pmtudisc = IP_PMTUDISC_WANT;
@@ -661,7 +661,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
final_p = fl6_update_dst(&fl6, np->opt, &final);
- dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
+ dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
sk->sk_route_caps = 0;
sk->sk_err_soft = -PTR_ERR(dst);
@@ -683,8 +683,7 @@ bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb)
if (np->rxopt.all) {
if ((opt->hop && (np->rxopt.bits.hopopts ||
np->rxopt.bits.ohopopts)) ||
- ((IPV6_FLOWINFO_MASK &
- *(__be32 *)skb_network_header(skb)) &&
+ (ip6_flowinfo((struct ipv6hdr *) skb_network_header(skb)) &&
np->rxopt.bits.rxflow) ||
(opt->srcrt && (np->rxopt.bits.srcrt ||
np->rxopt.bits.osrcrt)) ||
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 82e1da3a40b9..81e496a2e008 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Authors
*
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index a454b0ff57c7..6983058942ea 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -73,7 +73,6 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
- usin->sin6_addr = flowlabel->dst;
}
}
@@ -171,7 +170,7 @@ ipv4_connected:
opt = flowlabel ? flowlabel->opt : np->opt;
final_p = fl6_update_dst(&fl6, opt, &final);
- dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
+ dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
err = 0;
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
@@ -318,7 +317,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
/*
* Handle MSG_ERRQUEUE
*/
-int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
+int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct sock_exterr_skb *serr;
@@ -369,6 +368,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
&sin->sin6_addr);
sin->sin6_scope_id = 0;
}
+ *addr_len = sizeof(*sin);
}
memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
@@ -377,6 +377,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
sin->sin6_family = AF_INET6;
sin->sin6_flowinfo = 0;
+ sin->sin6_port = 0;
if (skb->protocol == htons(ETH_P_IPV6)) {
sin->sin6_addr = ipv6_hdr(skb)->saddr;
if (np->rxopt.all)
@@ -423,7 +424,8 @@ EXPORT_SYMBOL_GPL(ipv6_recv_error);
/*
* Handle IPV6_RECVPATHMTU
*/
-int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
+int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
+ int *addr_len)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct sk_buff *skb;
@@ -457,6 +459,7 @@ int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
sin->sin6_port = 0;
sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id;
sin->sin6_addr = mtu_info.ip6m_addr.sin6_addr;
+ *addr_len = sizeof(*sin);
}
put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index b8719df0366e..6eef8a7e35f2 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Authors
*
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index e27591635f92..3fd0a578329e 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -122,7 +122,11 @@ out:
static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg)
{
struct rt6_info *rt = (struct rt6_info *) arg->result;
- struct net_device *dev = rt->rt6i_idev->dev;
+ struct net_device *dev = NULL;
+
+ if (rt->rt6i_idev)
+ dev = rt->rt6i_idev->dev;
+
/* do not accept result if the route does
* not meet the required prefix length
*/
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index eef8d945b362..9a809a4b3d86 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -315,8 +315,10 @@ static void mip6_addr_swap(struct sk_buff *skb)
static inline void mip6_addr_swap(struct sk_buff *skb) {}
#endif
-struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb,
- struct sock *sk, struct flowi6 *fl6)
+static struct dst_entry *icmpv6_route_lookup(struct net *net,
+ struct sk_buff *skb,
+ struct sock *sk,
+ struct flowi6 *fl6)
{
struct dst_entry *dst, *dst2;
struct flowi6 fl2;
@@ -554,7 +556,9 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
saddr = &ipv6_hdr(skb)->daddr;
- if (!ipv6_unicast_destination(skb))
+ if (!ipv6_unicast_destination(skb) &&
+ !(net->ipv6.anycast_src_echo_reply &&
+ ipv6_anycast_destination(skb)))
saddr = NULL;
memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
@@ -984,7 +988,7 @@ int icmpv6_err_convert(u8 type, u8 code, int *err)
EXPORT_SYMBOL(icmpv6_err_convert);
#ifdef CONFIG_SYSCTL
-struct ctl_table ipv6_icmp_table_template[] = {
+static struct ctl_table ipv6_icmp_table_template[] = {
{
.procname = "ratelimit",
.data = &init_net.ipv6.sysctl.icmpv6_time,
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 77bb8afb141d..c9138189415a 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -86,7 +86,7 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
fl6->fl6_sport = htons(ireq->ir_num);
security_req_classify_flow(req, flowi6_to_flowi(fl6));
- dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
+ dst = ip6_dst_lookup_flow(sk, fl6, final_p);
if (IS_ERR(dst))
return NULL;
@@ -216,7 +216,7 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
dst = __inet6_csk_dst_check(sk, np->dst_cookie);
if (!dst) {
- dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
+ dst = ip6_dst_lookup_flow(sk, fl6, final_p);
if (!IS_ERR(dst))
__inet6_csk_dst_store(sk, dst, NULL, NULL);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 5550a8113a6d..075602fc6b6a 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1530,7 +1530,7 @@ static void fib6_clean_tree(struct net *net, struct fib6_node *root,
}
void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
- int prune, void *arg)
+ void *arg)
{
struct fib6_table *table;
struct hlist_head *head;
@@ -1542,7 +1542,7 @@ void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
hlist_for_each_entry_rcu(table, head, tb6_hlist) {
write_lock_bh(&table->tb6_lock);
fib6_clean_tree(net, &table->tb6_root,
- func, prune, arg);
+ func, 0, arg);
write_unlock_bh(&table->tb6_lock);
}
}
@@ -1636,7 +1636,7 @@ void fib6_run_gc(unsigned long expires, struct net *net, bool force)
gc_args.more = icmp6_dst_gc();
- fib6_clean_all(net, fib6_age, 0, NULL);
+ fib6_clean_all(net, fib6_age, NULL);
now = jiffies;
net->ipv6.ip6_rt_last_gc = now;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 8acb28621f9c..e7a440dd5c0d 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -61,7 +61,6 @@ static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
-#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
#define IPV6_TCLASS_SHIFT 20
#define HASH_SIZE_SHIFT 5
@@ -499,7 +498,7 @@ static int ip6gre_rcv(struct sk_buff *skb)
&ipv6h->saddr, &ipv6h->daddr, key,
gre_proto);
if (tunnel) {
- struct pcpu_tstats *tstats;
+ struct pcpu_sw_netstats *tstats;
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto drop;
@@ -846,7 +845,7 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
- fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
+ fl6.flowlabel |= ip6_flowlabel(ipv6h);
if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
fl6.flowi6_mark = skb->mark;
@@ -1266,12 +1265,12 @@ static int ip6gre_tunnel_init(struct net_device *dev)
if (ipv6_addr_any(&tunnel->parms.raddr))
dev->header_ops = &ip6gre_header_ops;
- dev->tstats = alloc_percpu(struct pcpu_tstats);
+ dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
for_each_possible_cpu(i) {
- struct pcpu_tstats *ip6gre_tunnel_stats;
+ struct pcpu_sw_netstats *ip6gre_tunnel_stats;
ip6gre_tunnel_stats = per_cpu_ptr(dev->tstats, i);
u64_stats_init(&ip6gre_tunnel_stats->syncp);
}
@@ -1467,12 +1466,12 @@ static int ip6gre_tap_init(struct net_device *dev)
ip6gre_tnl_link_config(tunnel, 1);
- dev->tstats = alloc_percpu(struct pcpu_tstats);
+ dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
for_each_possible_cpu(i) {
- struct pcpu_tstats *ip6gre_tap_stats;
+ struct pcpu_sw_netstats *ip6gre_tap_stats;
ip6gre_tap_stats = per_cpu_ptr(dev->tstats, i);
u64_stats_init(&ip6gre_tap_stats->syncp);
}
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 4b851692b1f6..1e8683b135bb 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -154,6 +154,32 @@ out:
return segs;
}
+/* Return the total length of all the extension hdrs, following the
+ * same parsing logic as ipv6_gso_pull_exthdrs().
+ */
+static int ipv6_exthdrs_len(struct ipv6hdr *iph,
+ const struct net_offload **opps)
+{
+ struct ipv6_opt_hdr *opth = (void *)iph;
+ int len = 0, proto, optlen = sizeof(*iph);
+
+ proto = iph->nexthdr;
+ for (;;) {
+ if (proto != NEXTHDR_HOP) {
+ *opps = rcu_dereference(inet6_offloads[proto]);
+ if (unlikely(!(*opps)))
+ break;
+ if (!((*opps)->flags & INET6_PROTO_GSO_EXTHDR))
+ break;
+ }
+ opth = (void *)opth + optlen;
+ optlen = ipv6_optlen(opth);
+ len += optlen;
+ proto = opth->nexthdr;
+ }
+ return len;
+}
+
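/* Editorial note on the helper above -- a minimal worked example,
 * assuming the stock ipv6_optlen() definition from <net/ipv6.h>:
 *
 *	#define ipv6_optlen(p) (((p)->hdrlen + 1) << 3)
 *
 * Extension headers advertise their size in 8-octet units excluding
 * the first 8 octets, so a routing header with hdrlen == 2 adds
 * (2 + 1) << 3 = 24 bytes to the total that ipv6_exthdrs_len()
 * returns, and *opps is left pointing at the offload ops of the
 * first header that is not itself a GSO-capable extension header.
 */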
static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
struct sk_buff *skb)
{
@@ -164,7 +190,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
unsigned int nlen;
unsigned int hlen;
unsigned int off;
- int flush = 1;
+ u16 flush = 1;
int proto;
__wsum csum;
@@ -177,6 +203,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
goto out;
}
+ skb_set_network_header(skb, off);
skb_gro_pull(skb, sizeof(*iph));
skb_set_transport_header(skb, skb_gro_offset(skb));
@@ -211,12 +238,16 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
if (!NAPI_GRO_CB(p)->same_flow)
continue;
- iph2 = ipv6_hdr(p);
+ iph2 = (struct ipv6hdr *)(p->data + off);
first_word = *(__be32 *)iph ^ *(__be32 *)iph2 ;
- /* All fields must match except length and Traffic Class. */
- if (nlen != skb_network_header_len(p) ||
- (first_word & htonl(0xF00FFFFF)) ||
+ /* All fields must match except length and Traffic Class.
+ * XXX skbs on the gro_list have all been parsed and pulled
+ * already, so we don't need to compare nlen
+ * (nlen != (sizeof(*iph2) + ipv6_exthdrs_len(iph2, &ops)));
+ * memcmp() alone below is sufficient, right?
+ */
+ if ((first_word & htonl(0xF00FFFFF)) ||
memcmp(&iph->nexthdr, &iph2->nexthdr,
nlen - offsetof(struct ipv6hdr, nexthdr))) {
NAPI_GRO_CB(p)->same_flow = 0;
@@ -245,21 +276,21 @@ out:
return pp;
}
-static int ipv6_gro_complete(struct sk_buff *skb)
+static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
{
const struct net_offload *ops;
- struct ipv6hdr *iph = ipv6_hdr(skb);
+ struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
int err = -ENOSYS;
- iph->payload_len = htons(skb->len - skb_network_offset(skb) -
- sizeof(*iph));
+ iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));
rcu_read_lock();
- ops = rcu_dereference(inet6_offloads[NAPI_GRO_CB(skb)->proto]);
+
+ nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
goto out_unlock;
- err = ops->callbacks.gro_complete(skb);
+ err = ops->callbacks.gro_complete(skb, nhoff);
out_unlock:
rcu_read_unlock();
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 59df872e2f4d..d1de9560c421 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -116,8 +116,8 @@ static int ip6_finish_output2(struct sk_buff *skb)
}
rcu_read_unlock_bh();
- IP6_INC_STATS_BH(dev_net(dst->dev),
- ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
+ IP6_INC_STATS(dev_net(dst->dev),
+ ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
kfree_skb(skb);
return -EINVAL;
}
@@ -336,7 +336,8 @@ int ip6_forward(struct sk_buff *skb)
goto drop;
if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
- IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
+ IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
+ IPSTATS_MIB_INDISCARDS);
goto drop;
}
@@ -370,8 +371,8 @@ int ip6_forward(struct sk_buff *skb)
/* Force OUTPUT device used as source address */
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
- IP6_INC_STATS_BH(net,
- ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
+ IPSTATS_MIB_INHDRERRORS);
kfree_skb(skb);
return -ETIMEDOUT;
@@ -384,14 +385,15 @@ int ip6_forward(struct sk_buff *skb)
if (proxied > 0)
return ip6_input(skb);
else if (proxied < 0) {
- IP6_INC_STATS(net, ip6_dst_idev(dst),
- IPSTATS_MIB_INDISCARDS);
+ IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
+ IPSTATS_MIB_INDISCARDS);
goto drop;
}
}
if (!xfrm6_route_forward(skb)) {
- IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
+ IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
+ IPSTATS_MIB_INDISCARDS);
goto drop;
}
dst = skb_dst(skb);
@@ -448,16 +450,17 @@ int ip6_forward(struct sk_buff *skb)
/* Again, force OUTPUT device used as source address */
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
- IP6_INC_STATS_BH(net,
- ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
- IP6_INC_STATS_BH(net,
- ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
+ IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
+ IPSTATS_MIB_INTOOBIGERRORS);
+ IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
+ IPSTATS_MIB_FRAGFAILS);
kfree_skb(skb);
return -EMSGSIZE;
}
if (skb_cow(skb, dst->dev->hard_header_len)) {
- IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
+ IPSTATS_MIB_OUTDISCARDS);
goto drop;
}
@@ -938,7 +941,6 @@ EXPORT_SYMBOL_GPL(ip6_dst_lookup);
* @sk: socket which provides route info
* @fl6: flow to lookup
* @final_dst: final destination address for ipsec lookup
- * @can_sleep: we are in a sleepable context
*
* This function performs a route lookup on the given flow.
*
@@ -946,8 +948,7 @@ EXPORT_SYMBOL_GPL(ip6_dst_lookup);
* error code.
*/
struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
- const struct in6_addr *final_dst,
- bool can_sleep)
+ const struct in6_addr *final_dst)
{
struct dst_entry *dst = NULL;
int err;
@@ -957,8 +958,6 @@ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
return ERR_PTR(err);
if (final_dst)
fl6->daddr = *final_dst;
- if (can_sleep)
- fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
@@ -969,7 +968,6 @@ EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
* @sk: socket which provides the dst cache and route info
* @fl6: flow to lookup
* @final_dst: final destination address for ipsec lookup
- * @can_sleep: we are in a sleepable context
*
* This function performs a route lookup on the given flow with the
* possibility of using the cached route in the socket if it is valid.
@@ -980,8 +978,7 @@ EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
* error code.
*/
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
- const struct in6_addr *final_dst,
- bool can_sleep)
+ const struct in6_addr *final_dst)
{
struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
int err;
@@ -993,8 +990,6 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
return ERR_PTR(err);
if (final_dst)
fl6->daddr = *final_dst;
- if (can_sleep)
- fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
@@ -1162,10 +1157,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
np->cork.hop_limit = hlimit;
np->cork.tclass = tclass;
if (rt->dst.flags & DST_XFRM_TUNNEL)
- mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
+ mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
rt->dst.dev->mtu : dst_mtu(&rt->dst);
else
- mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
+ mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
rt->dst.dev->mtu : dst_mtu(rt->dst.path);
if (np->frag_size < mtu) {
if (np->frag_size)
@@ -1193,11 +1188,35 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
(opt ? opt->opt_nflen : 0);
- maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);
+ maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
+ sizeof(struct frag_hdr);
if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
- if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
- ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
+ unsigned int maxnonfragsize, headersize;
+
+ headersize = sizeof(struct ipv6hdr) +
+ (opt ? opt->tot_len : 0) +
+ (dst_allfrag(&rt->dst) ?
+ sizeof(struct frag_hdr) : 0) +
+ rt->rt6i_nfheader_len;
+
+ maxnonfragsize = (np->pmtudisc >= IPV6_PMTUDISC_DO) ?
+ mtu : sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
+
+ /* dontfrag active */
+ if ((cork->length + length > mtu - headersize) && dontfrag &&
+ (sk->sk_protocol == IPPROTO_UDP ||
+ sk->sk_protocol == IPPROTO_RAW)) {
+ ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
+ sizeof(struct ipv6hdr));
+ goto emsgsize;
+ }
+
+ if (cork->length + length > maxnonfragsize - headersize) {
+emsgsize:
+ ipv6_local_error(sk, EMSGSIZE, fl6,
+ mtu - headersize +
+ sizeof(struct ipv6hdr));
return -EMSGSIZE;
}
}
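/* Editorial note -- a worked example of the bounds above, under
 * assumed values: mtu = 1280, no dst options (opt == NULL), no
 * allfrag and rt6i_nfheader_len == 0 give headersize = 40 (the bare
 * IPv6 header). With pmtudisc >= IPV6_PMTUDISC_DO, maxnonfragsize =
 * mtu, so the send fails once cork->length + length exceeds
 * 1280 - 40 = 1240 bytes, and the reported size
 * mtu - headersize + sizeof(struct ipv6hdr) works out to exactly
 * 1280 -- the path MTU the application must stay under.
 */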
@@ -1222,12 +1241,6 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
* --yoshfuji
*/
- if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
- sk->sk_protocol == IPPROTO_RAW)) {
- ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
- return -EMSGSIZE;
- }
-
skb = skb_peek_tail(&sk->sk_write_queue);
cork->length += length;
if (((length > mtu) ||
@@ -1267,7 +1280,7 @@ alloc_new_skb:
if (skb == NULL || skb_prev == NULL)
ip6_append_data_mtu(&mtu, &maxfraglen,
fragheaderlen, skb, rt,
- np->pmtudisc ==
+ np->pmtudisc >=
IPV6_PMTUDISC_PROBE);
skb_prev = skb;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index d6062325db08..1e5e2404f1af 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -29,7 +29,6 @@
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
-#include <linux/if_tunnel.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
@@ -70,7 +69,6 @@ MODULE_ALIAS_NETDEV("ip6tnl0");
#define IP6_TNL_TRACE(x...) do {;} while(0)
#endif
-#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
#define IPV6_TCLASS_SHIFT 20
#define HASH_SIZE_SHIFT 5
@@ -103,16 +101,26 @@ struct ip6_tnl_net {
static struct net_device_stats *ip6_get_stats(struct net_device *dev)
{
- struct pcpu_tstats sum = { 0 };
+ struct pcpu_sw_netstats tmp, sum = { 0 };
int i;
for_each_possible_cpu(i) {
- const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
-
- sum.rx_packets += tstats->rx_packets;
- sum.rx_bytes += tstats->rx_bytes;
- sum.tx_packets += tstats->tx_packets;
- sum.tx_bytes += tstats->tx_bytes;
+ unsigned int start;
+ const struct pcpu_sw_netstats *tstats =
+ per_cpu_ptr(dev->tstats, i);
+
+ do {
+ start = u64_stats_fetch_begin_bh(&tstats->syncp);
+ tmp.rx_packets = tstats->rx_packets;
+ tmp.rx_bytes = tstats->rx_bytes;
+ tmp.tx_packets = tstats->tx_packets;
+ tmp.tx_bytes = tstats->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+
+ sum.rx_packets += tmp.rx_packets;
+ sum.rx_bytes += tmp.rx_bytes;
+ sum.tx_packets += tmp.tx_packets;
+ sum.tx_bytes += tmp.tx_bytes;
}
dev->stats.rx_packets = sum.rx_packets;
dev->stats.rx_bytes = sum.rx_bytes;
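/* Editorial note: the fetch_begin/fetch_retry pair above is the
 * standard u64_stats_sync read side -- on 32-bit SMP it re-reads the
 * per-cpu counters until the writer's sequence count is stable, so a
 * 64-bit counter is never observed half-updated; on 64-bit builds the
 * helpers are assumed to compile down to plain loads.
 */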
@@ -785,7 +793,7 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
&ipv6h->daddr)) != NULL) {
- struct pcpu_tstats *tstats;
+ struct pcpu_sw_netstats *tstats;
if (t->parms.proto != ipproto && t->parms.proto != 0) {
rcu_read_unlock();
@@ -824,8 +832,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
}
tstats = this_cpu_ptr(t->dev->tstats);
+ u64_stats_update_begin(&tstats->syncp);
tstats->rx_packets++;
tstats->rx_bytes += skb->len;
+ u64_stats_update_end(&tstats->syncp);
netif_rx(skb);
@@ -1131,7 +1141,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
- fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
+ fl6.flowlabel |= ip6_flowlabel(ipv6h);
if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
fl6.flowi6_mark = skb->mark;
@@ -1498,12 +1508,12 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
t->dev = dev;
t->net = dev_net(dev);
- dev->tstats = alloc_percpu(struct pcpu_tstats);
+ dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
for_each_possible_cpu(i) {
- struct pcpu_tstats *ip6_tnl_stats;
+ struct pcpu_sw_netstats *ip6_tnl_stats;
ip6_tnl_stats = per_cpu_ptr(dev->tstats, i);
u64_stats_init(&ip6_tnl_stats->syncp);
}
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index ed94ba61dda0..b50acd5e75d2 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -24,7 +24,6 @@
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
-#include <linux/if_tunnel.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
@@ -75,26 +74,6 @@ struct vti6_net {
struct ip6_tnl __rcu **tnls[2];
};
-static struct net_device_stats *vti6_get_stats(struct net_device *dev)
-{
- struct pcpu_tstats sum = { 0 };
- int i;
-
- for_each_possible_cpu(i) {
- const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
-
- sum.rx_packets += tstats->rx_packets;
- sum.rx_bytes += tstats->rx_bytes;
- sum.tx_packets += tstats->tx_packets;
- sum.tx_bytes += tstats->tx_bytes;
- }
- dev->stats.rx_packets = sum.rx_packets;
- dev->stats.rx_bytes = sum.rx_bytes;
- dev->stats.tx_packets = sum.tx_packets;
- dev->stats.tx_bytes = sum.tx_bytes;
- return &dev->stats;
-}
-
#define for_each_vti6_tunnel_rcu(start) \
for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
@@ -312,7 +291,7 @@ static int vti6_rcv(struct sk_buff *skb)
if ((t = vti6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
&ipv6h->daddr)) != NULL) {
- struct pcpu_tstats *tstats;
+ struct pcpu_sw_netstats *tstats;
if (t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) {
rcu_read_unlock();
@@ -331,8 +310,10 @@ static int vti6_rcv(struct sk_buff *skb)
}
tstats = this_cpu_ptr(t->dev->tstats);
+ u64_stats_update_begin(&tstats->syncp);
tstats->rx_packets++;
tstats->rx_bytes += skb->len;
+ u64_stats_update_end(&tstats->syncp);
skb->mark = 0;
secpath_reset(skb);
@@ -716,7 +697,7 @@ static const struct net_device_ops vti6_netdev_ops = {
.ndo_start_xmit = vti6_tnl_xmit,
.ndo_do_ioctl = vti6_ioctl,
.ndo_change_mtu = vti6_change_mtu,
- .ndo_get_stats = vti6_get_stats,
+ .ndo_get_stats64 = ip_tunnel_get_stats64,
};
/**
@@ -753,7 +734,7 @@ static inline int vti6_dev_init_gen(struct net_device *dev)
t->dev = dev;
t->net = dev_net(dev);
- dev->tstats = alloc_percpu(struct pcpu_tstats);
+ dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
return 0;
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index ce507d9e1c90..da9becb42e81 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -16,8 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
* [Memo]
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 1c6ce3119ff8..af0ecb94b3b4 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -722,7 +722,7 @@ done:
case IPV6_MTU_DISCOVER:
if (optlen < sizeof(int))
goto e_inval;
- if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_PROBE)
+ if (val < IPV6_PMTUDISC_DONT || val > IPV6_PMTUDISC_INTERFACE)
goto e_inval;
np->pmtudisc = val;
retv = 0;
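/* Editorial note: the widened upper bound matches the pmtudisc
 * comparisons switched from '==' to '>=' elsewhere in this series,
 * on the assumption that the IPV6_PMTUDISC_* constants are ordered
 * DONT < WANT < DO < PROBE < INTERFACE, so the new INTERFACE mode is
 * also treated as "at least PROBE" wherever probing behaviour is
 * selected.
 */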
@@ -1019,7 +1019,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
put_cmsg(&msg, SOL_IPV6, IPV6_HOPLIMIT, sizeof(hlim), &hlim);
}
if (np->rxopt.bits.rxtclass) {
- int tclass = np->rcv_tclass;
+ int tclass = ntohl(np->rcv_flowinfo & IPV6_TCLASS_MASK) >> 20;
put_cmsg(&msg, SOL_IPV6, IPV6_TCLASS, sizeof(tclass), &tclass);
}
if (np->rxopt.bits.rxoinfo) {
@@ -1034,6 +1034,11 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
int hlim = np->mcast_hops;
put_cmsg(&msg, SOL_IPV6, IPV6_2292HOPLIMIT, sizeof(hlim), &hlim);
}
+ if (np->rxopt.bits.rxflow) {
+ __be32 flowinfo = np->rcv_flowinfo;
+
+ put_cmsg(&msg, SOL_IPV6, IPV6_FLOWINFO, sizeof(flowinfo), &flowinfo);
+ }
}
len -= msg.msg_controllen;
return put_user(len, optlen);
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 9ac01dc9402e..db9b6cbc9db3 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
* Authors:
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 3512177deb4d..09a22f4f36c9 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -125,17 +125,19 @@ struct neigh_table nd_tbl = {
.id = "ndisc_cache",
.parms = {
.tbl = &nd_tbl,
- .base_reachable_time = ND_REACHABLE_TIME,
- .retrans_time = ND_RETRANS_TIMER,
- .gc_staletime = 60 * HZ,
.reachable_time = ND_REACHABLE_TIME,
- .delay_probe_time = 5 * HZ,
- .queue_len_bytes = 64*1024,
- .ucast_probes = 3,
- .mcast_probes = 3,
- .anycast_delay = 1 * HZ,
- .proxy_delay = (8 * HZ) / 10,
- .proxy_qlen = 64,
+ .data = {
+ [NEIGH_VAR_MCAST_PROBES] = 3,
+ [NEIGH_VAR_UCAST_PROBES] = 3,
+ [NEIGH_VAR_RETRANS_TIME] = ND_RETRANS_TIMER,
+ [NEIGH_VAR_BASE_REACHABLE_TIME] = ND_REACHABLE_TIME,
+ [NEIGH_VAR_DELAY_PROBE_TIME] = 5 * HZ,
+ [NEIGH_VAR_GC_STALETIME] = 60 * HZ,
+ [NEIGH_VAR_QUEUE_LEN_BYTES] = 64 * 1024,
+ [NEIGH_VAR_PROXY_QLEN] = 64,
+ [NEIGH_VAR_ANYCAST_DELAY] = 1 * HZ,
+ [NEIGH_VAR_PROXY_DELAY] = (8 * HZ) / 10,
+ },
},
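/* Editorial note: the NEIGH_VAR_* indices above are assumed to map
 * one-for-one onto the old per-field names (retrans_time becomes
 * NEIGH_VAR_RETRANS_TIME and so on); folding them into a data[]
 * array is what lets NEIGH_VAR()/NEIGH_VAR_SET() in the hunks below
 * replace direct member access without changing any default value.
 */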
.gc_interval = 30 * HZ,
.gc_thresh1 = 128,
@@ -656,14 +658,14 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
if (skb && ipv6_chk_addr(dev_net(dev), &ipv6_hdr(skb)->saddr, dev, 1))
saddr = &ipv6_hdr(skb)->saddr;
- if ((probes -= neigh->parms->ucast_probes) < 0) {
+ if ((probes -= NEIGH_VAR(neigh->parms, UCAST_PROBES)) < 0) {
if (!(neigh->nud_state & NUD_VALID)) {
ND_PRINTK(1, dbg,
"%s: trying to ucast probe in NUD_INVALID: %pI6\n",
__func__, target);
}
ndisc_send_ns(dev, neigh, target, target, saddr);
- } else if ((probes -= neigh->parms->app_probes) < 0) {
+ } else if ((probes -= NEIGH_VAR(neigh->parms, APP_PROBES)) < 0) {
neigh_app_ns(neigh);
} else {
addrconf_addr_solict_mult(target, &mcaddr);
@@ -790,7 +792,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
if (!(NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED) &&
skb->pkt_type != PACKET_HOST &&
inc &&
- idev->nd_parms->proxy_delay != 0) {
+ NEIGH_VAR(idev->nd_parms, PROXY_DELAY) != 0) {
/*
* for anycast or proxy,
* sender should delay its response
@@ -1210,7 +1212,7 @@ skip_defrtr:
rtime = (rtime*HZ)/1000;
if (rtime < HZ/10)
rtime = HZ/10;
- in6_dev->nd_parms->retrans_time = rtime;
+ NEIGH_VAR_SET(in6_dev->nd_parms, RETRANS_TIME, rtime);
in6_dev->tstamp = jiffies;
inet6_ifinfo_notify(RTM_NEWLINK, in6_dev);
}
@@ -1222,9 +1224,11 @@ skip_defrtr:
if (rtime < HZ/10)
rtime = HZ/10;
- if (rtime != in6_dev->nd_parms->base_reachable_time) {
- in6_dev->nd_parms->base_reachable_time = rtime;
- in6_dev->nd_parms->gc_staletime = 3 * rtime;
+ if (rtime != NEIGH_VAR(in6_dev->nd_parms, BASE_REACHABLE_TIME)) {
+ NEIGH_VAR_SET(in6_dev->nd_parms,
+ BASE_REACHABLE_TIME, rtime);
+ NEIGH_VAR_SET(in6_dev->nd_parms,
+ GC_STALETIME, 3 * rtime);
in6_dev->nd_parms->reachable_time = neigh_rand_reach_time(rtime);
in6_dev->tstamp = jiffies;
inet6_ifinfo_notify(RTM_NEWLINK, in6_dev);
@@ -1277,6 +1281,9 @@ skip_linkparms:
ri->prefix_len == 0)
continue;
#endif
+ if (ri->prefix_len == 0 &&
+ !in6_dev->cnf.accept_ra_defrtr)
+ continue;
if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)
continue;
rt6_route_rcv(skb->dev, (u8*)p, (p->nd_opt_len) << 3,
@@ -1648,22 +1655,23 @@ int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, void __user *bu
ndisc_warn_deprecated_sysctl(ctl, "syscall", dev ? dev->name : "default");
if (strcmp(ctl->procname, "retrans_time") == 0)
- ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+ ret = neigh_proc_dointvec(ctl, write, buffer, lenp, ppos);
else if (strcmp(ctl->procname, "base_reachable_time") == 0)
- ret = proc_dointvec_jiffies(ctl, write,
- buffer, lenp, ppos);
+ ret = neigh_proc_dointvec_jiffies(ctl, write,
+ buffer, lenp, ppos);
else if ((strcmp(ctl->procname, "retrans_time_ms") == 0) ||
(strcmp(ctl->procname, "base_reachable_time_ms") == 0))
- ret = proc_dointvec_ms_jiffies(ctl, write,
- buffer, lenp, ppos);
+ ret = neigh_proc_dointvec_ms_jiffies(ctl, write,
+ buffer, lenp, ppos);
else
ret = -1;
if (write && ret == 0 && dev && (idev = in6_dev_get(dev)) != NULL) {
- if (ctl->data == &idev->nd_parms->base_reachable_time)
- idev->nd_parms->reachable_time = neigh_rand_reach_time(idev->nd_parms->base_reachable_time);
+ if (ctl->data == &NEIGH_VAR(idev->nd_parms, BASE_REACHABLE_TIME))
+ idev->nd_parms->reachable_time =
+ neigh_rand_reach_time(NEIGH_VAR(idev->nd_parms, BASE_REACHABLE_TIME));
idev->tstamp = jiffies;
inet6_ifinfo_notify(RTM_NEWLINK, idev);
in6_dev_put(idev);
@@ -1722,7 +1730,7 @@ int __init ndisc_init(void)
neigh_table_init(&nd_tbl);
#ifdef CONFIG_SYSCTL
- err = neigh_sysctl_register(NULL, &nd_tbl.parms, "ipv6",
+ err = neigh_sysctl_register(NULL, &nd_tbl.parms,
&ndisc_ifinfo_sysctl_change);
if (err)
goto out_unregister_pernet;
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 7702f9e90a04..35750df744dc 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -28,15 +28,27 @@ config NF_CONNTRACK_IPV6
config NF_TABLES_IPV6
depends on NF_TABLES
tristate "IPv6 nf_tables support"
+ help
+ This option enables IPv6 support for nf_tables.
config NFT_CHAIN_ROUTE_IPV6
depends on NF_TABLES_IPV6
tristate "IPv6 nf_tables route chain support"
+ help
+ This option enables the "route" chain for IPv6 in nf_tables. This
+ chain type is used to force packet re-routing after mangling header
+ fields such as the source or destination address, the flowlabel,
+ the hop limit and the packet mark.
config NFT_CHAIN_NAT_IPV6
depends on NF_TABLES_IPV6
depends on NF_NAT_IPV6 && NFT_NAT
tristate "IPv6 nf_tables nat chain support"
+ help
+ This option enables the "nat" chain for IPv6 in nf_tables. This
+ chain type is used to perform Network Address Translation (NAT)
+ packet transformations, such as rewriting the source or destination
+ address and the source or destination port.
config IP6_NF_IPTABLES
tristate "IP6 tables support (required for filtering)"
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index da00a2ecde55..544b0a9da1b5 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -23,181 +23,18 @@
#include <linux/skbuff.h>
#include <linux/icmpv6.h>
#include <linux/netdevice.h>
-#include <net/ipv6.h>
-#include <net/tcp.h>
#include <net/icmp.h>
-#include <net/ip6_checksum.h>
-#include <net/ip6_fib.h>
-#include <net/ip6_route.h>
#include <net/flow.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_ipv6/ip6t_REJECT.h>
+#include <net/netfilter/ipv6/nf_reject.h>
+
MODULE_AUTHOR("Yasuyuki KOZAKAI <yasuyuki.kozakai@toshiba.co.jp>");
MODULE_DESCRIPTION("Xtables: packet \"rejection\" target for IPv6");
MODULE_LICENSE("GPL");
-/* Send RST reply */
-static void send_reset(struct net *net, struct sk_buff *oldskb, int hook)
-{
- struct sk_buff *nskb;
- struct tcphdr otcph, *tcph;
- unsigned int otcplen, hh_len;
- int tcphoff, needs_ack;
- const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
- struct ipv6hdr *ip6h;
-#define DEFAULT_TOS_VALUE 0x0U
- const __u8 tclass = DEFAULT_TOS_VALUE;
- struct dst_entry *dst = NULL;
- u8 proto;
- __be16 frag_off;
- struct flowi6 fl6;
-
- if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
- (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
- pr_debug("addr is not unicast.\n");
- return;
- }
-
- proto = oip6h->nexthdr;
- tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off);
-
- if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
- pr_debug("Cannot get TCP header.\n");
- return;
- }
-
- otcplen = oldskb->len - tcphoff;
-
- /* IP header checks: fragment, too short. */
- if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) {
- pr_debug("proto(%d) != IPPROTO_TCP, "
- "or too short. otcplen = %d\n",
- proto, otcplen);
- return;
- }
-
- if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr)))
- BUG();
-
- /* No RST for RST. */
- if (otcph.rst) {
- pr_debug("RST is set\n");
- return;
- }
-
- /* Check checksum. */
- if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) {
- pr_debug("TCP checksum is invalid\n");
- return;
- }
-
- memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_proto = IPPROTO_TCP;
- fl6.saddr = oip6h->daddr;
- fl6.daddr = oip6h->saddr;
- fl6.fl6_sport = otcph.dest;
- fl6.fl6_dport = otcph.source;
- security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
- dst = ip6_route_output(net, NULL, &fl6);
- if (dst == NULL || dst->error) {
- dst_release(dst);
- return;
- }
- dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
- if (IS_ERR(dst))
- return;
-
- hh_len = (dst->dev->hard_header_len + 15)&~15;
- nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
- + sizeof(struct tcphdr) + dst->trailer_len,
- GFP_ATOMIC);
-
- if (!nskb) {
- net_dbg_ratelimited("cannot alloc skb\n");
- dst_release(dst);
- return;
- }
-
- skb_dst_set(nskb, dst);
-
- skb_reserve(nskb, hh_len + dst->header_len);
-
- skb_put(nskb, sizeof(struct ipv6hdr));
- skb_reset_network_header(nskb);
- ip6h = ipv6_hdr(nskb);
- ip6_flow_hdr(ip6h, tclass, 0);
- ip6h->hop_limit = ip6_dst_hoplimit(dst);
- ip6h->nexthdr = IPPROTO_TCP;
- ip6h->saddr = oip6h->daddr;
- ip6h->daddr = oip6h->saddr;
-
- skb_reset_transport_header(nskb);
- tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
- /* Truncate to length (no data) */
- tcph->doff = sizeof(struct tcphdr)/4;
- tcph->source = otcph.dest;
- tcph->dest = otcph.source;
-
- if (otcph.ack) {
- needs_ack = 0;
- tcph->seq = otcph.ack_seq;
- tcph->ack_seq = 0;
- } else {
- needs_ack = 1;
- tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin
- + otcplen - (otcph.doff<<2));
- tcph->seq = 0;
- }
-
- /* Reset flags */
- ((u_int8_t *)tcph)[13] = 0;
- tcph->rst = 1;
- tcph->ack = needs_ack;
- tcph->window = 0;
- tcph->urg_ptr = 0;
- tcph->check = 0;
-
- /* Adjust TCP checksum */
- tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr,
- &ipv6_hdr(nskb)->daddr,
- sizeof(struct tcphdr), IPPROTO_TCP,
- csum_partial(tcph,
- sizeof(struct tcphdr), 0));
-
- nf_ct_attach(nskb, oldskb);
-
-#ifdef CONFIG_BRIDGE_NETFILTER
- /* If we use ip6_local_out for bridged traffic, the MAC source on
- * the RST will be ours, instead of the destination's. This confuses
- * some routers/firewalls, and they drop the packet. So we need to
- * build the eth header using the original destination's MAC as the
- * source, and send the RST packet directly.
- */
- if (oldskb->nf_bridge) {
- struct ethhdr *oeth = eth_hdr(oldskb);
- nskb->dev = oldskb->nf_bridge->physindev;
- nskb->protocol = htons(ETH_P_IPV6);
- ip6h->payload_len = htons(sizeof(struct tcphdr));
- if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
- oeth->h_source, oeth->h_dest, nskb->len) < 0)
- return;
- dev_queue_xmit(nskb);
- } else
-#endif
- ip6_local_out(nskb);
-}
-
-static inline void
-send_unreach(struct net *net, struct sk_buff *skb_in, unsigned char code,
- unsigned int hooknum)
-{
- if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
- skb_in->dev = net->loopback_dev;
-
- icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
-}
static unsigned int
reject_tg6(struct sk_buff *skb, const struct xt_action_param *par)
@@ -208,25 +45,25 @@ reject_tg6(struct sk_buff *skb, const struct xt_action_param *par)
pr_debug("%s: medium point\n", __func__);
switch (reject->with) {
case IP6T_ICMP6_NO_ROUTE:
- send_unreach(net, skb, ICMPV6_NOROUTE, par->hooknum);
+ nf_send_unreach6(net, skb, ICMPV6_NOROUTE, par->hooknum);
break;
case IP6T_ICMP6_ADM_PROHIBITED:
- send_unreach(net, skb, ICMPV6_ADM_PROHIBITED, par->hooknum);
+ nf_send_unreach6(net, skb, ICMPV6_ADM_PROHIBITED, par->hooknum);
break;
case IP6T_ICMP6_NOT_NEIGHBOUR:
- send_unreach(net, skb, ICMPV6_NOT_NEIGHBOUR, par->hooknum);
+ nf_send_unreach6(net, skb, ICMPV6_NOT_NEIGHBOUR, par->hooknum);
break;
case IP6T_ICMP6_ADDR_UNREACH:
- send_unreach(net, skb, ICMPV6_ADDR_UNREACH, par->hooknum);
+ nf_send_unreach6(net, skb, ICMPV6_ADDR_UNREACH, par->hooknum);
break;
case IP6T_ICMP6_PORT_UNREACH:
- send_unreach(net, skb, ICMPV6_PORT_UNREACH, par->hooknum);
+ nf_send_unreach6(net, skb, ICMPV6_PORT_UNREACH, par->hooknum);
break;
case IP6T_ICMP6_ECHOREPLY:
/* Do nothing */
break;
case IP6T_TCP_RESET:
- send_reset(net, skb, par->hooknum);
+ nf_send_reset6(net, skb, par->hooknum);
break;
default:
net_info_ratelimited("case %u not handled yet\n", reject->with);
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index f78f41aca8e9..a0d17270117c 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -446,6 +446,7 @@ static void synproxy_tg6_destroy(const struct xt_tgdtor_param *par)
static struct xt_target synproxy_tg6_reg __read_mostly = {
.name = "SYNPROXY",
.family = NFPROTO_IPV6,
+ .hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD),
.target = synproxy_tg6,
.targetsize = sizeof(struct xt_synproxy_info),
.checkentry = synproxy_tg6_check,
diff --git a/net/ipv6/netfilter/nf_tables_ipv6.c b/net/ipv6/netfilter/nf_tables_ipv6.c
index d77db8a13505..0d812b31277d 100644
--- a/net/ipv6/netfilter/nf_tables_ipv6.c
+++ b/net/ipv6/netfilter/nf_tables_ipv6.c
@@ -16,34 +16,51 @@
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_ipv6.h>
+static unsigned int nft_do_chain_ipv6(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct nft_pktinfo pkt;
+
+ /* malformed packet, drop it */
+ if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
+ return NF_DROP;
+
+ return nft_do_chain(&pkt, ops);
+}
+
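/* Editorial note: hoisting nft_do_chain_ipv6() above the hook table
 * lets the same entry point serve LOCAL_IN, FORWARD, PRE_ROUTING and
 * POST_ROUTING directly, while LOCAL_OUT keeps its short-skb guard in
 * nft_ipv6_output() before delegating here.
 */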
static unsigned int nft_ipv6_output(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- struct nft_pktinfo pkt;
-
if (unlikely(skb->len < sizeof(struct ipv6hdr))) {
if (net_ratelimit())
pr_info("nf_tables_ipv6: ignoring short SOCK_RAW "
"packet\n");
return NF_ACCEPT;
}
- if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
- return NF_DROP;
- return nft_do_chain_pktinfo(&pkt, ops);
+ return nft_do_chain_ipv6(ops, skb, in, out, okfn);
}
-static struct nft_af_info nft_af_ipv6 __read_mostly = {
+struct nft_af_info nft_af_ipv6 __read_mostly = {
.family = NFPROTO_IPV6,
.nhooks = NF_INET_NUMHOOKS,
.owner = THIS_MODULE,
+ .nops = 1,
.hooks = {
+ [NF_INET_LOCAL_IN] = nft_do_chain_ipv6,
[NF_INET_LOCAL_OUT] = nft_ipv6_output,
+ [NF_INET_FORWARD] = nft_do_chain_ipv6,
+ [NF_INET_PRE_ROUTING] = nft_do_chain_ipv6,
+ [NF_INET_POST_ROUTING] = nft_do_chain_ipv6,
},
};
+EXPORT_SYMBOL_GPL(nft_af_ipv6);
static int nf_tables_ipv6_init_net(struct net *net)
{
@@ -73,44 +90,28 @@ static struct pernet_operations nf_tables_ipv6_net_ops = {
.exit = nf_tables_ipv6_exit_net,
};
-static unsigned int
-nft_do_chain_ipv6(const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- struct nft_pktinfo pkt;
-
- /* malformed packet, drop it */
- if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
- return NF_DROP;
-
- return nft_do_chain_pktinfo(&pkt, ops);
-}
-
-static struct nf_chain_type filter_ipv6 = {
- .family = NFPROTO_IPV6,
+static const struct nf_chain_type filter_ipv6 = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
+ .family = NFPROTO_IPV6,
+ .owner = THIS_MODULE,
.hook_mask = (1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_FORWARD) |
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING),
- .fn = {
- [NF_INET_LOCAL_IN] = nft_do_chain_ipv6,
- [NF_INET_LOCAL_OUT] = nft_ipv6_output,
- [NF_INET_FORWARD] = nft_do_chain_ipv6,
- [NF_INET_PRE_ROUTING] = nft_do_chain_ipv6,
- [NF_INET_POST_ROUTING] = nft_do_chain_ipv6,
- },
};
static int __init nf_tables_ipv6_init(void)
{
+ int ret;
+
nft_register_chain_type(&filter_ipv6);
- return register_pernet_subsys(&nf_tables_ipv6_net_ops);
+ ret = register_pernet_subsys(&nf_tables_ipv6_net_ops);
+ if (ret < 0)
+ nft_unregister_chain_type(&filter_ipv6);
+
+ return ret;
}
static void __exit nf_tables_ipv6_exit(void)
diff --git a/net/ipv6/netfilter/nft_chain_nat_ipv6.c b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
index e86dcd70dc76..9c3297a768fd 100644
--- a/net/ipv6/netfilter/nft_chain_nat_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
@@ -79,7 +79,7 @@ static unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops,
nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out);
- ret = nft_do_chain_pktinfo(&pkt, ops);
+ ret = nft_do_chain(&pkt, ops);
if (ret != NF_ACCEPT)
return ret;
if (!nf_nat_initialized(ct, maniptype)) {
@@ -170,21 +170,21 @@ static unsigned int nf_nat_ipv6_output(const struct nf_hook_ops *ops,
return ret;
}
-static struct nf_chain_type nft_chain_nat_ipv6 = {
- .family = NFPROTO_IPV6,
+static const struct nf_chain_type nft_chain_nat_ipv6 = {
.name = "nat",
.type = NFT_CHAIN_T_NAT,
+ .family = NFPROTO_IPV6,
+ .owner = THIS_MODULE,
.hook_mask = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_LOCAL_IN),
- .fn = {
+ .hooks = {
[NF_INET_PRE_ROUTING] = nf_nat_ipv6_prerouting,
[NF_INET_POST_ROUTING] = nf_nat_ipv6_postrouting,
[NF_INET_LOCAL_OUT] = nf_nat_ipv6_output,
[NF_INET_LOCAL_IN] = nf_nat_ipv6_fn,
},
- .me = THIS_MODULE,
};
static int __init nft_chain_nat_ipv6_init(void)
diff --git a/net/ipv6/netfilter/nft_chain_route_ipv6.c b/net/ipv6/netfilter/nft_chain_route_ipv6.c
index 3fe40f0456ad..42031299585e 100644
--- a/net/ipv6/netfilter/nft_chain_route_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_route_ipv6.c
@@ -47,7 +47,7 @@ static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
/* flowlabel and prio (includes version, which shouldn't change either) */
flowlabel = *((u32 *)ipv6_hdr(skb));
- ret = nft_do_chain_pktinfo(&pkt, ops);
+ ret = nft_do_chain(&pkt, ops);
if (ret != NF_DROP && ret != NF_QUEUE &&
(memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) ||
memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) ||
@@ -59,15 +59,15 @@ static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
return ret;
}
-static struct nf_chain_type nft_chain_route_ipv6 = {
- .family = NFPROTO_IPV6,
+static const struct nf_chain_type nft_chain_route_ipv6 = {
.name = "route",
.type = NFT_CHAIN_T_ROUTE,
+ .family = NFPROTO_IPV6,
+ .owner = THIS_MODULE,
.hook_mask = (1 << NF_INET_LOCAL_OUT),
- .fn = {
+ .hooks = {
[NF_INET_LOCAL_OUT] = nf_route_table_hook,
},
- .me = THIS_MODULE,
};
static int __init nft_chain_route_init(void)
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 8815e31a87fe..15d23b8c2129 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -57,7 +57,8 @@ static struct inet_protosw pingv6_protosw = {
/* Compatibility glue so we can support IPv6 when it's compiled as a module */
-static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
+static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
+ int *addr_len)
{
return -EAFNOSUPPORT;
}
@@ -144,7 +145,7 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
- dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr, 1);
+ dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr);
if (IS_ERR(dst))
return PTR_ERR(dst);
rt = (struct rt6_info *) dst;
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index 22d1bd4670da..e048cf1bb6a2 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -36,10 +36,6 @@ int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol
}
EXPORT_SYMBOL(inet6_add_protocol);
-/*
- * Remove a protocol from the hash tables.
- */
-
int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol)
{
int ret;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index e24ff1df0401..5f10b7ea7ccc 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -466,10 +466,10 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
return -EOPNOTSUPP;
if (flags & MSG_ERRQUEUE)
- return ipv6_recv_error(sk, msg, len);
+ return ipv6_recv_error(sk, msg, len, addr_len);
if (np->rxpmtu && np->rxopt.bits.rxpmtu)
- return ipv6_recv_rxpmtu(sk, msg, len);
+ return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
@@ -792,7 +792,6 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
- daddr = &flowlabel->dst;
}
}
@@ -865,7 +864,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
fl6.flowi6_oif = np->ucast_oif;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
- dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
+ dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto out;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7faa9d5e1503..11dac21e6586 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -66,8 +66,9 @@
#endif
enum rt6_nud_state {
- RT6_NUD_FAIL_HARD = -2,
- RT6_NUD_FAIL_SOFT = -1,
+ RT6_NUD_FAIL_HARD = -3,
+ RT6_NUD_FAIL_PROBE = -2,
+ RT6_NUD_FAIL_DO_RR = -1,
RT6_NUD_SUCCEED = 1
};
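/* Editorial note: the three-way failure split assumed here is that
 * FAIL_HARD aborts matching, FAIL_DO_RR is demoted to the lowest
 * valid score and arms round-robin, and FAIL_PROBE stays negative so
 * the route can still be probed without ever winning selection --
 * see the RT6_NUD_FAIL_DO_RR/FAIL_HARD handling in find_match()
 * below.
 */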
@@ -84,6 +85,8 @@ static int ip6_dst_gc(struct dst_ops *ops);
static int ip6_pkt_discard(struct sk_buff *skb);
static int ip6_pkt_discard_out(struct sk_buff *skb);
+static int ip6_pkt_prohibit(struct sk_buff *skb);
+static int ip6_pkt_prohibit_out(struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu);
@@ -101,6 +104,36 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
const struct in6_addr *gwaddr, int ifindex);
#endif
+static void rt6_bind_peer(struct rt6_info *rt, int create)
+{
+ struct inet_peer_base *base;
+ struct inet_peer *peer;
+
+ base = inetpeer_base_ptr(rt->_rt6i_peer);
+ if (!base)
+ return;
+
+ peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create);
+ if (peer) {
+ if (!rt6_set_peer(rt, peer))
+ inet_putpeer(peer);
+ }
+}
+
+static struct inet_peer *__rt6_get_peer(struct rt6_info *rt, int create)
+{
+ if (rt6_has_peer(rt))
+ return rt6_peer_ptr(rt);
+
+ rt6_bind_peer(rt, create);
+ return (rt6_has_peer(rt) ? rt6_peer_ptr(rt) : NULL);
+}
+
+static struct inet_peer *rt6_get_peer_create(struct rt6_info *rt)
+{
+ return __rt6_get_peer(rt, 1);
+}
+
static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
{
struct rt6_info *rt = (struct rt6_info *) dst;
@@ -234,9 +267,6 @@ static const struct rt6_info ip6_null_entry_template = {
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
-static int ip6_pkt_prohibit(struct sk_buff *skb);
-static int ip6_pkt_prohibit_out(struct sk_buff *skb);
-
static const struct rt6_info ip6_prohibit_entry_template = {
.dst = {
.__refcnt = ATOMIC_INIT(1),
@@ -312,22 +342,6 @@ static void ip6_dst_destroy(struct dst_entry *dst)
}
}
-void rt6_bind_peer(struct rt6_info *rt, int create)
-{
- struct inet_peer_base *base;
- struct inet_peer *peer;
-
- base = inetpeer_base_ptr(rt->_rt6i_peer);
- if (!base)
- return;
-
- peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create);
- if (peer) {
- if (!rt6_set_peer(rt, peer))
- inet_putpeer(peer);
- }
-}
-
static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
int how)
{
@@ -522,7 +536,7 @@ static void rt6_probe(struct rt6_info *rt)
work = kmalloc(sizeof(*work), GFP_ATOMIC);
if (neigh && work)
- neigh->updated = jiffies;
+ __neigh_set_probe_once(neigh);
if (neigh)
write_unlock(&neigh->lock);
@@ -578,11 +592,13 @@ static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
#ifdef CONFIG_IPV6_ROUTER_PREF
else if (!(neigh->nud_state & NUD_FAILED))
ret = RT6_NUD_SUCCEED;
+ else
+ ret = RT6_NUD_FAIL_PROBE;
#endif
read_unlock(&neigh->lock);
} else {
ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
- RT6_NUD_SUCCEED : RT6_NUD_FAIL_SOFT;
+ RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
}
rcu_read_unlock_bh();
@@ -619,16 +635,17 @@ static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
goto out;
m = rt6_score_route(rt, oif, strict);
- if (m == RT6_NUD_FAIL_SOFT) {
+ if (m == RT6_NUD_FAIL_DO_RR) {
match_do_rr = true;
m = 0; /* lowest valid score */
- } else if (m < 0) {
+ } else if (m == RT6_NUD_FAIL_HARD) {
goto out;
}
if (strict & RT6_LOOKUP_F_REACHABLE)
rt6_probe(rt);
+ /* note that m can be RT6_NUD_FAIL_PROBE at this point */
if (m > *mpri) {
*do_rr = match_do_rr;
*mpri = m;
@@ -1565,21 +1582,24 @@ int ip6_route_add(struct fib6_config *cfg)
goto out;
}
}
- rt->dst.output = ip6_pkt_discard_out;
- rt->dst.input = ip6_pkt_discard;
rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
switch (cfg->fc_type) {
case RTN_BLACKHOLE:
rt->dst.error = -EINVAL;
+ rt->dst.output = dst_discard;
+ rt->dst.input = dst_discard;
break;
case RTN_PROHIBIT:
rt->dst.error = -EACCES;
+ rt->dst.output = ip6_pkt_prohibit_out;
+ rt->dst.input = ip6_pkt_prohibit;
break;
case RTN_THROW:
- rt->dst.error = -EAGAIN;
- break;
default:
- rt->dst.error = -ENETUNREACH;
+ rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
+ : -ENETUNREACH;
+ rt->dst.output = ip6_pkt_discard_out;
+ rt->dst.input = ip6_pkt_discard;
break;
}
goto install_route;
@@ -1903,9 +1923,7 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
else
rt->rt6i_gateway = *dest;
rt->rt6i_flags = ort->rt6i_flags;
- if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
- (RTF_DEFAULT | RTF_ADDRCONF))
- rt6_set_from(rt, ort);
+ rt6_set_from(rt, ort);
rt->rt6i_metric = 0;
#ifdef CONFIG_IPV6_SUBTREES
@@ -2144,8 +2162,6 @@ static int ip6_pkt_discard_out(struct sk_buff *skb)
return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
}
-#ifdef CONFIG_IPV6_MULTIPLE_TABLES
-
static int ip6_pkt_prohibit(struct sk_buff *skb)
{
return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
@@ -2157,8 +2173,6 @@ static int ip6_pkt_prohibit_out(struct sk_buff *skb)
return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
}
-#endif
-
/*
* Allocate a dst for local (unicast / anycast) address.
*/
@@ -2168,12 +2182,10 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
bool anycast)
{
struct net *net = dev_net(idev->dev);
- struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, 0, NULL);
-
- if (!rt) {
- net_warn_ratelimited("Maximum number of routes reached, consider increasing route/max_size\n");
+ struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
+ DST_NOCOUNT, NULL);
+ if (!rt)
return ERR_PTR(-ENOMEM);
- }
in6_dev_hold(idev);
@@ -2244,7 +2256,7 @@ void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
.net = net,
.addr = &ifp->addr,
};
- fib6_clean_all(net, fib6_remove_prefsrc, 0, &adni);
+ fib6_clean_all(net, fib6_remove_prefsrc, &adni);
}
struct arg_dev_net {
@@ -2271,7 +2283,7 @@ void rt6_ifdown(struct net *net, struct net_device *dev)
.net = net,
};
- fib6_clean_all(net, fib6_ifdown, 0, &adn);
+ fib6_clean_all(net, fib6_ifdown, &adn);
icmp6_clean_all(fib6_ifdown, &adn);
}
@@ -2326,7 +2338,7 @@ void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
.mtu = mtu,
};
- fib6_clean_all(dev_net(dev), rt6_mtu_change_route, 0, &arg);
+ fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
}
static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 1b4a4a953675..3dfbcf1dcb1c 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -478,14 +478,44 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
dev_put(dev);
}
+/* Generate icmpv6 with type/code ICMPV6_DEST_UNREACH/ICMPV6_ADDR_UNREACH
+ * if sufficient data bytes are available
+ */
+static int ipip6_err_gen_icmpv6_unreach(struct sk_buff *skb)
+{
+ const struct iphdr *iph = (const struct iphdr *) skb->data;
+ struct rt6_info *rt;
+ struct sk_buff *skb2;
+
+ if (!pskb_may_pull(skb, iph->ihl * 4 + sizeof(struct ipv6hdr) + 8))
+ return 1;
+
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+
+ if (!skb2)
+ return 1;
+
+ skb_dst_drop(skb2);
+ skb_pull(skb2, iph->ihl * 4);
+ skb_reset_network_header(skb2);
+
+ rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0, 0);
+
+ if (rt && rt->dst.dev)
+ skb2->dev = rt->dst.dev;
+
+ icmpv6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
+
+ if (rt)
+ ip6_rt_put(rt);
+
+ kfree_skb(skb2);
+
+ return 0;
+}
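/* Editorial note: the pskb_may_pull() length above covers the quoted
 * outer IPv4 header (iph->ihl * 4), the embedded IPv6 header, and at
 * least 8 bytes of its payload -- enough for icmpv6_send() to quote
 * the transport ports; if the ICMP error carried less than that, the
 * helper gives up and returns 1 so the caller carries on with its
 * normal IPv4 error handling.
 */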
static int ipip6_err(struct sk_buff *skb, u32 info)
{
-
-/* All the routers (except for Linux) return only
- 8 bytes of packet payload. It means, that precise relaying of
- ICMP in the real Internet is absolutely infeasible.
- */
const struct iphdr *iph = (const struct iphdr *)skb->data;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
@@ -500,7 +530,6 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
case ICMP_DEST_UNREACH:
switch (code) {
case ICMP_SR_FAILED:
- case ICMP_PORT_UNREACH:
/* Impossible event. */
return 0;
default:
@@ -545,6 +574,9 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
goto out;
err = 0;
+ if (!ipip6_err_gen_icmpv6_unreach(skb))
+ goto out;
+
if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
goto out;
@@ -639,7 +671,7 @@ static int ipip6_rcv(struct sk_buff *skb)
tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
iph->saddr, iph->daddr);
if (tunnel != NULL) {
- struct pcpu_tstats *tstats;
+ struct pcpu_sw_netstats *tstats;
if (tunnel->parms.iph.protocol != IPPROTO_IPV6 &&
tunnel->parms.iph.protocol != 0)
@@ -670,8 +702,10 @@ static int ipip6_rcv(struct sk_buff *skb)
}
tstats = this_cpu_ptr(tunnel->dev->tstats);
+ u64_stats_update_begin(&tstats->syncp);
tstats->rx_packets++;
tstats->rx_bytes += skb->len;
+ u64_stats_update_end(&tstats->syncp);
netif_rx(skb);
@@ -892,7 +926,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
if (tunnel->parms.iph.daddr && skb_dst(skb))
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
- if (skb->len > mtu) {
+ if (skb->len > mtu && !skb_is_gso(skb)) {
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
ip_rt_put(rt);
goto tx_error;
@@ -919,7 +953,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
if (!new_skb) {
ip_rt_put(rt);
dev->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
if (skb->sk)
@@ -934,8 +968,10 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT);
- if (IS_ERR(skb))
+ if (IS_ERR(skb)) {
+ ip_rt_put(rt);
goto out;
+ }
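/* Editorial note: without the ip_rt_put() above, the IPv4 route
 * reference taken earlier in ipip6_tunnel_xmit() would leak whenever
 * iptunnel_handle_offloads() failed -- the error path previously
 * jumped straight to out with rt still held.
 */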
err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos,
ttl, df, !net_eq(tunnel->net, dev_net(dev)));
@@ -945,7 +981,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
tx_error_icmp:
dst_link_failure(skb);
tx_error:
- dev_kfree_skb(skb);
+ kfree_skb(skb);
out:
dev->stats.tx_errors++;
return NETDEV_TX_OK;
@@ -985,7 +1021,7 @@ static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
tx_err:
dev->stats.tx_errors++;
- dev_kfree_skb(skb);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -1329,12 +1365,12 @@ static int ipip6_tunnel_init(struct net_device *dev)
memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
ipip6_tunnel_bind_dev(dev);
- dev->tstats = alloc_percpu(struct pcpu_tstats);
+ dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
for_each_possible_cpu(i) {
- struct pcpu_tstats *ipip6_tunnel_stats;
+ struct pcpu_sw_netstats *ipip6_tunnel_stats;
ipip6_tunnel_stats = per_cpu_ptr(dev->tstats, i);
u64_stats_init(&ipip6_tunnel_stats->syncp);
}
@@ -1359,12 +1395,12 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
iph->ihl = 5;
iph->ttl = 64;
- dev->tstats = alloc_percpu(struct pcpu_tstats);
+ dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
for_each_possible_cpu(i) {
- struct pcpu_tstats *ipip6_fb_stats;
+ struct pcpu_sw_netstats *ipip6_fb_stats;
ipip6_fb_stats = per_cpu_ptr(dev->tstats, i);
u64_stats_init(&ipip6_fb_stats->syncp);
}
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 535a3ad262f1..bb53a5e73c1a 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -247,7 +247,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
fl6.fl6_sport = inet_sk(sk)->inet_sport;
security_req_classify_flow(req, flowi6_to_flowi(&fl6));
- dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
+ dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst))
goto out_free;
}
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 107b2f1d90ae..6b6a2c83027e 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -24,6 +24,13 @@ static struct ctl_table ipv6_table_template[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
+ {
+ .procname = "anycast_src_echo_reply",
+ .data = &init_net.ipv6.anycast_src_echo_reply,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
{ }
};
@@ -51,6 +58,7 @@ static int __net_init ipv6_sysctl_net_init(struct net *net)
if (!ipv6_table)
goto out;
ipv6_table[0].data = &net->ipv6.sysctl.bindv6only;
+ ipv6_table[1].data = &net->ipv6.anycast_src_echo_reply;
ipv6_route_table = ipv6_route_sysctl_init(net);
if (!ipv6_route_table)
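/* Editorial note: once this per-netns template is registered, the
 * knob should surface as /proc/sys/net/ipv6/anycast_src_echo_reply
 * (alongside bindv6only); together with the icmpv6_echo_reply()
 * change earlier in this patch it lets a node answer echo requests
 * addressed to one of its anycast addresses, using that anycast
 * address as the source.
 */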
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 0740f93a114a..ffd5fa8bdb15 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -156,7 +156,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
- usin->sin6_addr = flowlabel->dst;
fl6_sock_release(flowlabel);
}
}
@@ -165,12 +164,12 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
* connect() to INADDR_ANY means loopback (BSD'ism).
*/
- if(ipv6_addr_any(&usin->sin6_addr))
+ if (ipv6_addr_any(&usin->sin6_addr))
usin->sin6_addr.s6_addr[15] = 0x1;
addr_type = ipv6_addr_type(&usin->sin6_addr);
- if(addr_type & IPV6_ADDR_MULTICAST)
+ if (addr_type & IPV6_ADDR_MULTICAST)
return -ENETUNREACH;
if (addr_type&IPV6_ADDR_LINKLOCAL) {
@@ -258,7 +257,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
- dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
+ dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto failure;
@@ -337,7 +336,7 @@ static void tcp_v6_mtu_reduced(struct sock *sk)
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
- const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
+ const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
struct ipv6_pinfo *np;
struct sock *sk;
@@ -398,6 +397,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (sk->sk_state == TCP_LISTEN)
goto out;
+ if (!ip6_sk_accept_pmtu(sk))
+ goto out;
+
tp->mtu_info = ntohl(info);
if (!sock_owned_by_user(sk))
tcp_v6_mtu_reduced(sk);
@@ -467,7 +469,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
{
struct inet_request_sock *ireq = inet_rsk(req);
struct ipv6_pinfo *np = inet6_sk(sk);
- struct sk_buff * skb;
+ struct sk_buff *skb;
int err = -ENOMEM;
/* First, grab a route. */
@@ -801,7 +803,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
* Underlying function will use this to retrieve the network
* namespace
*/
- dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
+ dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
if (!IS_ERR(dst)) {
skb_dst_set(buff, dst);
ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
@@ -910,7 +912,7 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
}
-static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
+static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
struct request_sock *req, **prev;
const struct tcphdr *th = tcp_hdr(skb);
@@ -1083,9 +1085,9 @@ drop:
return 0; /* don't send reset */
}
-static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
- struct request_sock *req,
- struct dst_entry *dst)
+static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req,
+ struct dst_entry *dst)
{
struct inet_request_sock *ireq;
struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
@@ -1135,7 +1137,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
- newnp->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(skb));
+ newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks count
@@ -1215,7 +1217,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
- newnp->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(skb));
+ newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
/* Clone native IPv6 options from listening socket (if any)
@@ -1380,7 +1382,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
* otherwise we just shortcircuit this and continue with
* the new socket..
*/
- if(nsk != sk) {
+ if (nsk != sk) {
sock_rps_save_rxhash(nsk, skb);
if (tcp_child_process(sk, nsk, skb))
goto reset;
@@ -1425,8 +1427,8 @@ ipv6_pktoptions:
np->mcast_oif = inet6_iif(opt_skb);
if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
- if (np->rxopt.bits.rxtclass)
- np->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(opt_skb));
+ if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
+ np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
if (ipv6_opt_accepted(sk, opt_skb)) {
skb_set_owner_r(opt_skb, sk);
opt_skb = xchg(&np->pktoptions, opt_skb);
@@ -1740,7 +1742,7 @@ static void get_openreq6(struct seq_file *seq,
dest->s6_addr32[2], dest->s6_addr32[3],
ntohs(inet_rsk(req)->ir_rmt_port),
TCP_SYN_RECV,
- 0,0, /* could print option size, but that is af dependent. */
+ 0, 0, /* could print option size, but that is af dependent. */
1, /* timers active (only the expire timer) */
jiffies_to_clock_t(ttd),
req->num_timeout,
@@ -1799,7 +1801,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
atomic_read(&sp->sk_refcnt), sp,
jiffies_to_clock_t(icsk->icsk_rto),
jiffies_to_clock_t(icsk->icsk_ack.ato),
- (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
+ (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
tp->snd_cwnd,
tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
);
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index c1097c798900..0d78132ff18a 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -37,44 +37,42 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
{
const struct ipv6hdr *iph = skb_gro_network_header(skb);
__wsum wsum;
- __sum16 sum;
+
+ /* Don't bother verifying checksum if we're going to flush anyway. */
+ if (NAPI_GRO_CB(skb)->flush)
+ goto skip_csum;
+
+ wsum = skb->csum;
switch (skb->ip_summed) {
+ case CHECKSUM_NONE:
+ wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb),
+ wsum);
+
+ /* fall through */
+
case CHECKSUM_COMPLETE:
if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
- skb->csum)) {
+ wsum)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
break;
}
-flush:
+
NAPI_GRO_CB(skb)->flush = 1;
return NULL;
-
- case CHECKSUM_NONE:
- wsum = ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
- skb_gro_len(skb),
- IPPROTO_TCP, 0));
- sum = csum_fold(skb_checksum(skb,
- skb_gro_offset(skb),
- skb_gro_len(skb),
- wsum));
- if (sum)
- goto flush;
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- break;
}
+skip_csum:
return tcp_gro_receive(head, skb);
}
-static int tcp6_gro_complete(struct sk_buff *skb)
+static int tcp6_gro_complete(struct sk_buff *skb, int thoff)
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
- th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
- &iph->saddr, &iph->daddr, 0);
+ th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
+ &iph->daddr, 0);
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
return tcp_gro_complete(skb);
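
The tcp6_gro_receive() rework above computes the payload checksum for CHECKSUM_NONE packets and then deliberately falls through into the CHECKSUM_COMPLETE validation, so both paths share a single pseudo-header check; packets already marked for flush skip verification entirely. tcp6_gro_complete() additionally takes the transport offset as a parameter instead of rederiving it. A standalone sketch of the shared fall-through control flow, with stand-in helpers in place of skb_checksum() and tcp_v6_check():

#include <stdio.h>

enum summed { CSUM_NONE, CSUM_COMPLETE };

/* stand-in for skb_checksum(): accumulate the payload sum */
static unsigned int add_payload_csum(unsigned int wsum) { return wsum + 0x1234; }

/* stand-in for tcp_v6_check(): 0 means the checksum verified */
static int pseudo_check(unsigned int wsum) { return wsum == 0x1234 ? 0 : 1; }

static const char *rx(enum summed state, unsigned int wsum, int flush)
{
	if (flush)
		goto skip_csum;	/* flushing anyway, don't bother verifying */

	switch (state) {
	case CSUM_NONE:
		wsum = add_payload_csum(wsum);	/* compute it ourselves */
		/* fall through */
	case CSUM_COMPLETE:
		if (pseudo_check(wsum))
			return "flush: bad checksum";
		break;
	}
skip_csum:
	return "continue GRO";
}

int main(void)
{
	printf("%s\n", rx(CSUM_NONE, 0, 0));
	printf("%s\n", rx(CSUM_COMPLETE, 0xffff, 0));
	return 0;
}
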
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index 4b0f50d9a962..2c4e4c5c7614 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Authors Mitsuru KANDA <mk@linux-ipv6.org>
* YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 81eb8cf8389b..fa9d988f4012 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -393,10 +393,10 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
bool slow;
if (flags & MSG_ERRQUEUE)
- return ipv6_recv_error(sk, msg, len);
+ return ipv6_recv_error(sk, msg, len, addr_len);
if (np->rxpmtu && np->rxopt.bits.rxpmtu)
- return ipv6_recv_rxpmtu(sk, msg, len);
+ return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
try_again:
skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
@@ -538,8 +538,11 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (sk == NULL)
return;
- if (type == ICMPV6_PKT_TOOBIG)
+ if (type == ICMPV6_PKT_TOOBIG) {
+ if (!ip6_sk_accept_pmtu(sk))
+ goto out;
ip6_sk_update_pmtu(skb, sk, info);
+ }
if (type == NDISC_REDIRECT) {
ip6_sk_redirect(skb, sk);
goto out;
@@ -1140,7 +1143,6 @@ do_udp_sendmsg:
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
- daddr = &flowlabel->dst;
}
}
@@ -1221,7 +1223,7 @@ do_udp_sendmsg:
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
- dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, true);
+ dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
dst = NULL;
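
The __udp6_lib_err() hunk makes UDPv6 consult ip6_sk_accept_pmtu() before acting on an ICMPV6_PKT_TOOBIG message, so a socket can opt out of path-MTU shrinkage. From userspace the corresponding knob is the IPV6_MTU_DISCOVER socket option; a hedged sketch, assuming IPV6_PMTUDISC_INTERFACE is available (it is new in this kernel generation):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPV6_PMTUDISC_INTERFACE
#define IPV6_PMTUDISC_INTERFACE 4	/* assumption: matches the uapi value */
#endif

int main(void)
{
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
	int val = IPV6_PMTUDISC_INTERFACE;

	if (fd < 0 || setsockopt(fd, IPPROTO_IPV6, IPV6_MTU_DISCOVER,
				 &val, sizeof(val)) < 0)
		perror("IPV6_MTU_DISCOVER");	/* older kernels reject this value */
	else
		printf("PKT_TOOBIG-driven PMTU updates disabled for this socket\n");
	return 0;
}
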
diff --git a/net/ipv6/xfrm6_mode_ro.c b/net/ipv6/xfrm6_mode_ro.c
index 63d5d493098a..0e015906f9ca 100644
--- a/net/ipv6/xfrm6_mode_ro.c
+++ b/net/ipv6/xfrm6_mode_ro.c
@@ -15,8 +15,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
* Authors:
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index de2bcfaaf759..1c66465a42dd 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Authors Mitsuru KANDA <mk@linux-ipv6.org>
* YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index de7db23049f1..73baf9b346b6 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -25,9 +25,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Linux-IrDA now supports four different types of IrDA sockets:
*
diff --git a/net/irda/discovery.c b/net/irda/discovery.c
index b0b56a339a83..6786e7f193d2 100644
--- a/net/irda/discovery.c
+++ b/net/irda/discovery.c
@@ -24,9 +24,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/net/irda/ircomm/ircomm_core.c b/net/irda/ircomm/ircomm_core.c
index b797daac063c..4490a675b1bb 100644
--- a/net/irda/ircomm/ircomm_core.c
+++ b/net/irda/ircomm/ircomm_core.c
@@ -23,9 +23,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/net/irda/ircomm/ircomm_event.c b/net/irda/ircomm/ircomm_event.c
index d78554fedbac..b172c6522328 100644
--- a/net/irda/ircomm/ircomm_event.c
+++ b/net/irda/ircomm/ircomm_event.c
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/net/irda/ircomm/ircomm_lmp.c b/net/irda/ircomm/ircomm_lmp.c
index 3b8095c771d4..6536114adf37 100644
--- a/net/irda/ircomm/ircomm_lmp.c
+++ b/net/irda/ircomm/ircomm_lmp.c
@@ -24,9 +24,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c
index 308939128359..f80b1a6a244b 100644
--- a/net/irda/ircomm/ircomm_param.c
+++ b/net/irda/ircomm/ircomm_param.c
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/net/irda/ircomm/ircomm_ttp.c b/net/irda/ircomm/ircomm_ttp.c
index 6e6509f22f60..d362d711b79c 100644
--- a/net/irda/ircomm/ircomm_ttp.c
+++ b/net/irda/ircomm/ircomm_ttp.c
@@ -23,9 +23,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 41ac7938268b..2ba8b9705bb7 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -24,9 +24,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/net/irda/ircomm/ircomm_tty_attach.c b/net/irda/ircomm/ircomm_tty_attach.c
index a2a508f5f268..2ee87bf387cc 100644
--- a/net/irda/ircomm/ircomm_tty_attach.c
+++ b/net/irda/ircomm/ircomm_tty_attach.c
@@ -23,9 +23,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/net/irda/ircomm/ircomm_tty_ioctl.c b/net/irda/ircomm/ircomm_tty_ioctl.c
index b343f50dc8d7..ce943853c38d 100644
--- a/net/irda/ircomm/ircomm_tty_ioctl.c
+++ b/net/irda/ircomm/ircomm_tty_ioctl.c
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index 14653b8d664d..365b895da84b 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -23,9 +23,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index 005b424494a0..a778df55f5d6 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -23,9 +23,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/net/irda/parameters.c b/net/irda/parameters.c
index 71cd38c1a67f..6d0869716bf6 100644
--- a/net/irda/parameters.c
+++ b/net/irda/parameters.c
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/net/irda/qos.c b/net/irda/qos.c
index 798ffd9a705e..11a7cc0cbc28 100644
--- a/net/irda/qos.c
+++ b/net/irda/qos.c
@@ -24,9 +24,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 545f047868ad..1a04c1329362 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1340,6 +1340,12 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_
max_spi = range->sadb_spirange_max;
}
+ err = verify_spi_info(x->id.proto, min_spi, max_spi);
+ if (err) {
+ xfrm_state_put(x);
+ return err;
+ }
+
err = xfrm_alloc_spi(x, min_spi, max_spi);
resp_skb = err ? ERR_PTR(err) : pfkey_xfrm_state2msg(x);
@@ -1380,10 +1386,9 @@ static int pfkey_acquire(struct sock *sk, struct sk_buff *skb, const struct sadb
return 0;
spin_lock_bh(&x->lock);
- if (x->km.state == XFRM_STATE_ACQ) {
+ if (x->km.state == XFRM_STATE_ACQ)
x->km.state = XFRM_STATE_ERROR;
- wake_up(&net->xfrm.km_waitq);
- }
+
spin_unlock_bh(&x->lock);
xfrm_state_put(x);
return 0;
@@ -1785,7 +1790,9 @@ static int pfkey_dump_sa(struct pfkey_sock *pfk)
static void pfkey_dump_sa_done(struct pfkey_sock *pfk)
{
- xfrm_state_walk_done(&pfk->dump.u.state);
+ struct net *net = sock_net(&pfk->sk);
+
+ xfrm_state_walk_done(&pfk->dump.u.state, net);
}
static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
@@ -1861,7 +1868,7 @@ static u32 gen_reqid(struct net *net)
reqid = IPSEC_MANUAL_REQID_MAX+1;
xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_MAIN);
rc = xfrm_policy_walk(net, &walk, check_reqid, (void*)&reqid);
- xfrm_policy_walk_done(&walk);
+ xfrm_policy_walk_done(&walk, net);
if (rc != -EEXIST)
return reqid;
} while (reqid != start);
@@ -2485,6 +2492,7 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
struct xfrm_selector sel;
struct xfrm_migrate m[XFRM_MAX_DEPTH];
struct xfrm_kmaddress k;
+ struct net *net = sock_net(sk);
if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
@@ -2558,7 +2566,7 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
}
return xfrm_migrate(&sel, dir, XFRM_POLICY_TYPE_MAIN, m, i,
- kma ? &k : NULL);
+ kma ? &k : NULL, net);
out:
return err;
@@ -2659,7 +2667,9 @@ static int pfkey_dump_sp(struct pfkey_sock *pfk)
static void pfkey_dump_sp_done(struct pfkey_sock *pfk)
{
- xfrm_policy_walk_done(&pfk->dump.u.policy);
+ struct net *net = sock_net((struct sock *)pfk);
+
+ xfrm_policy_walk_done(&pfk->dump.u.policy, net);
}
static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
@@ -3569,6 +3579,7 @@ static int pfkey_sendmsg(struct kiocb *kiocb,
struct sk_buff *skb = NULL;
struct sadb_msg *hdr = NULL;
int err;
+ struct net *net = sock_net(sk);
err = -EOPNOTSUPP;
if (msg->msg_flags & MSG_OOB)
@@ -3591,9 +3602,9 @@ static int pfkey_sendmsg(struct kiocb *kiocb,
if (!hdr)
goto out;
- mutex_lock(&xfrm_cfg_mutex);
+ mutex_lock(&net->xfrm.xfrm_cfg_mutex);
err = pfkey_process(sk, skb, hdr);
- mutex_unlock(&xfrm_cfg_mutex);
+ mutex_unlock(&net->xfrm.xfrm_cfg_mutex);
out:
if (err && hdr && pfkey_error(hdr, err, sk) == 0)
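
The af_key changes validate the requested SPI range with verify_spi_info() before calling the allocator, and move the walker teardown and configuration mutex onto per-namespace state. A minimal userspace sketch of the validate-before-allocate idea; the real verify_spi_info() also checks that the protocol supports SPIs, this only shows the range test:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

static int verify_spi_range(uint32_t min_spi, uint32_t max_spi)
{
	if (min_spi > max_spi)
		return -EINVAL;	/* empty range: fail fast, don't loop in the allocator */
	return 0;
}

int main(void)
{
	printf("ok range:  %d\n", verify_spi_range(0x100, 0x1ff));
	printf("bad range: %d\n", verify_spi_range(0x200, 0x100));
	return 0;
}
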
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index cfd65304be60..29487a8f7fa0 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -528,7 +528,6 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
- daddr = &flowlabel->dst;
}
}
@@ -598,7 +597,7 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
- dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
+ dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto out;
@@ -665,7 +664,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
*addr_len = sizeof(*lsa);
if (flags & MSG_ERRQUEUE)
- return ipv6_recv_error(sk, msg, len);
+ return ipv6_recv_error(sk, msg, len, addr_len);
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
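
As in udp.c above, ipv6_recv_error() now receives addr_len, so a recvmsg(MSG_ERRQUEUE) caller gets a correctly sized msg_namelen back. A minimal userspace consumer of the IPv6 error queue; an empty-queue EAGAIN is the expected outcome when nothing has failed yet:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
	struct sockaddr_in6 peer;
	char cbuf[512];
	struct msghdr msg;

	memset(&msg, 0, sizeof(msg));
	msg.msg_name = &peer;		/* the address the error refers to */
	msg.msg_namelen = sizeof(peer);
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) < 0)
		perror("recvmsg");	/* empty error queue is normal here */
	else
		printf("msg_namelen reports %u bytes\n", (unsigned)msg.msg_namelen);
	return 0;
}
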
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 7b01b9f5846c..c71b699eb555 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -715,7 +715,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
unsigned long cpu_flags;
size_t copied = 0;
u32 peek_seq = 0;
- u32 *seq;
+ u32 *seq, skb_len;
unsigned long used;
int target; /* Read at least this many bytes */
long timeo;
@@ -812,6 +812,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
}
continue;
found_ok_skb:
+ skb_len = skb->len;
/* Ok so how much can we use? */
used = skb->len - offset;
if (len < used)
@@ -844,7 +845,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
}
/* Partial read */
- if (used + offset < skb->len)
+ if (used + offset < skb_len)
continue;
} while (len > 0);
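
The llc_ui_recvmsg() fix caches skb->len into skb_len before the loop body can free the skb, then tests the cached copy in the partial-read check. A standalone sketch of this bug class and its fix, with consume() standing in for sk_eat_skb(), which may free the buffer:

#include <stdio.h>
#include <stdlib.h>

struct buf { size_t len; };

static void consume(struct buf *b) { free(b); }	/* may free, like sk_eat_skb() */

int main(void)
{
	struct buf *b = malloc(sizeof(*b));
	size_t used = 10, offset = 0;

	b->len = 100;
	size_t cached_len = b->len;	/* cache before consume() can free b */
	consume(b);

	if (used + offset < cached_len)	/* safe: no read of freed memory */
		printf("partial read, keep looping\n");
	return 0;
}
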
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index cd8724177965..42dc2e45c921 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -753,7 +753,7 @@ void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk)
*
* Sends received pdus to the connection state machine.
*/
-static int llc_conn_rcv(struct sock* sk, struct sk_buff *skb)
+static int llc_conn_rcv(struct sock *sk, struct sk_buff *skb)
{
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
@@ -891,7 +891,7 @@ out_kfree_skb:
*
* Initializes a socket with default llc values.
*/
-static void llc_sk_init(struct sock* sk)
+static void llc_sk_init(struct sock *sk)
{
struct llc_sock *llc = llc_sk(sk);
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index 2bb0ddff8c0f..842851cef698 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -23,7 +23,7 @@
#include <net/llc.h>
LIST_HEAD(llc_sap_list);
-DEFINE_SPINLOCK(llc_sap_list_lock);
+static DEFINE_SPINLOCK(llc_sap_list_lock);
/**
* llc_sap_alloc - allocates and initializes sap.
@@ -48,7 +48,7 @@ static struct llc_sap *llc_sap_alloc(void)
static struct llc_sap *__llc_sap_find(unsigned char sap_value)
{
- struct llc_sap* sap;
+ struct llc_sap *sap;
list_for_each_entry(sap, &llc_sap_list, node)
if (sap->laddr.lsap == sap_value)
@@ -159,7 +159,6 @@ module_init(llc_init);
module_exit(llc_exit);
EXPORT_SYMBOL(llc_sap_list);
-EXPORT_SYMBOL(llc_sap_list_lock);
EXPORT_SYMBOL(llc_sap_find);
EXPORT_SYMBOL(llc_sap_open);
EXPORT_SYMBOL(llc_sap_close);
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index e5850699098e..06033f6c845f 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -66,7 +66,7 @@ struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev,
return skb;
}
-void llc_save_primitive(struct sock *sk, struct sk_buff* skb, u8 prim)
+void llc_save_primitive(struct sock *sk, struct sk_buff *skb, u8 prim)
{
struct sockaddr_llc *addr;
@@ -114,7 +114,7 @@ void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb)
* failure.
*/
static struct llc_sap_state_trans *llc_find_sap_trans(struct llc_sap *sap,
- struct sk_buff* skb)
+ struct sk_buff *skb)
{
int i = 0;
struct llc_sap_state_trans *rc = NULL;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 0aa9675319ef..b2c83c0f06d0 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1480,8 +1480,8 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
bool used = false;
list_for_each_entry(sdata, &local->interfaces, list) {
- if (memcmp(local->hw.wiphy->addresses[i].addr,
- sdata->vif.addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(local->hw.wiphy->addresses[i].addr,
+ sdata->vif.addr)) {
used = true;
break;
}
@@ -1541,8 +1541,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
val += inc;
list_for_each_entry(sdata, &local->interfaces, list) {
- if (memcmp(tmp_addr, sdata->vif.addr,
- ETH_ALEN) == 0) {
+ if (ether_addr_equal(tmp_addr, sdata->vif.addr)) {
used = true;
break;
}
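
The iface.c hunks replace open-coded memcmp(a, b, ETH_ALEN) == 0 comparisons with ether_addr_equal(). A userspace equivalent of the helper; the kernel version is additionally optimized into a couple of loads on most architectures:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>

static bool ether_addr_equal(const uint8_t *a, const uint8_t *b)
{
	return memcmp(a, b, 6) == 0;	/* 6 == ETH_ALEN */
}

int main(void)
{
	uint8_t a[6] = { 0x02, 0, 0, 0, 0, 1 };
	uint8_t b[6] = { 0x02, 0, 0, 0, 0, 1 };

	printf("%s\n", ether_addr_equal(a, b) ? "same" : "different");
	return 0;
}
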
diff --git a/net/mac802154/wpan.c b/net/mac802154/wpan.c
index e24bcf977296..372d8a222b91 100644
--- a/net/mac802154/wpan.c
+++ b/net/mac802154/wpan.c
@@ -444,8 +444,8 @@ mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb)
case IEEE802154_FC_TYPE_DATA:
return mac802154_process_data(sdata->dev, skb);
default:
- pr_warning("ieee802154: bad frame received (type = %d)\n",
- mac_cb_type(skb));
+ pr_warn("ieee802154: bad frame received (type = %d)\n",
+ mac_cb_type(skb));
kfree_skb(skb);
return NET_RX_DROP;
}
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index c3398cd99b94..37d2092705a7 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -414,47 +414,112 @@ config NETFILTER_SYNPROXY
endif # NF_CONNTRACK
config NF_TABLES
- depends on NETFILTER_NETLINK
+ select NETFILTER_NETLINK
tristate "Netfilter nf_tables support"
+ help
+ nftables is the new packet classification framework that intends to
+ replace the existing {ip,ip6,arp,eb}_tables infrastructure. It
+ provides a pseudo-state machine with an extensible instruction-set
+ (also known as expressions) that the userspace 'nft' utility
+ (http://www.netfilter.org/projects/nftables) uses to build the
+ rule-set. It also comes with the generic set infrastructure that
+ allows you to construct mappings between matchings and actions
+	  for high-performance lookups.
+
+ To compile it as a module, choose M here.
+
+config NF_TABLES_INET
+ depends on NF_TABLES
+ select NF_TABLES_IPV4
+ select NF_TABLES_IPV6
+ tristate "Netfilter nf_tables mixed IPv4/IPv6 tables support"
+ help
+ This option enables support for a mixed IPv4/IPv6 "inet" table.
config NFT_EXTHDR
depends on NF_TABLES
tristate "Netfilter nf_tables IPv6 exthdr module"
+ help
+ This option adds the "exthdr" expression that you can use to match
+ IPv6 extension headers.
config NFT_META
depends on NF_TABLES
tristate "Netfilter nf_tables meta module"
+ help
+ This option adds the "meta" expression that you can use to match and
+ to set packet metainformation such as the packet mark.
config NFT_CT
depends on NF_TABLES
depends on NF_CONNTRACK
tristate "Netfilter nf_tables conntrack module"
+ help
+	  This option adds the "ct" expression that you can use to match
+ connection tracking information such as the flow state.
config NFT_RBTREE
depends on NF_TABLES
tristate "Netfilter nf_tables rbtree set module"
+ help
+ This option adds the "rbtree" set type (Red Black tree) that is used
+ to build interval-based sets.
config NFT_HASH
depends on NF_TABLES
tristate "Netfilter nf_tables hash set module"
+ help
+ This option adds the "hash" set type that is used to build one-way
+ mappings between matchings and actions.
config NFT_COUNTER
depends on NF_TABLES
tristate "Netfilter nf_tables counter module"
+ help
+ This option adds the "counter" expression that you can use to
+ include packet and byte counters in a rule.
config NFT_LOG
depends on NF_TABLES
tristate "Netfilter nf_tables log module"
+ help
+ This option adds the "log" expression that you can use to log
+ packets matching some criteria.
config NFT_LIMIT
depends on NF_TABLES
tristate "Netfilter nf_tables limit module"
+ help
+ This option adds the "limit" expression that you can use to
+ ratelimit rule matchings.
config NFT_NAT
depends on NF_TABLES
depends on NF_CONNTRACK
depends on NF_NAT
tristate "Netfilter nf_tables nat module"
+ help
+ This option adds the "nat" expression that you can use to perform
+ typical Network Address Translation (NAT) packet transformations.
+
+config NFT_QUEUE
+ depends on NF_TABLES
+ depends on NETFILTER_XTABLES
+ depends on NETFILTER_NETLINK_QUEUE
+ tristate "Netfilter nf_tables queue module"
+ help
+ This is required if you intend to use the userspace queueing
+ infrastructure (also known as NFQUEUE) from nftables.
+
+config NFT_REJECT
+ depends on NF_TABLES
+ depends on NF_TABLES_IPV6 || !NF_TABLES_IPV6
+ default m if NETFILTER_ADVANCED=n
+ tristate "Netfilter nf_tables reject support"
+ help
+	  This option adds the "reject" expression that you can use to
+	  explicitly deny unwanted traffic and notify the sender via
+	  TCP reset or ICMP informational errors.
config NFT_COMPAT
depends on NF_TABLES
@@ -858,6 +923,16 @@ config NETFILTER_XT_MATCH_BPF
To compile it as a module, choose M here. If unsure, say N.
+config NETFILTER_XT_MATCH_CGROUP
+ tristate '"control group" match support'
+ depends on NETFILTER_ADVANCED
+ depends on CGROUPS
+ select CGROUP_NET_CLASSID
+ ---help---
+ Socket/process control group matching allows you to match locally
+ generated packets based on which net_cls control group processes
+ belong to.
+
config NETFILTER_XT_MATCH_CLUSTER
tristate '"cluster" match support'
depends on NF_CONNTRACK
@@ -1035,6 +1110,15 @@ config NETFILTER_XT_MATCH_HL
in the IPv6 header, or the time-to-live field in the IPv4
header of the packet.
+config NETFILTER_XT_MATCH_IPCOMP
+ tristate '"ipcomp" match support'
+ depends on NETFILTER_ADVANCED
+ help
+	  This match extension allows you to match a range of CPIs (16 bits)
+	  inside the IPComp header of IPsec packets.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_MATCH_IPRANGE
tristate '"iprange" address range match support'
depends on NETFILTER_ADVANCED
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 394483b2c193..74c066109334 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -70,13 +70,15 @@ nf_tables-objs += nft_immediate.o nft_cmp.o nft_lookup.o
nf_tables-objs += nft_bitwise.o nft_byteorder.o nft_payload.o
obj-$(CONFIG_NF_TABLES) += nf_tables.o
+obj-$(CONFIG_NF_TABLES_INET) += nf_tables_inet.o
obj-$(CONFIG_NFT_COMPAT) += nft_compat.o
obj-$(CONFIG_NFT_EXTHDR) += nft_exthdr.o
obj-$(CONFIG_NFT_META) += nft_meta.o
obj-$(CONFIG_NFT_CT) += nft_ct.o
obj-$(CONFIG_NFT_LIMIT) += nft_limit.o
obj-$(CONFIG_NFT_NAT) += nft_nat.o
-#nf_tables-objs += nft_meta_target.o
+obj-$(CONFIG_NFT_QUEUE) += nft_queue.o
+obj-$(CONFIG_NFT_REJECT) += nft_reject.o
obj-$(CONFIG_NFT_RBTREE) += nft_rbtree.o
obj-$(CONFIG_NFT_HASH) += nft_hash.o
obj-$(CONFIG_NFT_COUNTER) += nft_counter.o
@@ -133,6 +135,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_IPCOMP) += xt_ipcomp.o
obj-$(CONFIG_NETFILTER_XT_MATCH_IPRANGE) += xt_iprange.o
obj-$(CONFIG_NETFILTER_XT_MATCH_IPVS) += xt_ipvs.o
obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o
@@ -142,6 +145,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_MULTIPORT) += xt_multiport.o
obj-$(CONFIG_NETFILTER_XT_MATCH_NFACCT) += xt_nfacct.o
obj-$(CONFIG_NETFILTER_XT_MATCH_OSF) += xt_osf.o
obj-$(CONFIG_NETFILTER_XT_MATCH_OWNER) += xt_owner.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_CGROUP) += xt_cgroup.o
obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o
obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index bac7e01df67f..de770ec39e51 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -625,34 +625,6 @@ EXPORT_SYMBOL_GPL(ip_set_name_byindex);
*/
/*
- * Find set by name, reference it once. The reference makes sure the
- * thing pointed to, does not go away under our feet.
- *
- * The nfnl mutex is used in the function.
- */
-ip_set_id_t
-ip_set_nfnl_get(struct net *net, const char *name)
-{
- ip_set_id_t i, index = IPSET_INVALID_ID;
- struct ip_set *s;
- struct ip_set_net *inst = ip_set_pernet(net);
-
- nfnl_lock(NFNL_SUBSYS_IPSET);
- for (i = 0; i < inst->ip_set_max; i++) {
- s = nfnl_set(inst, i);
- if (s != NULL && STREQ(s->name, name)) {
- __ip_set_get(s);
- index = i;
- break;
- }
- }
- nfnl_unlock(NFNL_SUBSYS_IPSET);
-
- return index;
-}
-EXPORT_SYMBOL_GPL(ip_set_nfnl_get);
-
-/*
* Find set by index, reference it once. The reference makes sure the
 * thing pointed to does not go away under our feet.
*
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
index 2bc2dec20b00..6226803fc490 100644
--- a/net/netfilter/ipset/ip_set_hash_netnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -59,7 +59,7 @@ hash_netnet4_data_equal(const struct hash_netnet4_elem *ip1,
u32 *multi)
{
return ip1->ipcmp == ip2->ipcmp &&
- ip2->ccmp == ip2->ccmp;
+ ip1->ccmp == ip2->ccmp;
}
static inline int
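
The hash_netnet fix above repairs a one-character typo: ip2->ccmp == ip2->ccmp compares a field against itself, so the second half of the equality test was a tautology and distinct elements could be treated as equal. A sketch that makes the difference visible:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct elem { uint32_t ipcmp; uint32_t ccmp; };

static bool equal_buggy(const struct elem *a, const struct elem *b)
{
	return a->ipcmp == b->ipcmp && b->ccmp == b->ccmp;	/* always-true half */
}

static bool equal_fixed(const struct elem *a, const struct elem *b)
{
	return a->ipcmp == b->ipcmp && a->ccmp == b->ccmp;
}

int main(void)
{
	struct elem x = { 1, 24 }, y = { 1, 16 };	/* same ip, different cidrs */

	printf("buggy: %d, fixed: %d\n", equal_buggy(&x, &y), equal_fixed(&x, &y));
	return 0;
}
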
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c
index c8beafd401aa..5882bbfd198c 100644
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@ -19,8 +19,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*
* Authors:
@@ -63,6 +62,7 @@
#include <net/ip_vs.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_zones.h>
@@ -97,6 +97,11 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin)
if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
return;
+ /* Applications may adjust TCP seqs */
+ if (cp->app && nf_ct_protonum(ct) == IPPROTO_TCP &&
+ !nfct_seqadj(ct) && !nfct_seqadj_ext_add(ct))
+ return;
+
/*
* The connection is not yet in the hashtable, so we update it.
* CIP->VIP will remain the same, so leave the tuple in
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index f63c2388f38d..db801263ee9f 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1637,7 +1637,10 @@ static int sync_thread_master(void *data)
continue;
}
while (ip_vs_send_sync_msg(tinfo->sock, sb->mesg) < 0) {
- int ret = __wait_event_interruptible(*sk_sleep(sk),
+ /* (Ab)use interruptible sleep to avoid increasing
+ * the load avg.
+ */
+ __wait_event_interruptible(*sk_sleep(sk),
sock_writeable(sk) ||
kthread_should_stop());
if (unlikely(kthread_should_stop()))
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 43549eb7a7be..8824ed0ccc9c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -60,12 +60,6 @@ int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
const struct nlattr *attr) __read_mostly;
EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);
-int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff);
-EXPORT_SYMBOL_GPL(nf_nat_seq_adjust_hook);
-
DEFINE_SPINLOCK(nf_conntrack_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_lock);
@@ -361,15 +355,6 @@ begin:
return NULL;
}
-struct nf_conntrack_tuple_hash *
-__nf_conntrack_find(struct net *net, u16 zone,
- const struct nf_conntrack_tuple *tuple)
-{
- return ____nf_conntrack_find(net, zone, tuple,
- hash_conntrack_raw(tuple, zone));
-}
-EXPORT_SYMBOL_GPL(__nf_conntrack_find);
-
/* Find a connection corresponding to a tuple. */
static struct nf_conntrack_tuple_hash *
__nf_conntrack_find_get(struct net *net, u16 zone,
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 08870b859046..bb322d0beb48 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2118,8 +2118,16 @@ ctnetlink_nfqueue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
return err;
}
#if defined(CONFIG_NF_CONNTRACK_MARK)
- if (cda[CTA_MARK])
- ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
+ if (cda[CTA_MARK]) {
+ u32 mask = 0, mark, newmark;
+ if (cda[CTA_MARK_MASK])
+ mask = ~ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
+
+ mark = ntohl(nla_get_be32(cda[CTA_MARK]));
+ newmark = (ct->mark & mask) ^ mark;
+ if (newmark != ct->mark)
+ ct->mark = newmark;
+ }
#endif
return 0;
}
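
The ctnetlink hunk lets CTA_MARK_MASK limit which bits of the conntrack mark a CTA_MARK update may touch: bits outside the mask survive, bits inside it are replaced. The arithmetic in isolation:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ct_mark   = 0xAABBCCDD;
	uint32_t attr_mark = 0x00000011;	/* CTA_MARK */
	uint32_t attr_mask = 0x000000FF;	/* CTA_MARK_MASK: bits to change */

	uint32_t keep = ~attr_mask;		/* bits of the old mark to preserve */
	uint32_t newmark = (ct_mark & keep) ^ attr_mark;

	/* 0xAABBCCDD -> 0xAABBCC11: only the masked low byte changed */
	printf("old 0x%08x -> new 0x%08x\n", (unsigned)ct_mark, (unsigned)newmark);
	return 0;
}
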
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index ce3004156eeb..b65d5864b6d9 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -92,12 +92,6 @@ nf_ct_l3proto_find_get(u_int16_t l3proto)
}
EXPORT_SYMBOL_GPL(nf_ct_l3proto_find_get);
-void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p)
-{
- module_put(p->me);
-}
-EXPORT_SYMBOL_GPL(nf_ct_l3proto_put);
-
int
nf_ct_l3proto_try_module_get(unsigned short l3proto)
{
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index a99b6c3427b0..cb372f96f10d 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -428,7 +428,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
const char *msg;
u_int8_t state;
- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
BUG_ON(dh == NULL);
state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
@@ -457,7 +457,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
out_invalid:
if (LOG_INVALID(net, IPPROTO_DCCP))
nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
- NULL, msg);
+ NULL, "%s", msg);
return false;
}
@@ -486,7 +486,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
u_int8_t type, old_state, new_state;
enum ct_dccp_roles role;
- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
BUG_ON(dh == NULL);
type = dh->dccph_type;
@@ -577,7 +577,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
unsigned int cscov;
const char *msg;
- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
if (dh == NULL) {
msg = "nf_ct_dccp: short packet ";
goto out_invalid;
@@ -614,7 +614,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
out_invalid:
if (LOG_INVALID(net, IPPROTO_DCCP))
- nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, msg);
+ nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg);
return -NF_ACCEPT;
}
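
Two distinct fixes above: skb_header_pointer() must be handed the address of the local buffer (&_dh), not the address of the pointer that receives the result (&dh); and nf_log_packet() now gets msg through a "%s" format instead of using it as the format string itself. A sketch of why the format-string half matters:

#include <stdio.h>

int main(void)
{
	const char *msg = "nf_ct_dccp: 100% invalid state ";	/* contains '%' */

	/* printf(msg);        BAD: the '%' in msg is parsed as a conversion
	 *                     and printf reads a garbage argument */
	printf("%s\n", msg);	/* GOOD: msg is treated as plain data */
	return 0;
}
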
diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c
index 17c1bcb182c6..b2d38da67822 100644
--- a/net/netfilter/nf_conntrack_seqadj.c
+++ b/net/netfilter/nf_conntrack_seqadj.c
@@ -36,6 +36,11 @@ int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
if (off == 0)
return 0;
+ if (unlikely(!seqadj)) {
+ WARN(1, "Wrong seqadj usage, missing nfct_seqadj_ext_add()\n");
+ return 0;
+ }
+
set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
spin_lock_bh(&ct->lock);
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
index 902fb0a6b38a..7a394df0deb7 100644
--- a/net/netfilter/nf_conntrack_timestamp.c
+++ b/net/netfilter/nf_conntrack_timestamp.c
@@ -97,7 +97,6 @@ int nf_conntrack_tstamp_pernet_init(struct net *net)
void nf_conntrack_tstamp_pernet_fini(struct net *net)
{
nf_conntrack_tstamp_fini_sysctl(net);
- nf_ct_extend_unregister(&tstamp_extend);
}
int nf_conntrack_tstamp_init(void)
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 63a815402211..d3f5cd6dd962 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -315,7 +315,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
* manips not an issue.
*/
if (maniptype == NF_NAT_MANIP_SRC &&
- !(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) {
+ !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
/* try the original tuple first */
if (in_range(l3proto, l4proto, orig_tuple, range)) {
if (!nf_nat_used_tuple(orig_tuple, ct)) {
@@ -339,7 +339,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
*/
/* Only bother mapping if it's not already in range and unique */
- if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) {
+ if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
if (l4proto->in_range(tuple, maniptype,
&range->min_proto,
diff --git a/net/netfilter/nf_nat_proto_common.c b/net/netfilter/nf_nat_proto_common.c
index 9baaf734c142..83a72a235cae 100644
--- a/net/netfilter/nf_nat_proto_common.c
+++ b/net/netfilter/nf_nat_proto_common.c
@@ -74,22 +74,24 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
range_size = ntohs(range->max_proto.all) - min + 1;
}
- if (range->flags & NF_NAT_RANGE_PROTO_RANDOM)
+ if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) {
off = l3proto->secure_port(tuple, maniptype == NF_NAT_MANIP_SRC
? tuple->dst.u.all
: tuple->src.u.all);
- else
+ } else if (range->flags & NF_NAT_RANGE_PROTO_RANDOM_FULLY) {
+ off = prandom_u32();
+ } else {
off = *rover;
+ }
for (i = 0; ; ++off) {
*portptr = htons(min + off % range_size);
if (++i != range_size && nf_nat_used_tuple(tuple, ct))
continue;
- if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM))
+ if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL))
*rover = off;
return;
}
- return;
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_unique_tuple);
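
The NF_NAT_RANGE_PROTO_RANDOM_FULLY hunk adds a third source-port policy next to the existing hash-based offset and the persistent rover: a fully random offset from prandom_u32(). A standalone sketch of the three policies; the helpers are stand-ins, not the kernel functions:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

enum { RANDOM = 1, RANDOM_FULLY = 2 };

static uint16_t pick_port(int flags, uint16_t min, uint16_t range_size,
			  uint32_t *rover)
{
	uint32_t off;

	if (flags & RANDOM)
		off = 0xdeadbeef;	/* stand-in for the secure_port() hash */
	else if (flags & RANDOM_FULLY)
		off = (uint32_t)rand();	/* stand-in for prandom_u32() */
	else
		off = *rover;		/* sequential: continue where we stopped */

	if (!(flags & (RANDOM | RANDOM_FULLY)))
		*rover = off;		/* only the sequential policy keeps state */

	return min + off % range_size;
}

int main(void)
{
	uint32_t rover = 0;

	printf("port: %u\n", pick_port(RANDOM_FULLY, 1024, 4096, &rover));
	return 0;
}
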
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index dcddc49c0e08..36add31e08e7 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -124,37 +124,43 @@ static inline u64 nf_tables_alloc_handle(struct nft_table *table)
return ++table->hgenerator;
}
-static struct nf_chain_type *chain_type[AF_MAX][NFT_CHAIN_T_MAX];
+static const struct nf_chain_type *chain_type[AF_MAX][NFT_CHAIN_T_MAX];
-static int __nf_tables_chain_type_lookup(int family, const struct nlattr *nla)
+static const struct nf_chain_type *
+__nf_tables_chain_type_lookup(int family, const struct nlattr *nla)
{
int i;
- for (i=0; i<NFT_CHAIN_T_MAX; i++) {
+ for (i = 0; i < NFT_CHAIN_T_MAX; i++) {
if (chain_type[family][i] != NULL &&
!nla_strcmp(nla, chain_type[family][i]->name))
- return i;
+ return chain_type[family][i];
}
- return -1;
+ return NULL;
}
-static int nf_tables_chain_type_lookup(const struct nft_af_info *afi,
- const struct nlattr *nla,
- bool autoload)
+static const struct nf_chain_type *
+nf_tables_chain_type_lookup(const struct nft_af_info *afi,
+ const struct nlattr *nla,
+ bool autoload)
{
- int type;
+ const struct nf_chain_type *type;
type = __nf_tables_chain_type_lookup(afi->family, nla);
+ if (type != NULL)
+ return type;
#ifdef CONFIG_MODULES
- if (type < 0 && autoload) {
+ if (autoload) {
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
request_module("nft-chain-%u-%*.s", afi->family,
nla_len(nla)-1, (const char *)nla_data(nla));
nfnl_lock(NFNL_SUBSYS_NFTABLES);
type = __nf_tables_chain_type_lookup(afi->family, nla);
+ if (type != NULL)
+ return ERR_PTR(-EAGAIN);
}
#endif
- return type;
+ return ERR_PTR(-ENOENT);
}
static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
@@ -180,7 +186,8 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, u32 portid, u32 seq,
nfmsg->res_id = 0;
if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) ||
- nla_put_be32(skb, NFTA_TABLE_FLAGS, htonl(table->flags)))
+ nla_put_be32(skb, NFTA_TABLE_FLAGS, htonl(table->flags)) ||
+ nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use)))
goto nla_put_failure;
return nlmsg_end(skb, nlh);
@@ -306,13 +313,17 @@ err:
return err;
}
-static int nf_tables_table_enable(struct nft_table *table)
+static int nf_tables_table_enable(const struct nft_af_info *afi,
+ struct nft_table *table)
{
struct nft_chain *chain;
int err, i = 0;
list_for_each_entry(chain, &table->chains, list) {
- err = nf_register_hook(&nft_base_chain(chain)->ops);
+ if (!(chain->flags & NFT_BASE_CHAIN))
+ continue;
+
+ err = nf_register_hooks(nft_base_chain(chain)->ops, afi->nops);
if (err < 0)
goto err;
@@ -321,20 +332,27 @@ static int nf_tables_table_enable(struct nft_table *table)
return 0;
err:
list_for_each_entry(chain, &table->chains, list) {
+ if (!(chain->flags & NFT_BASE_CHAIN))
+ continue;
+
if (i-- <= 0)
break;
- nf_unregister_hook(&nft_base_chain(chain)->ops);
+ nf_unregister_hooks(nft_base_chain(chain)->ops, afi->nops);
}
return err;
}
-static int nf_tables_table_disable(struct nft_table *table)
+static int nf_tables_table_disable(const struct nft_af_info *afi,
+ struct nft_table *table)
{
struct nft_chain *chain;
- list_for_each_entry(chain, &table->chains, list)
- nf_unregister_hook(&nft_base_chain(chain)->ops);
+ list_for_each_entry(chain, &table->chains, list) {
+ if (chain->flags & NFT_BASE_CHAIN)
+ nf_unregister_hooks(nft_base_chain(chain)->ops,
+ afi->nops);
+ }
return 0;
}
@@ -348,7 +366,7 @@ static int nf_tables_updtable(struct sock *nlsk, struct sk_buff *skb,
int family = nfmsg->nfgen_family, ret = 0;
if (nla[NFTA_TABLE_FLAGS]) {
- __be32 flags;
+ u32 flags;
flags = ntohl(nla_get_be32(nla[NFTA_TABLE_FLAGS]));
if (flags & ~NFT_TABLE_F_DORMANT)
@@ -356,12 +374,12 @@ static int nf_tables_updtable(struct sock *nlsk, struct sk_buff *skb,
if ((flags & NFT_TABLE_F_DORMANT) &&
!(table->flags & NFT_TABLE_F_DORMANT)) {
- ret = nf_tables_table_disable(table);
+ ret = nf_tables_table_disable(afi, table);
if (ret >= 0)
table->flags |= NFT_TABLE_F_DORMANT;
} else if (!(flags & NFT_TABLE_F_DORMANT) &&
table->flags & NFT_TABLE_F_DORMANT) {
- ret = nf_tables_table_enable(table);
+ ret = nf_tables_table_enable(afi, table);
if (ret >= 0)
table->flags &= ~NFT_TABLE_F_DORMANT;
}
@@ -384,6 +402,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
struct nft_table *table;
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
+ u32 flags = 0;
afi = nf_tables_afinfo_lookup(net, family, true);
if (IS_ERR(afi))
@@ -405,25 +424,25 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
return nf_tables_updtable(nlsk, skb, nlh, nla, afi, table);
}
+ if (nla[NFTA_TABLE_FLAGS]) {
+ flags = ntohl(nla_get_be32(nla[NFTA_TABLE_FLAGS]));
+ if (flags & ~NFT_TABLE_F_DORMANT)
+ return -EINVAL;
+ }
+
+ if (!try_module_get(afi->owner))
+ return -EAFNOSUPPORT;
+
table = kzalloc(sizeof(*table) + nla_len(name), GFP_KERNEL);
- if (table == NULL)
+ if (table == NULL) {
+ module_put(afi->owner);
return -ENOMEM;
+ }
nla_strlcpy(table->name, name, nla_len(name));
INIT_LIST_HEAD(&table->chains);
INIT_LIST_HEAD(&table->sets);
-
- if (nla[NFTA_TABLE_FLAGS]) {
- __be32 flags;
-
- flags = ntohl(nla_get_be32(nla[NFTA_TABLE_FLAGS]));
- if (flags & ~NFT_TABLE_F_DORMANT) {
- kfree(table);
- return -EINVAL;
- }
-
- table->flags |= flags;
- }
+ table->flags = flags;
list_add_tail(&table->list, &afi->tables);
nf_tables_table_notify(skb, nlh, table, NFT_MSG_NEWTABLE, family);
@@ -448,16 +467,17 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
if (IS_ERR(table))
return PTR_ERR(table);
- if (table->use)
+ if (!list_empty(&table->chains) || !list_empty(&table->sets))
return -EBUSY;
list_del(&table->list);
nf_tables_table_notify(skb, nlh, table, NFT_MSG_DELTABLE, family);
kfree(table);
+ module_put(afi->owner);
return 0;
}
-int nft_register_chain_type(struct nf_chain_type *ctype)
+int nft_register_chain_type(const struct nf_chain_type *ctype)
{
int err = 0;
@@ -466,10 +486,6 @@ int nft_register_chain_type(struct nf_chain_type *ctype)
err = -EBUSY;
goto out;
}
-
- if (!try_module_get(ctype->me))
- goto out;
-
chain_type[ctype->family][ctype->type] = ctype;
out:
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
@@ -477,11 +493,10 @@ out:
}
EXPORT_SYMBOL_GPL(nft_register_chain_type);
-void nft_unregister_chain_type(struct nf_chain_type *ctype)
+void nft_unregister_chain_type(const struct nf_chain_type *ctype)
{
nfnl_lock(NFNL_SUBSYS_NFTABLES);
chain_type[ctype->family][ctype->type] = NULL;
- module_put(ctype->me);
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
EXPORT_SYMBOL_GPL(nft_unregister_chain_type);
@@ -589,7 +604,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, u32 portid, u32 seq,
if (chain->flags & NFT_BASE_CHAIN) {
const struct nft_base_chain *basechain = nft_base_chain(chain);
- const struct nf_hook_ops *ops = &basechain->ops;
+ const struct nf_hook_ops *ops = &basechain->ops[0];
struct nlattr *nest;
nest = nla_nest_start(skb, NFTA_CHAIN_HOOK);
@@ -605,9 +620,8 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, u32 portid, u32 seq,
htonl(basechain->policy)))
goto nla_put_failure;
- if (nla_put_string(skb, NFTA_CHAIN_TYPE,
- chain_type[ops->pf][nft_base_chain(chain)->type]->name))
- goto nla_put_failure;
+ if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
+ goto nla_put_failure;
if (nft_dump_stats(skb, nft_base_chain(chain)->stats))
goto nla_put_failure;
@@ -748,22 +762,6 @@ err:
return err;
}
-static int
-nf_tables_chain_policy(struct nft_base_chain *chain, const struct nlattr *attr)
-{
- switch (ntohl(nla_get_be32(attr))) {
- case NF_DROP:
- chain->policy = NF_DROP;
- break;
- case NF_ACCEPT:
- chain->policy = NF_ACCEPT;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
static const struct nla_policy nft_counter_policy[NFTA_COUNTER_MAX + 1] = {
[NFTA_COUNTER_PACKETS] = { .type = NLA_U64 },
[NFTA_COUNTER_BYTES] = { .type = NLA_U64 },
@@ -822,7 +820,9 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
struct nlattr *ha[NFTA_HOOK_MAX + 1];
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
+ u8 policy = NF_ACCEPT;
u64 handle = 0;
+ unsigned int i;
int err;
bool create;
@@ -836,9 +836,6 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
if (IS_ERR(table))
return PTR_ERR(table);
- if (table->use == UINT_MAX)
- return -EOVERFLOW;
-
chain = NULL;
name = nla[NFTA_CHAIN_NAME];
@@ -856,6 +853,22 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
}
}
+ if (nla[NFTA_CHAIN_POLICY]) {
+ if ((chain != NULL &&
+ !(chain->flags & NFT_BASE_CHAIN)) ||
+ nla[NFTA_CHAIN_HOOK] == NULL)
+ return -EOPNOTSUPP;
+
+ policy = nla_get_be32(nla[NFTA_CHAIN_POLICY]);
+ switch (policy) {
+ case NF_DROP:
+ case NF_ACCEPT:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
if (chain != NULL) {
if (nlh->nlmsg_flags & NLM_F_EXCL)
return -EEXIST;
@@ -866,16 +879,6 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
!IS_ERR(nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME])))
return -EEXIST;
- if (nla[NFTA_CHAIN_POLICY]) {
- if (!(chain->flags & NFT_BASE_CHAIN))
- return -EOPNOTSUPP;
-
- err = nf_tables_chain_policy(nft_base_chain(chain),
- nla[NFTA_CHAIN_POLICY]);
- if (err < 0)
- return err;
- }
-
if (nla[NFTA_CHAIN_COUNTERS]) {
if (!(chain->flags & NFT_BASE_CHAIN))
return -EOPNOTSUPP;
@@ -886,24 +889,31 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
return err;
}
+ if (nla[NFTA_CHAIN_POLICY])
+ nft_base_chain(chain)->policy = policy;
+
if (nla[NFTA_CHAIN_HANDLE] && name)
nla_strlcpy(chain->name, name, NFT_CHAIN_MAXNAMELEN);
goto notify;
}
+ if (table->use == UINT_MAX)
+ return -EOVERFLOW;
+
if (nla[NFTA_CHAIN_HOOK]) {
+ const struct nf_chain_type *type;
struct nf_hook_ops *ops;
nf_hookfn *hookfn;
- u32 hooknum;
- int type = NFT_CHAIN_T_DEFAULT;
+ u32 hooknum, priority;
+ type = chain_type[family][NFT_CHAIN_T_DEFAULT];
if (nla[NFTA_CHAIN_TYPE]) {
type = nf_tables_chain_type_lookup(afi,
nla[NFTA_CHAIN_TYPE],
create);
- if (type < 0)
- return -ENOENT;
+ if (IS_ERR(type))
+ return PTR_ERR(type);
}
err = nla_parse_nested(ha, NFTA_HOOK_MAX, nla[NFTA_CHAIN_HOOK],
@@ -917,46 +927,23 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
hooknum = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
if (hooknum >= afi->nhooks)
return -EINVAL;
+ priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
- hookfn = chain_type[family][type]->fn[hooknum];
- if (hookfn == NULL)
+ if (!(type->hook_mask & (1 << hooknum)))
return -EOPNOTSUPP;
+ if (!try_module_get(type->owner))
+ return -ENOENT;
+ hookfn = type->hooks[hooknum];
basechain = kzalloc(sizeof(*basechain), GFP_KERNEL);
if (basechain == NULL)
return -ENOMEM;
- basechain->type = type;
- chain = &basechain->chain;
-
- ops = &basechain->ops;
- ops->pf = family;
- ops->owner = afi->owner;
- ops->hooknum = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
- ops->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
- ops->priv = chain;
- ops->hook = hookfn;
- if (afi->hooks[ops->hooknum])
- ops->hook = afi->hooks[ops->hooknum];
-
- chain->flags |= NFT_BASE_CHAIN;
-
- if (nla[NFTA_CHAIN_POLICY]) {
- err = nf_tables_chain_policy(basechain,
- nla[NFTA_CHAIN_POLICY]);
- if (err < 0) {
- free_percpu(basechain->stats);
- kfree(basechain);
- return err;
- }
- } else
- basechain->policy = NF_ACCEPT;
-
if (nla[NFTA_CHAIN_COUNTERS]) {
err = nf_tables_counters(basechain,
nla[NFTA_CHAIN_COUNTERS]);
if (err < 0) {
- free_percpu(basechain->stats);
+ module_put(type->owner);
kfree(basechain);
return err;
}
@@ -964,12 +951,33 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
struct nft_stats __percpu *newstats;
newstats = alloc_percpu(struct nft_stats);
- if (newstats == NULL)
+ if (newstats == NULL) {
+ module_put(type->owner);
+ kfree(basechain);
return -ENOMEM;
+ }
+ rcu_assign_pointer(basechain->stats, newstats);
+ }
+
+ basechain->type = type;
+ chain = &basechain->chain;
- rcu_assign_pointer(nft_base_chain(chain)->stats,
- newstats);
+ for (i = 0; i < afi->nops; i++) {
+ ops = &basechain->ops[i];
+ ops->pf = family;
+ ops->owner = afi->owner;
+ ops->hooknum = hooknum;
+ ops->priority = priority;
+ ops->priv = chain;
+ ops->hook = afi->hooks[ops->hooknum];
+ if (hookfn)
+ ops->hook = hookfn;
+ if (afi->hook_ops_init)
+ afi->hook_ops_init(ops, i);
}
+
+ chain->flags |= NFT_BASE_CHAIN;
+ basechain->policy = policy;
} else {
chain = kzalloc(sizeof(*chain), GFP_KERNEL);
if (chain == NULL)
@@ -984,8 +992,9 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
if (!(table->flags & NFT_TABLE_F_DORMANT) &&
chain->flags & NFT_BASE_CHAIN) {
- err = nf_register_hook(&nft_base_chain(chain)->ops);
+ err = nf_register_hooks(nft_base_chain(chain)->ops, afi->nops);
if (err < 0) {
+ module_put(basechain->type->owner);
free_percpu(basechain->stats);
kfree(basechain);
return err;
@@ -1006,6 +1015,7 @@ static void nf_tables_rcu_chain_destroy(struct rcu_head *head)
BUG_ON(chain->use > 0);
if (chain->flags & NFT_BASE_CHAIN) {
+ module_put(nft_base_chain(chain)->type->owner);
free_percpu(nft_base_chain(chain)->stats);
kfree(nft_base_chain(chain));
} else
@@ -1043,7 +1053,7 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
if (!(table->flags & NFT_TABLE_F_DORMANT) &&
chain->flags & NFT_BASE_CHAIN)
- nf_unregister_hook(&nft_base_chain(chain)->ops);
+ nf_unregister_hooks(nft_base_chain(chain)->ops, afi->nops);
nf_tables_chain_notify(skb, nlh, table, chain, NFT_MSG_DELCHAIN,
family);
@@ -1717,6 +1727,19 @@ nf_tables_delrule_one(struct nft_ctx *ctx, struct nft_rule *rule)
return -ENOENT;
}
+static int nf_table_delrule_by_chain(struct nft_ctx *ctx)
+{
+ struct nft_rule *rule;
+ int err;
+
+ list_for_each_entry(rule, &ctx->chain->rules, list) {
+ err = nf_tables_delrule_one(ctx, rule);
+ if (err < 0)
+ return err;
+ }
+ return 0;
+}
+
static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
@@ -1725,8 +1748,8 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
const struct nft_af_info *afi;
struct net *net = sock_net(skb->sk);
const struct nft_table *table;
- struct nft_chain *chain;
- struct nft_rule *rule, *tmp;
+ struct nft_chain *chain = NULL;
+ struct nft_rule *rule;
int family = nfmsg->nfgen_family, err = 0;
struct nft_ctx ctx;
@@ -1738,22 +1761,29 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
if (IS_ERR(table))
return PTR_ERR(table);
- chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
- if (IS_ERR(chain))
- return PTR_ERR(chain);
+ if (nla[NFTA_RULE_CHAIN]) {
+ chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
+ if (IS_ERR(chain))
+ return PTR_ERR(chain);
+ }
nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
- if (nla[NFTA_RULE_HANDLE]) {
- rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
- if (IS_ERR(rule))
- return PTR_ERR(rule);
+ if (chain) {
+ if (nla[NFTA_RULE_HANDLE]) {
+ rule = nf_tables_rule_lookup(chain,
+ nla[NFTA_RULE_HANDLE]);
+ if (IS_ERR(rule))
+ return PTR_ERR(rule);
- err = nf_tables_delrule_one(&ctx, rule);
- } else {
- /* Remove all rules in this chain */
- list_for_each_entry_safe(rule, tmp, &chain->rules, list) {
err = nf_tables_delrule_one(&ctx, rule);
+ } else {
+ err = nf_table_delrule_by_chain(&ctx);
+ }
+ } else {
+ list_for_each_entry(chain, &table->chains, list) {
+ ctx.chain = chain;
+ err = nf_table_delrule_by_chain(&ctx);
if (err < 0)
break;
}
@@ -1903,12 +1933,14 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
{
struct net *net = sock_net(skb->sk);
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
- const struct nft_af_info *afi;
+ const struct nft_af_info *afi = NULL;
const struct nft_table *table = NULL;
- afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
- if (IS_ERR(afi))
- return PTR_ERR(afi);
+ if (nfmsg->nfgen_family != NFPROTO_UNSPEC) {
+ afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
+ if (IS_ERR(afi))
+ return PTR_ERR(afi);
+ }
if (nla[NFTA_SET_TABLE] != NULL) {
table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]);
@@ -1953,11 +1985,14 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
return -ENOMEM;
list_for_each_entry(i, &ctx->table->sets, list) {
- if (!sscanf(i->name, name, &n))
+ int tmp;
+
+ if (!sscanf(i->name, name, &tmp))
continue;
- if (n < 0 || n > BITS_PER_LONG * PAGE_SIZE)
+ if (tmp < 0 || tmp > BITS_PER_LONG * PAGE_SIZE)
continue;
- set_bit(n, inuse);
+
+ set_bit(tmp, inuse);
}
n = find_first_zero_bit(inuse, BITS_PER_LONG * PAGE_SIZE);
@@ -2074,21 +2109,25 @@ done:
return skb->len;
}
-static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
- struct netlink_callback *cb)
+static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
+ struct netlink_callback *cb)
{
const struct nft_set *set;
- unsigned int idx = 0, s_idx = cb->args[0];
+ unsigned int idx, s_idx = cb->args[0];
struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
if (cb->args[1])
return skb->len;
list_for_each_entry(table, &ctx->afi->tables, list) {
- if (cur_table && cur_table != table)
- continue;
+ if (cur_table) {
+ if (cur_table != table)
+ continue;
+ cur_table = NULL;
+ }
ctx->table = table;
+ idx = 0;
list_for_each_entry(set, &ctx->table->sets, list) {
if (idx < s_idx)
goto cont;
@@ -2107,6 +2146,61 @@ done:
return skb->len;
}
+static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ const struct nft_set *set;
+ unsigned int idx, s_idx = cb->args[0];
+ const struct nft_af_info *afi;
+ struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
+ struct net *net = sock_net(skb->sk);
+ int cur_family = cb->args[3];
+
+ if (cb->args[1])
+ return skb->len;
+
+ list_for_each_entry(afi, &net->nft.af_info, list) {
+ if (cur_family) {
+ if (afi->family != cur_family)
+ continue;
+
+ cur_family = 0;
+ }
+
+ list_for_each_entry(table, &afi->tables, list) {
+ if (cur_table) {
+ if (cur_table != table)
+ continue;
+
+ cur_table = NULL;
+ }
+
+ ctx->table = table;
+ ctx->afi = afi;
+ idx = 0;
+ list_for_each_entry(set, &ctx->table->sets, list) {
+ if (idx < s_idx)
+ goto cont;
+ if (nf_tables_fill_set(skb, ctx, set,
+ NFT_MSG_NEWSET,
+ NLM_F_MULTI) < 0) {
+ cb->args[0] = idx;
+ cb->args[2] = (unsigned long) table;
+ cb->args[3] = afi->family;
+ goto done;
+ }
+cont:
+ idx++;
+ }
+ if (s_idx)
+ s_idx = 0;
+ }
+ }
+ cb->args[1] = 1;
+done:
+ return skb->len;
+}
+
static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
{
const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
@@ -2123,9 +2217,12 @@ static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
if (err < 0)
return err;
- if (ctx.table == NULL)
- ret = nf_tables_dump_sets_all(&ctx, skb, cb);
- else
+ if (ctx.table == NULL) {
+ if (ctx.afi == NULL)
+ ret = nf_tables_dump_sets_all(&ctx, skb, cb);
+ else
+ ret = nf_tables_dump_sets_family(&ctx, skb, cb);
+ } else
ret = nf_tables_dump_sets_table(&ctx, skb, cb);
return ret;
@@ -2138,6 +2235,7 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
const struct nft_set *set;
struct nft_ctx ctx;
struct sk_buff *skb2;
+ const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
int err;
	/* Verify existence before starting dump */
@@ -2152,6 +2250,10 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
return netlink_dump_start(nlsk, skb, nlh, &c);
}
+ /* Only accept unspec with dump */
+ if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
+ return -EAFNOSUPPORT;
+
set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
if (IS_ERR(set))
return PTR_ERR(set);
@@ -2321,6 +2423,7 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
{
+ const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
struct nft_set *set;
struct nft_ctx ctx;
int err;
@@ -2332,6 +2435,9 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
if (err < 0)
return err;
+ if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
+ return -EAFNOSUPPORT;
+
set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
if (IS_ERR(set))
return PTR_ERR(set);
@@ -2350,7 +2456,9 @@ static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
enum nft_registers dreg;
dreg = nft_type_to_reg(set->dtype);
- return nft_validate_data_load(ctx, dreg, &elem->data, set->dtype);
+ return nft_validate_data_load(ctx, dreg, &elem->data,
+ set->dtype == NFT_DATA_VERDICT ?
+ NFT_DATA_VERDICT : NFT_DATA_VALUE);
}
int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
@@ -2501,9 +2609,8 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
u32 portid, seq;
int event, err;
- nfmsg = nlmsg_data(cb->nlh);
- err = nlmsg_parse(cb->nlh, sizeof(*nfmsg), nla, NFTA_SET_ELEM_LIST_MAX,
- nft_set_elem_list_policy);
+ err = nlmsg_parse(cb->nlh, sizeof(struct nfgenmsg), nla,
+ NFTA_SET_ELEM_LIST_MAX, nft_set_elem_list_policy);
if (err < 0)
return err;
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index cb9e685caae1..0d879fcb8763 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -109,14 +109,14 @@ static inline void nft_trace_packet(const struct nft_pktinfo *pkt,
{
struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
- nf_log_packet(net, pkt->xt.family, pkt->hooknum, pkt->skb, pkt->in,
+ nf_log_packet(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in,
pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ",
chain->table->name, chain->name, comments[type],
rulenum);
}
unsigned int
-nft_do_chain_pktinfo(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
+nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
{
const struct nft_chain *chain = ops->priv;
const struct nft_rule *rule;
@@ -164,7 +164,7 @@ next_rule:
break;
}
- switch (data[NFT_REG_VERDICT].verdict) {
+ switch (data[NFT_REG_VERDICT].verdict & NF_VERDICT_MASK) {
case NF_ACCEPT:
case NF_DROP:
case NF_QUEUE:
@@ -172,6 +172,9 @@ next_rule:
nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
return data[NFT_REG_VERDICT].verdict;
+ }
+
+ switch (data[NFT_REG_VERDICT].verdict) {
case NFT_JUMP:
if (unlikely(pkt->skb->nf_trace))
nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
@@ -213,7 +216,7 @@ next_rule:
return nft_base_chain(chain)->policy;
}
-EXPORT_SYMBOL_GPL(nft_do_chain_pktinfo);
+EXPORT_SYMBOL_GPL(nft_do_chain);
int __init nf_tables_core_module_init(void)
{
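
Splitting the verdict switch matters because an nf_tables verdict word is not a plain enum: NF_QUEUE verdicts carry the target queue number in the upper 16 bits plus flag bits such as queue bypass, so only the masked low byte may be compared against NF_ACCEPT/NF_DROP/NF_QUEUE, while internal verdicts like NFT_JUMP are full-word values. A small sketch of composing and decomposing such a verdict with the kernel's own macros:

#include <linux/types.h>
#include <linux/netfilter.h>

static unsigned int make_queue_verdict(u16 queue, bool bypass)
{
	unsigned int verdict = NF_QUEUE_NR(queue);	/* queue in high bits */

	if (bypass)
		verdict |= NF_VERDICT_FLAG_QUEUE_BYPASS;
	return verdict;
}

static unsigned int verdict_base(unsigned int verdict)
{
	return verdict & NF_VERDICT_MASK;	/* NF_ACCEPT, NF_DROP, NF_QUEUE */
}

static unsigned int verdict_queue(unsigned int verdict)
{
	return verdict >> NF_VERDICT_QBITS;	/* recover the queue number */
}
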
diff --git a/net/netfilter/nf_tables_inet.c b/net/netfilter/nf_tables_inet.c
new file mode 100644
index 000000000000..9dd2d216cfc1
--- /dev/null
+++ b/net/netfilter/nf_tables_inet.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2012-2014 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_ipv4.h>
+#include <net/netfilter/nf_tables_ipv6.h>
+#include <net/ip.h>
+
+static void nft_inet_hook_ops_init(struct nf_hook_ops *ops, unsigned int n)
+{
+ struct nft_af_info *afi;
+
+ if (n == 1)
+ afi = &nft_af_ipv4;
+ else
+ afi = &nft_af_ipv6;
+
+ ops->pf = afi->family;
+ if (afi->hooks[ops->hooknum])
+ ops->hook = afi->hooks[ops->hooknum];
+}
+
+static struct nft_af_info nft_af_inet __read_mostly = {
+ .family = NFPROTO_INET,
+ .nhooks = NF_INET_NUMHOOKS,
+ .owner = THIS_MODULE,
+ .nops = 2,
+ .hook_ops_init = nft_inet_hook_ops_init,
+};
+
+static int __net_init nf_tables_inet_init_net(struct net *net)
+{
+ net->nft.inet = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
+ if (net->nft.inet == NULL)
+ return -ENOMEM;
+ memcpy(net->nft.inet, &nft_af_inet, sizeof(nft_af_inet));
+
+ if (nft_register_afinfo(net, net->nft.inet) < 0)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(net->nft.inet);
+ return -ENOMEM;
+}
+
+static void __net_exit nf_tables_inet_exit_net(struct net *net)
+{
+ nft_unregister_afinfo(net->nft.inet);
+ kfree(net->nft.inet);
+}
+
+static struct pernet_operations nf_tables_inet_net_ops = {
+ .init = nf_tables_inet_init_net,
+ .exit = nf_tables_inet_exit_net,
+};
+
+static const struct nf_chain_type filter_inet = {
+ .name = "filter",
+ .type = NFT_CHAIN_T_DEFAULT,
+ .family = NFPROTO_INET,
+ .owner = THIS_MODULE,
+ .hook_mask = (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_LOCAL_OUT) |
+ (1 << NF_INET_FORWARD) |
+ (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_POST_ROUTING),
+};
+
+static int __init nf_tables_inet_init(void)
+{
+ int ret;
+
+ nft_register_chain_type(&filter_inet);
+ ret = register_pernet_subsys(&nf_tables_inet_net_ops);
+ if (ret < 0)
+ nft_unregister_chain_type(&filter_inet);
+
+ return ret;
+}
+
+static void __exit nf_tables_inet_exit(void)
+{
+ unregister_pernet_subsys(&nf_tables_inet_net_ops);
+ nft_unregister_chain_type(&filter_inet);
+}
+
+module_init(nf_tables_inet_init);
+module_exit(nf_tables_inet_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_FAMILY(1);
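
nft_af_inet is a pseudo-family with no packet hooks of its own: .nops = 2 makes each base chain instantiate two nf_hook_ops, and nft_inet_hook_ops_init() retargets copy 1 at the IPv4 family's hooks and the other copy at IPv6, so a single chain sees both protocols. A hedged sketch of what the registering side plausibly does with .nops (illustrative only, not the actual nf_tables chain-registration code):

#include <linux/netfilter.h>
#include <net/netfilter/nf_tables.h>

static int register_chain_hooks(struct nft_af_info *afi,
				struct nf_hook_ops *ops, unsigned int nops)
{
	unsigned int i;
	int err;

	for (i = 0; i < nops; i++) {
		ops[i] = ops[0];			/* clone the base ops */
		if (afi->hook_ops_init)
			afi->hook_ops_init(&ops[i], i);	/* fix up pf + hook */
		err = nf_register_hook(&ops[i]);
		if (err < 0) {
			while (i-- > 0)
				nf_unregister_hook(&ops[i]);
			return err;
		}
	}
	return 0;
}
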
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 3c4b69e5fe17..a155d19a225e 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -1053,6 +1053,7 @@ static void __net_exit nfnl_log_net_exit(struct net *net)
#ifdef CONFIG_PROC_FS
remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter);
#endif
+ nf_log_unset(net, &nfulnl_logger);
}
static struct pernet_operations nfnl_log_net_ops = {
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 21258cf70091..f072fe803510 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -29,6 +29,7 @@
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>
+#include <net/tcp_states.h>
#include <net/netfilter/nf_queue.h>
#include <net/netns/generic.h>
#include <net/netfilter/nfnetlink_queue.h>
@@ -235,51 +236,6 @@ nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
spin_unlock_bh(&queue->lock);
}
-static void
-nfqnl_zcopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
-{
- int i, j = 0;
- int plen = 0; /* length of skb->head fragment */
- struct page *page;
- unsigned int offset;
-
- /* dont bother with small payloads */
- if (len <= skb_tailroom(to)) {
- skb_copy_bits(from, 0, skb_put(to, len), len);
- return;
- }
-
- if (hlen) {
- skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
- len -= hlen;
- } else {
- plen = min_t(int, skb_headlen(from), len);
- if (plen) {
- page = virt_to_head_page(from->head);
- offset = from->data - (unsigned char *)page_address(page);
- __skb_fill_page_desc(to, 0, page, offset, plen);
- get_page(page);
- j = 1;
- len -= plen;
- }
- }
-
- to->truesize += len + plen;
- to->len += len + plen;
- to->data_len += len + plen;
-
- for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
- if (!len)
- break;
- skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
- skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
- len -= skb_shinfo(to)->frags[j].size;
- skb_frag_ref(to, j);
- j++;
- }
- skb_shinfo(to)->nr_frags = j;
-}
-
static int
nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet,
bool csum_verify)
@@ -297,6 +253,31 @@ nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet,
return flags ? nla_put_be32(nlskb, NFQA_SKB_INFO, htonl(flags)) : 0;
}
+static int nfqnl_put_sk_uidgid(struct sk_buff *skb, struct sock *sk)
+{
+ const struct cred *cred;
+
+ if (sk->sk_state == TCP_TIME_WAIT)
+ return 0;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ if (sk->sk_socket && sk->sk_socket->file) {
+ cred = sk->sk_socket->file->f_cred;
+ if (nla_put_be32(skb, NFQA_UID,
+ htonl(from_kuid_munged(&init_user_ns, cred->fsuid))))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFQA_GID,
+ htonl(from_kgid_munged(&init_user_ns, cred->fsgid))))
+ goto nla_put_failure;
+ }
+ read_unlock_bh(&sk->sk_callback_lock);
+ return 0;
+
+nla_put_failure:
+ read_unlock_bh(&sk->sk_callback_lock);
+ return -1;
+}
+
static struct sk_buff *
nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
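
nfqnl_put_sk_uidgid() holds sk_callback_lock because sk->sk_socket can be detached concurrently, and it bails out on TCP_TIME_WAIT since timewait minisockets carry no file and hence no credentials. Before hitting the wire, the kernel-internal ids are translated into the initial user namespace; a minimal sketch of just that translation step (assumes a valid cred pointer):

#include <linux/cred.h>
#include <linux/uidgid.h>

/* Same translation as above, isolated: kuid/kgid -> wire-format u32. */
static void cred_to_wire(const struct cred *cred, u32 *uid, u32 *gid)
{
	*uid = from_kuid_munged(&init_user_ns, cred->fsuid);
	*gid = from_kgid_munged(&init_user_ns, cred->fsgid);
}
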
struct nf_queue_entry *entry,
@@ -304,7 +285,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
{
size_t size;
size_t data_len = 0, cap_len = 0;
- int hlen = 0;
+ unsigned int hlen = 0;
struct sk_buff *skb;
struct nlattr *nla;
struct nfqnl_msg_packet_hdr *pmsg;
@@ -356,14 +337,8 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
if (data_len > entskb->len)
data_len = entskb->len;
- if (!entskb->head_frag ||
- skb_headlen(entskb) < L1_CACHE_BYTES ||
- skb_shinfo(entskb)->nr_frags >= MAX_SKB_FRAGS)
- hlen = skb_headlen(entskb);
-
- if (skb_has_frag_list(entskb))
- hlen = entskb->len;
- hlen = min_t(int, data_len, hlen);
+ hlen = skb_zerocopy_headlen(entskb);
+ hlen = min_t(unsigned int, hlen, data_len);
size += sizeof(struct nlattr) + hlen;
cap_len = entskb->len;
break;
@@ -372,6 +347,11 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
if (queue->flags & NFQA_CFG_F_CONNTRACK)
ct = nfqnl_ct_get(entskb, &size, &ctinfo);
+ if (queue->flags & NFQA_CFG_F_UID_GID) {
+ size += (nla_total_size(sizeof(u_int32_t)) /* uid */
+ + nla_total_size(sizeof(u_int32_t))); /* gid */
+ }
+
skb = nfnetlink_alloc_skb(net, size, queue->peer_portid,
GFP_ATOMIC);
if (!skb)
@@ -484,6 +464,10 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
goto nla_put_failure;
}
+ if ((queue->flags & NFQA_CFG_F_UID_GID) && entskb->sk &&
+ nfqnl_put_sk_uidgid(skb, entskb->sk) < 0)
+ goto nla_put_failure;
+
if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0)
goto nla_put_failure;
@@ -504,7 +488,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
nla->nla_type = NFQA_PAYLOAD;
nla->nla_len = nla_attr_size(data_len);
- nfqnl_zcopy(skb, entskb, data_len, hlen);
+ skb_zerocopy(skb, entskb, data_len, hlen);
}
nlh->nlmsg_len = skb->len;
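
The hunk above retires the open-coded nfqnl_zcopy() in favour of the shared helpers: skb_zerocopy_headlen() decides how many bytes must be copied linearly (non-head_frag heads, tiny headlens, frag-list skbs -- the same cases the removed conditions tested), and skb_zerocopy() then copies that head and takes page references on the rest. The calling pattern, as a hedged sketch (this version of the helper returns void, so no error path is shown):

#include <linux/kernel.h>
#include <linux/skbuff.h>

static void copy_payload(struct sk_buff *dst, struct sk_buff *src,
			 unsigned int data_len)
{
	unsigned int hlen = skb_zerocopy_headlen(src);

	hlen = min_t(unsigned int, hlen, data_len);	/* cap at payload */
	skb_zerocopy(dst, src, data_len, hlen);		/* copy head, ref frags */
}
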
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index da0c1f4ada12..82cb8236f8a1 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -92,7 +92,7 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
if (ctx->chain->flags & NFT_BASE_CHAIN) {
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);
- const struct nf_hook_ops *ops = &basechain->ops;
+ const struct nf_hook_ops *ops = &basechain->ops[0];
par->hook_mask = 1 << ops->hooknum;
}
@@ -253,7 +253,7 @@ static int nft_target_validate(const struct nft_ctx *ctx,
if (ctx->chain->flags & NFT_BASE_CHAIN) {
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);
- const struct nf_hook_ops *ops = &basechain->ops;
+ const struct nf_hook_ops *ops = &basechain->ops[0];
hook_mask = 1 << ops->hooknum;
if (hook_mask & target->hooks)
@@ -323,7 +323,7 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
if (ctx->chain->flags & NFT_BASE_CHAIN) {
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);
- const struct nf_hook_ops *ops = &basechain->ops;
+ const struct nf_hook_ops *ops = &basechain->ops[0];
par->hook_mask = 1 << ops->hooknum;
}
@@ -449,7 +449,7 @@ static int nft_match_validate(const struct nft_ctx *ctx,
if (ctx->chain->flags & NFT_BASE_CHAIN) {
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);
- const struct nf_hook_ops *ops = &basechain->ops;
+ const struct nf_hook_ops *ops = &basechain->ops[0];
hook_mask = 1 << ops->hooknum;
if (hook_mask & match->hooks)
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 955f4e6e7089..c7c12858e113 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -18,17 +18,21 @@
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
struct nft_ct {
enum nft_ct_keys key:8;
enum ip_conntrack_dir dir:8;
- enum nft_registers dreg:8;
+	union {
+ enum nft_registers dreg:8;
+ enum nft_registers sreg:8;
+ };
uint8_t family;
};
-static void nft_ct_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
- const struct nft_pktinfo *pkt)
+static void nft_ct_get_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
{
const struct nft_ct *priv = nft_expr_priv(expr);
struct nft_data *dest = &data[priv->dreg];
@@ -123,24 +127,77 @@ err:
data[NFT_REG_VERDICT].verdict = NFT_BREAK;
}
+static void nft_ct_set_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_ct *priv = nft_expr_priv(expr);
+ struct sk_buff *skb = pkt->skb;
+ u32 value = data[priv->sreg].data[0];
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (ct == NULL)
+ return;
+
+ switch (priv->key) {
+#ifdef CONFIG_NF_CONNTRACK_MARK
+ case NFT_CT_MARK:
+ if (ct->mark != value) {
+ ct->mark = value;
+ nf_conntrack_event_cache(IPCT_MARK, ct);
+ }
+ break;
+#endif
+ }
+}
+
static const struct nla_policy nft_ct_policy[NFTA_CT_MAX + 1] = {
[NFTA_CT_DREG] = { .type = NLA_U32 },
[NFTA_CT_KEY] = { .type = NLA_U32 },
[NFTA_CT_DIRECTION] = { .type = NLA_U8 },
+ [NFTA_CT_SREG] = { .type = NLA_U32 },
};
-static int nft_ct_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[])
+static int nft_ct_l3proto_try_module_get(uint8_t family)
{
- struct nft_ct *priv = nft_expr_priv(expr);
int err;
- if (tb[NFTA_CT_DREG] == NULL ||
- tb[NFTA_CT_KEY] == NULL)
- return -EINVAL;
+ if (family == NFPROTO_INET) {
+ err = nf_ct_l3proto_try_module_get(NFPROTO_IPV4);
+ if (err < 0)
+ goto err1;
+ err = nf_ct_l3proto_try_module_get(NFPROTO_IPV6);
+ if (err < 0)
+ goto err2;
+ } else {
+ err = nf_ct_l3proto_try_module_get(family);
+ if (err < 0)
+ goto err1;
+ }
+ return 0;
+
+err2:
+ nf_ct_l3proto_module_put(NFPROTO_IPV4);
+err1:
+ return err;
+}
+
+static void nft_ct_l3proto_module_put(uint8_t family)
+{
+ if (family == NFPROTO_INET) {
+ nf_ct_l3proto_module_put(NFPROTO_IPV4);
+ nf_ct_l3proto_module_put(NFPROTO_IPV6);
+ } else
+ nf_ct_l3proto_module_put(family);
+}
+
+static int nft_ct_init_validate_get(const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_ct *priv = nft_expr_priv(expr);
- priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
if (tb[NFTA_CT_DIRECTION] != NULL) {
priv->dir = nla_get_u8(tb[NFTA_CT_DIRECTION]);
switch (priv->dir) {
@@ -179,34 +236,72 @@ static int nft_ct_init(const struct nft_ctx *ctx,
return -EOPNOTSUPP;
}
- err = nf_ct_l3proto_try_module_get(ctx->afi->family);
+ return 0;
+}
+
+static int nft_ct_init_validate_set(uint32_t key)
+{
+ switch (key) {
+ case NFT_CT_MARK:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int nft_ct_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_ct *priv = nft_expr_priv(expr);
+ int err;
+
+ priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
+
+ if (tb[NFTA_CT_DREG]) {
+ err = nft_ct_init_validate_get(expr, tb);
+ if (err < 0)
+ return err;
+
+ priv->dreg = ntohl(nla_get_be32(tb[NFTA_CT_DREG]));
+ err = nft_validate_output_register(priv->dreg);
+ if (err < 0)
+ return err;
+
+ err = nft_validate_data_load(ctx, priv->dreg, NULL,
+ NFT_DATA_VALUE);
+ if (err < 0)
+ return err;
+ } else {
+ err = nft_ct_init_validate_set(priv->key);
+ if (err < 0)
+ return err;
+
+ priv->sreg = ntohl(nla_get_be32(tb[NFTA_CT_SREG]));
+ err = nft_validate_input_register(priv->sreg);
+ if (err < 0)
+ return err;
+ }
+
+ err = nft_ct_l3proto_try_module_get(ctx->afi->family);
if (err < 0)
return err;
- priv->family = ctx->afi->family;
- priv->dreg = ntohl(nla_get_be32(tb[NFTA_CT_DREG]));
- err = nft_validate_output_register(priv->dreg);
- if (err < 0)
- goto err1;
+ priv->family = ctx->afi->family;
- err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
- if (err < 0)
- goto err1;
return 0;
-
-err1:
- nf_ct_l3proto_module_put(ctx->afi->family);
- return err;
}
static void nft_ct_destroy(const struct nft_expr *expr)
{
struct nft_ct *priv = nft_expr_priv(expr);
- nf_ct_l3proto_module_put(priv->family);
+ nft_ct_l3proto_module_put(priv->family);
}
-static int nft_ct_dump(struct sk_buff *skb, const struct nft_expr *expr)
+static int nft_ct_get_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_ct *priv = nft_expr_priv(expr);
@@ -222,19 +317,61 @@ nla_put_failure:
return -1;
}
+static int nft_ct_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_ct *priv = nft_expr_priv(expr);
+
+ if (nla_put_be32(skb, NFTA_CT_SREG, htonl(priv->sreg)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key)))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
static struct nft_expr_type nft_ct_type;
-static const struct nft_expr_ops nft_ct_ops = {
+static const struct nft_expr_ops nft_ct_get_ops = {
.type = &nft_ct_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
- .eval = nft_ct_eval,
+ .eval = nft_ct_get_eval,
.init = nft_ct_init,
.destroy = nft_ct_destroy,
- .dump = nft_ct_dump,
+ .dump = nft_ct_get_dump,
};
+static const struct nft_expr_ops nft_ct_set_ops = {
+ .type = &nft_ct_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
+ .eval = nft_ct_set_eval,
+ .init = nft_ct_init,
+ .destroy = nft_ct_destroy,
+ .dump = nft_ct_set_dump,
+};
+
+static const struct nft_expr_ops *
+nft_ct_select_ops(const struct nft_ctx *ctx,
+ const struct nlattr * const tb[])
+{
+ if (tb[NFTA_CT_KEY] == NULL)
+ return ERR_PTR(-EINVAL);
+
+ if (tb[NFTA_CT_DREG] && tb[NFTA_CT_SREG])
+ return ERR_PTR(-EINVAL);
+
+ if (tb[NFTA_CT_DREG])
+ return &nft_ct_get_ops;
+
+ if (tb[NFTA_CT_SREG])
+ return &nft_ct_set_ops;
+
+ return ERR_PTR(-EINVAL);
+}
+
static struct nft_expr_type nft_ct_type __read_mostly = {
.name = "ct",
- .ops = &nft_ct_ops,
+ .select_ops = &nft_ct_select_ops,
.policy = nft_ct_policy,
.maxattr = NFTA_CT_MAX,
.owner = THIS_MODULE,
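
With separate get and set variants, nft_ct moves from a fixed .ops to a .select_ops callback: the core invokes it with the parsed attributes before init, and the presence of a destination versus source register attribute picks the ops table, hence the eval/dump pair (nft_meta below follows the identical pattern). The dispatch shape is generic enough to restate on its own (sketch; parameter names are illustrative):

#include <linux/err.h>
#include <net/netfilter/nf_tables.h>

/* Exactly one of DREG (get) or SREG (set) must be present. */
static const struct nft_expr_ops *
pick_ops(const struct nlattr * const tb[], int key_attr,
	 int dreg_attr, const struct nft_expr_ops *get_ops,
	 int sreg_attr, const struct nft_expr_ops *set_ops)
{
	if (tb[key_attr] == NULL)
		return ERR_PTR(-EINVAL);
	if (tb[dreg_attr] && tb[sreg_attr])
		return ERR_PTR(-EINVAL);	/* ambiguous request */
	if (tb[dreg_attr])
		return get_ops;			/* load key into register */
	if (tb[sreg_attr])
		return set_ops;			/* store register into key */
	return ERR_PTR(-EINVAL);
}
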
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index 8e0bb75e7c51..55c939f5371f 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -31,7 +31,7 @@ static void nft_exthdr_eval(const struct nft_expr *expr,
{
struct nft_exthdr *priv = nft_expr_priv(expr);
struct nft_data *dest = &data[priv->dreg];
- unsigned int offset;
+ unsigned int offset = 0;
int err;
err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 57cad072a13e..5af790123ad8 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -33,7 +33,7 @@ static void nft_log_eval(const struct nft_expr *expr,
const struct nft_log *priv = nft_expr_priv(expr);
struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
- nf_log_packet(net, priv->family, pkt->hooknum, pkt->skb, pkt->in,
+ nf_log_packet(net, priv->family, pkt->ops->hooknum, pkt->skb, pkt->in,
pkt->out, &priv->loginfo, "%s", priv->prefix);
}
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 8c28220a90b3..e8254ad2e5a9 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -21,12 +21,15 @@
struct nft_meta {
enum nft_meta_keys key:8;
- enum nft_registers dreg:8;
+ union {
+ enum nft_registers dreg:8;
+ enum nft_registers sreg:8;
+ };
};
-static void nft_meta_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
- const struct nft_pktinfo *pkt)
+static void nft_meta_get_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
{
const struct nft_meta *priv = nft_expr_priv(expr);
const struct sk_buff *skb = pkt->skb;
@@ -40,6 +43,12 @@ static void nft_meta_eval(const struct nft_expr *expr,
case NFT_META_PROTOCOL:
*(__be16 *)dest->data = skb->protocol;
break;
+ case NFT_META_NFPROTO:
+ dest->data[0] = pkt->ops->pf;
+ break;
+ case NFT_META_L4PROTO:
+ dest->data[0] = pkt->tprot;
+ break;
case NFT_META_PRIORITY:
dest->data[0] = skb->priority;
break;
@@ -132,25 +141,54 @@ err:
data[NFT_REG_VERDICT].verdict = NFT_BREAK;
}
+static void nft_meta_set_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_meta *meta = nft_expr_priv(expr);
+ struct sk_buff *skb = pkt->skb;
+ u32 value = data[meta->sreg].data[0];
+
+ switch (meta->key) {
+ case NFT_META_MARK:
+ skb->mark = value;
+ break;
+ case NFT_META_PRIORITY:
+ skb->priority = value;
+ break;
+ case NFT_META_NFTRACE:
+ skb->nf_trace = 1;
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
static const struct nla_policy nft_meta_policy[NFTA_META_MAX + 1] = {
[NFTA_META_DREG] = { .type = NLA_U32 },
[NFTA_META_KEY] = { .type = NLA_U32 },
+ [NFTA_META_SREG] = { .type = NLA_U32 },
};
-static int nft_meta_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
- const struct nlattr * const tb[])
+static int nft_meta_init_validate_set(uint32_t key)
{
- struct nft_meta *priv = nft_expr_priv(expr);
- int err;
-
- if (tb[NFTA_META_DREG] == NULL ||
- tb[NFTA_META_KEY] == NULL)
- return -EINVAL;
+ switch (key) {
+ case NFT_META_MARK:
+ case NFT_META_PRIORITY:
+ case NFT_META_NFTRACE:
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
- priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
- switch (priv->key) {
+static int nft_meta_init_validate_get(uint32_t key)
+{
+ switch (key) {
case NFT_META_LEN:
case NFT_META_PROTOCOL:
+ case NFT_META_NFPROTO:
+ case NFT_META_L4PROTO:
case NFT_META_PRIORITY:
case NFT_META_MARK:
case NFT_META_IIF:
@@ -167,26 +205,72 @@ static int nft_meta_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
#ifdef CONFIG_NETWORK_SECMARK
case NFT_META_SECMARK:
#endif
- break;
+ return 0;
default:
return -EOPNOTSUPP;
}
- priv->dreg = ntohl(nla_get_be32(tb[NFTA_META_DREG]));
- err = nft_validate_output_register(priv->dreg);
+}
+
+static int nft_meta_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_meta *priv = nft_expr_priv(expr);
+ int err;
+
+ priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
+
+ if (tb[NFTA_META_DREG]) {
+ err = nft_meta_init_validate_get(priv->key);
+ if (err < 0)
+ return err;
+
+ priv->dreg = ntohl(nla_get_be32(tb[NFTA_META_DREG]));
+ err = nft_validate_output_register(priv->dreg);
+ if (err < 0)
+ return err;
+
+ return nft_validate_data_load(ctx, priv->dreg, NULL,
+ NFT_DATA_VALUE);
+ }
+
+ err = nft_meta_init_validate_set(priv->key);
+ if (err < 0)
+ return err;
+
+ priv->sreg = ntohl(nla_get_be32(tb[NFTA_META_SREG]));
+ err = nft_validate_input_register(priv->sreg);
if (err < 0)
return err;
- return nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+
+ return 0;
}
-static int nft_meta_dump(struct sk_buff *skb, const struct nft_expr *expr)
+static int nft_meta_get_dump(struct sk_buff *skb,
+ const struct nft_expr *expr)
{
const struct nft_meta *priv = nft_expr_priv(expr);
+ if (nla_put_be32(skb, NFTA_META_KEY, htonl(priv->key)))
+ goto nla_put_failure;
if (nla_put_be32(skb, NFTA_META_DREG, htonl(priv->dreg)))
goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static int nft_meta_set_dump(struct sk_buff *skb,
+ const struct nft_expr *expr)
+{
+ const struct nft_meta *priv = nft_expr_priv(expr);
+
if (nla_put_be32(skb, NFTA_META_KEY, htonl(priv->key)))
goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_META_SREG, htonl(priv->sreg)))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
@@ -194,17 +278,44 @@ nla_put_failure:
}
static struct nft_expr_type nft_meta_type;
-static const struct nft_expr_ops nft_meta_ops = {
+static const struct nft_expr_ops nft_meta_get_ops = {
+ .type = &nft_meta_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
+ .eval = nft_meta_get_eval,
+ .init = nft_meta_init,
+ .dump = nft_meta_get_dump,
+};
+
+static const struct nft_expr_ops nft_meta_set_ops = {
.type = &nft_meta_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
- .eval = nft_meta_eval,
+ .eval = nft_meta_set_eval,
.init = nft_meta_init,
- .dump = nft_meta_dump,
+ .dump = nft_meta_set_dump,
};
+static const struct nft_expr_ops *
+nft_meta_select_ops(const struct nft_ctx *ctx,
+ const struct nlattr * const tb[])
+{
+ if (tb[NFTA_META_KEY] == NULL)
+ return ERR_PTR(-EINVAL);
+
+ if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG])
+ return ERR_PTR(-EINVAL);
+
+ if (tb[NFTA_META_DREG])
+ return &nft_meta_get_ops;
+
+ if (tb[NFTA_META_SREG])
+ return &nft_meta_set_ops;
+
+ return ERR_PTR(-EINVAL);
+}
+
static struct nft_expr_type nft_meta_type __read_mostly = {
.name = "meta",
- .ops = &nft_meta_ops,
+ .select_ops = &nft_meta_select_ops,
.policy = nft_meta_policy,
.maxattr = NFTA_META_MAX,
.owner = THIS_MODULE,
diff --git a/net/netfilter/nft_meta_target.c b/net/netfilter/nft_meta_target.c
deleted file mode 100644
index 71177df75ffb..000000000000
--- a/net/netfilter/nft_meta_target.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Development of this code funded by Astaro AG (http://www.astaro.com/)
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/rbtree.h>
-#include <linux/netlink.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter/nf_tables.h>
-#include <net/netfilter/nf_tables.h>
-
-struct nft_meta {
- enum nft_meta_keys key;
-};
-
-static void nft_meta_eval(const struct nft_expr *expr,
- struct nft_data *nfres,
- struct nft_data *data,
- const struct nft_pktinfo *pkt)
-{
- const struct nft_meta *meta = nft_expr_priv(expr);
- struct sk_buff *skb = pkt->skb;
- u32 val = data->data[0];
-
- switch (meta->key) {
- case NFT_META_MARK:
- skb->mark = val;
- break;
- case NFT_META_PRIORITY:
- skb->priority = val;
- break;
- case NFT_META_NFTRACE:
- skb->nf_trace = val;
- break;
-#ifdef CONFIG_NETWORK_SECMARK
- case NFT_META_SECMARK:
- skb->secmark = val;
- break;
-#endif
- default:
- WARN_ON(1);
- }
-}
-
-static const struct nla_policy nft_meta_policy[NFTA_META_MAX + 1] = {
- [NFTA_META_KEY] = { .type = NLA_U32 },
-};
-
-static int nft_meta_init(const struct nft_expr *expr, struct nlattr *tb[])
-{
- struct nft_meta *meta = nft_expr_priv(expr);
-
- if (tb[NFTA_META_KEY] == NULL)
- return -EINVAL;
-
- meta->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
- switch (meta->key) {
- case NFT_META_MARK:
- case NFT_META_PRIORITY:
- case NFT_META_NFTRACE:
-#ifdef CONFIG_NETWORK_SECMARK
- case NFT_META_SECMARK:
-#endif
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int nft_meta_dump(struct sk_buff *skb, const struct nft_expr *expr)
-{
- struct nft_meta *meta = nft_expr_priv(expr);
-
- NLA_PUT_BE32(skb, NFTA_META_KEY, htonl(meta->key));
- return 0;
-
-nla_put_failure:
- return -1;
-}
-
-static struct nft_expr_ops meta_target __read_mostly = {
- .name = "meta",
- .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
- .owner = THIS_MODULE,
- .eval = nft_meta_eval,
- .init = nft_meta_init,
- .dump = nft_meta_dump,
- .policy = nft_meta_policy,
- .maxattr = NFTA_META_MAX,
-};
-
-static int __init nft_meta_target_init(void)
-{
- return nft_register_expr(&meta_target);
-}
-
-static void __exit nft_meta_target_exit(void)
-{
- nft_unregister_expr(&meta_target);
-}
-
-module_init(nft_meta_target_init);
-module_exit(nft_meta_target_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_EXPR("meta");
diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
new file mode 100644
index 000000000000..cbea473d69e9
--- /dev/null
+++ b/net/netfilter/nft_queue.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2013 Eric Leblond <eric@regit.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code partly funded by OISF
+ * (http://www.openinfosecfoundation.org/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/jhash.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_queue.h>
+
+static u32 jhash_initval __read_mostly;
+
+struct nft_queue {
+ u16 queuenum;
+ u16 queues_total;
+ u16 flags;
+ u8 family;
+};
+
+static void nft_queue_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_queue *priv = nft_expr_priv(expr);
+ u32 queue = priv->queuenum;
+ u32 ret;
+
+ if (priv->queues_total > 1) {
+ if (priv->flags & NFT_QUEUE_FLAG_CPU_FANOUT) {
+ int cpu = smp_processor_id();
+
+ queue = priv->queuenum + cpu % priv->queues_total;
+ } else {
+ queue = nfqueue_hash(pkt->skb, queue,
+ priv->queues_total, priv->family,
+ jhash_initval);
+ }
+ }
+
+ ret = NF_QUEUE_NR(queue);
+ if (priv->flags & NFT_QUEUE_FLAG_BYPASS)
+ ret |= NF_VERDICT_FLAG_QUEUE_BYPASS;
+
+ data[NFT_REG_VERDICT].verdict = ret;
+}
+
+static const struct nla_policy nft_queue_policy[NFTA_QUEUE_MAX + 1] = {
+ [NFTA_QUEUE_NUM] = { .type = NLA_U16 },
+ [NFTA_QUEUE_TOTAL] = { .type = NLA_U16 },
+ [NFTA_QUEUE_FLAGS] = { .type = NLA_U16 },
+};
+
+static int nft_queue_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_queue *priv = nft_expr_priv(expr);
+
+ if (tb[NFTA_QUEUE_NUM] == NULL)
+ return -EINVAL;
+
+ init_hashrandom(&jhash_initval);
+ priv->family = ctx->afi->family;
+ priv->queuenum = ntohs(nla_get_be16(tb[NFTA_QUEUE_NUM]));
+
+ if (tb[NFTA_QUEUE_TOTAL] != NULL)
+ priv->queues_total = ntohs(nla_get_be16(tb[NFTA_QUEUE_TOTAL]));
+ if (tb[NFTA_QUEUE_FLAGS] != NULL) {
+ priv->flags = ntohs(nla_get_be16(tb[NFTA_QUEUE_FLAGS]));
+ if (priv->flags & ~NFT_QUEUE_FLAG_MASK)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int nft_queue_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_queue *priv = nft_expr_priv(expr);
+
+ if (nla_put_be16(skb, NFTA_QUEUE_NUM, htons(priv->queuenum)) ||
+ nla_put_be16(skb, NFTA_QUEUE_TOTAL, htons(priv->queues_total)) ||
+ nla_put_be16(skb, NFTA_QUEUE_FLAGS, htons(priv->flags)))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_type nft_queue_type;
+static const struct nft_expr_ops nft_queue_ops = {
+ .type = &nft_queue_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_queue)),
+ .eval = nft_queue_eval,
+ .init = nft_queue_init,
+ .dump = nft_queue_dump,
+};
+
+static struct nft_expr_type nft_queue_type __read_mostly = {
+ .name = "queue",
+ .ops = &nft_queue_ops,
+ .policy = nft_queue_policy,
+ .maxattr = NFTA_QUEUE_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_queue_module_init(void)
+{
+ return nft_register_expr(&nft_queue_type);
+}
+
+static void __exit nft_queue_module_exit(void)
+{
+ nft_unregister_expr(&nft_queue_type);
+}
+
+module_init(nft_queue_module_init);
+module_exit(nft_queue_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Eric Leblond <eric@regit.org>");
+MODULE_ALIAS_NFT_EXPR("queue");
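
nft_queue_eval() spreads packets over the range [queuenum, queuenum + queues_total) in two ways: CPU fanout follows whichever CPU processes the packet, while the nfqueue_hash() path keeps a flow pinned to one queue independent of CPU. The hash-to-range step behind the latter is the classic multiply-shift scaling, also visible in the xt_NFQUEUE code this series refactors; shown here as a self-contained sketch:

#include <stdint.h>

/* Scale a uniform 32-bit hash into [base, base + n) without division:
 * (hash * n) >> 32 maps [0, 2^32) proportionally onto [0, n). */
static uint32_t pick_queue(uint32_t hash, uint32_t base, uint32_t n)
{
	return base + (uint32_t)(((uint64_t)hash * n) >> 32);
}
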
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/netfilter/nft_reject.c
index fff5ba1a33b7..5e204711d704 100644
--- a/net/ipv4/netfilter/nft_reject_ipv4.c
+++ b/net/netfilter/nft_reject.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2013 Eric Leblond <eric@regit.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -16,10 +17,16 @@
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/icmp.h>
+#include <net/netfilter/ipv4/nf_reject.h>
+
+#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
+#include <net/netfilter/ipv6/nf_reject.h>
+#endif
struct nft_reject {
enum nft_reject_types type:8;
u8 icmp_code;
+ u8 family;
};
static void nft_reject_eval(const struct nft_expr *expr,
@@ -27,12 +34,26 @@ static void nft_reject_eval(const struct nft_expr *expr,
const struct nft_pktinfo *pkt)
{
struct nft_reject *priv = nft_expr_priv(expr);
-
+#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
+ struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
+#endif
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
- icmp_send(pkt->skb, ICMP_DEST_UNREACH, priv->icmp_code, 0);
+ if (priv->family == NFPROTO_IPV4)
+ nf_send_unreach(pkt->skb, priv->icmp_code);
+#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
+ else if (priv->family == NFPROTO_IPV6)
+ nf_send_unreach6(net, pkt->skb, priv->icmp_code,
+ pkt->ops->hooknum);
+#endif
break;
case NFT_REJECT_TCP_RST:
+ if (priv->family == NFPROTO_IPV4)
+ nf_send_reset(pkt->skb, pkt->ops->hooknum);
+#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
+ else if (priv->family == NFPROTO_IPV6)
+ nf_send_reset6(net, pkt->skb, pkt->ops->hooknum);
+#endif
break;
}
@@ -53,6 +74,7 @@ static int nft_reject_init(const struct nft_ctx *ctx,
if (tb[NFTA_REJECT_TYPE] == NULL)
return -EINVAL;
+ priv->family = ctx->afi->family;
priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
@@ -72,7 +94,7 @@ static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_reject *priv = nft_expr_priv(expr);
- if (nla_put_be32(skb, NFTA_REJECT_TYPE, priv->type))
+ if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
goto nla_put_failure;
switch (priv->type) {
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index da35ac06a975..5929be622c5c 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -211,8 +211,10 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
ret = 0;
if ((info->ct_events || info->exp_events) &&
!nf_ct_ecache_ext_add(ct, info->ct_events, info->exp_events,
- GFP_KERNEL))
+ GFP_KERNEL)) {
+ ret = -EINVAL;
goto err3;
+ }
if (info->helper[0]) {
ret = xt_ct_set_helper(ct, info->helper, par);
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index ed00fef58996..8f1779ff7e30 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -11,15 +11,13 @@
#include <linux/module.h>
#include <linux/skbuff.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/jhash.h>
-
#include <linux/netfilter.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_NFQUEUE.h>
+#include <net/netfilter/nf_queue.h>
+
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("Xtables: packet forwarding to netlink");
MODULE_LICENSE("GPL");
@@ -28,7 +26,6 @@ MODULE_ALIAS("ip6t_NFQUEUE");
MODULE_ALIAS("arpt_NFQUEUE");
static u32 jhash_initval __read_mostly;
-static bool rnd_inited __read_mostly;
static unsigned int
nfqueue_tg(struct sk_buff *skb, const struct xt_action_param *par)
@@ -38,69 +35,16 @@ nfqueue_tg(struct sk_buff *skb, const struct xt_action_param *par)
return NF_QUEUE_NR(tinfo->queuenum);
}
-static u32 hash_v4(const struct sk_buff *skb)
-{
- const struct iphdr *iph = ip_hdr(skb);
-
- /* packets in either direction go into same queue */
- if ((__force u32)iph->saddr < (__force u32)iph->daddr)
- return jhash_3words((__force u32)iph->saddr,
- (__force u32)iph->daddr, iph->protocol, jhash_initval);
-
- return jhash_3words((__force u32)iph->daddr,
- (__force u32)iph->saddr, iph->protocol, jhash_initval);
-}
-
-#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
-static u32 hash_v6(const struct sk_buff *skb)
-{
- const struct ipv6hdr *ip6h = ipv6_hdr(skb);
- u32 a, b, c;
-
- if ((__force u32)ip6h->saddr.s6_addr32[3] <
- (__force u32)ip6h->daddr.s6_addr32[3]) {
- a = (__force u32) ip6h->saddr.s6_addr32[3];
- b = (__force u32) ip6h->daddr.s6_addr32[3];
- } else {
- b = (__force u32) ip6h->saddr.s6_addr32[3];
- a = (__force u32) ip6h->daddr.s6_addr32[3];
- }
-
- if ((__force u32)ip6h->saddr.s6_addr32[1] <
- (__force u32)ip6h->daddr.s6_addr32[1])
- c = (__force u32) ip6h->saddr.s6_addr32[1];
- else
- c = (__force u32) ip6h->daddr.s6_addr32[1];
-
- return jhash_3words(a, b, c, jhash_initval);
-}
-#endif
-
-static u32
-nfqueue_hash(const struct sk_buff *skb, const struct xt_action_param *par)
-{
- const struct xt_NFQ_info_v1 *info = par->targinfo;
- u32 queue = info->queuenum;
-
- if (par->family == NFPROTO_IPV4)
- queue += ((u64) hash_v4(skb) * info->queues_total) >> 32;
-#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
- else if (par->family == NFPROTO_IPV6)
- queue += ((u64) hash_v6(skb) * info->queues_total) >> 32;
-#endif
-
- return queue;
-}
-
static unsigned int
nfqueue_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_NFQ_info_v1 *info = par->targinfo;
u32 queue = info->queuenum;
- if (info->queues_total > 1)
- queue = nfqueue_hash(skb, par);
-
+ if (info->queues_total > 1) {
+ queue = nfqueue_hash(skb, queue, info->queues_total,
+ par->family, jhash_initval);
+ }
return NF_QUEUE_NR(queue);
}
@@ -120,10 +64,8 @@ static int nfqueue_tg_check(const struct xt_tgchk_param *par)
const struct xt_NFQ_info_v3 *info = par->targinfo;
u32 maxid;
- if (unlikely(!rnd_inited)) {
- get_random_bytes(&jhash_initval, sizeof(jhash_initval));
- rnd_inited = true;
- }
+ init_hashrandom(&jhash_initval);
+
if (info->queues_total == 0) {
pr_err("NFQUEUE: number of total queues is 0\n");
return -EINVAL;
@@ -154,8 +96,10 @@ nfqueue_tg_v3(struct sk_buff *skb, const struct xt_action_param *par)
int cpu = smp_processor_id();
queue = info->queuenum + cpu % info->queues_total;
- } else
- queue = nfqueue_hash(skb, par);
+ } else {
+ queue = nfqueue_hash(skb, queue, info->queues_total,
+ par->family, jhash_initval);
+ }
}
ret = NF_QUEUE_NR(queue);
diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c
new file mode 100644
index 000000000000..9a8e77e7f8d4
--- /dev/null
+++ b/net/netfilter/xt_cgroup.c
@@ -0,0 +1,71 @@
+/*
+ * Xtables module to match the process control group.
+ *
+ * Might be used to implement individual "per-application" firewall
+ * policies in contrast to global policies based on control groups.
+ * Matching is based upon processes tagged to net_cls' classid marker.
+ * Matching is based on the net_cls classid marker carried by the
+ * sockets of tagged processes.
+ * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/module.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_cgroup.h>
+#include <net/sock.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
+MODULE_DESCRIPTION("Xtables: process control group matching");
+MODULE_ALIAS("ipt_cgroup");
+MODULE_ALIAS("ip6t_cgroup");
+
+static int cgroup_mt_check(const struct xt_mtchk_param *par)
+{
+ struct xt_cgroup_info *info = par->matchinfo;
+
+ if (info->invert & ~1)
+ return -EINVAL;
+
+ return info->id ? 0 : -EINVAL;
+}
+
+static bool
+cgroup_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_cgroup_info *info = par->matchinfo;
+
+ if (skb->sk == NULL)
+ return false;
+
+ return (info->id == skb->sk->sk_classid) ^ info->invert;
+}
+
+static struct xt_match cgroup_mt_reg __read_mostly = {
+ .name = "cgroup",
+ .revision = 0,
+ .family = NFPROTO_UNSPEC,
+ .checkentry = cgroup_mt_check,
+ .match = cgroup_mt,
+ .matchsize = sizeof(struct xt_cgroup_info),
+ .me = THIS_MODULE,
+ .hooks = (1 << NF_INET_LOCAL_OUT) |
+ (1 << NF_INET_POST_ROUTING),
+};
+
+static int __init cgroup_mt_init(void)
+{
+ return xt_register_match(&cgroup_mt_reg);
+}
+
+static void __exit cgroup_mt_exit(void)
+{
+ xt_unregister_match(&cgroup_mt_reg);
+}
+
+module_init(cgroup_mt_init);
+module_exit(cgroup_mt_exit);
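
The classid compared here is stamped onto sockets by the net_cls cgroup controller, which is why the match is limited to the output-side hooks where a local socket is attached to the skb; a typical userspace pairing (hedged -- option name taken from this extension's iptables counterpart) is writing the id to net_cls.classid and then matching with iptables -m cgroup --cgroup <id>. Reduced to its predicate, the kernel-side check is:

#include <stdbool.h>
#include <stdint.h>

/* Equality against the configured classid, optionally inverted --
 * the same XOR idiom used by cgroup_mt() above. */
static bool classid_match(uint32_t sk_classid, uint32_t wanted, bool invert)
{
	return (sk_classid == wanted) ^ invert;
}
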
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c
index 7278145e6a68..69f78e96fdb4 100644
--- a/net/netfilter/xt_connmark.c
+++ b/net/netfilter/xt_connmark.c
@@ -17,8 +17,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 9ff035c71403..a3910fc2122b 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -325,21 +325,24 @@ static void htable_gc(unsigned long htlong)
add_timer(&ht->timer);
}
-static void htable_destroy(struct xt_hashlimit_htable *hinfo)
+static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
{
struct hashlimit_net *hashlimit_net = hashlimit_pernet(hinfo->net);
struct proc_dir_entry *parent;
- del_timer_sync(&hinfo->timer);
-
if (hinfo->family == NFPROTO_IPV4)
parent = hashlimit_net->ipt_hashlimit;
else
parent = hashlimit_net->ip6t_hashlimit;
- if(parent != NULL)
+ if (parent != NULL)
remove_proc_entry(hinfo->name, parent);
+}
+static void htable_destroy(struct xt_hashlimit_htable *hinfo)
+{
+ del_timer_sync(&hinfo->timer);
+ htable_remove_proc_entry(hinfo);
htable_selective_cleanup(hinfo, select_all);
kfree(hinfo->name);
vfree(hinfo);
@@ -883,21 +886,15 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
static void __net_exit hashlimit_proc_net_exit(struct net *net)
{
struct xt_hashlimit_htable *hinfo;
- struct proc_dir_entry *pde;
struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
- /* recent_net_exit() is called before recent_mt_destroy(). Make sure
- * that the parent xt_recent proc entry is is empty before trying to
- * remove it.
+ /* hashlimit_net_exit() is called before hashlimit_mt_destroy().
+ * Make sure that the parent ipt_hashlimit and ip6t_hashlimit proc
+	 * entries are empty before trying to remove them.
*/
mutex_lock(&hashlimit_mutex);
- pde = hashlimit_net->ipt_hashlimit;
- if (pde == NULL)
- pde = hashlimit_net->ip6t_hashlimit;
-
hlist_for_each_entry(hinfo, &hashlimit_net->htables, node)
- remove_proc_entry(hinfo->name, pde);
-
+ htable_remove_proc_entry(hinfo);
hashlimit_net->ipt_hashlimit = NULL;
hashlimit_net->ip6t_hashlimit = NULL;
mutex_unlock(&hashlimit_mutex);
diff --git a/net/netfilter/xt_ipcomp.c b/net/netfilter/xt_ipcomp.c
new file mode 100644
index 000000000000..a4c7561698c5
--- /dev/null
+++ b/net/netfilter/xt_ipcomp.c
@@ -0,0 +1,111 @@
+/* Kernel module to match IPComp parameters for IPv4 and IPv6
+ *
+ * Copyright (C) 2013 WindRiver
+ *
+ * Author:
+ * Fan Du <fan.du@windriver.com>
+ *
+ * Based on:
+ * net/netfilter/xt_esp.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/in.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+
+#include <linux/netfilter/xt_ipcomp.h>
+#include <linux/netfilter/x_tables.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Fan Du <fan.du@windriver.com>");
+MODULE_DESCRIPTION("Xtables: IPv4/6 IPsec-IPComp SPI match");
+
+/* Returns 1 if the spi is matched by the range, 0 otherwise */
+static inline bool
+spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert)
+{
+ bool r;
+ pr_debug("spi_match:%c 0x%x <= 0x%x <= 0x%x\n",
+ invert ? '!' : ' ', min, spi, max);
+ r = (spi >= min && spi <= max) ^ invert;
+ pr_debug(" result %s\n", r ? "PASS" : "FAILED");
+ return r;
+}
+
+static bool comp_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ struct ip_comp_hdr _comphdr;
+ const struct ip_comp_hdr *chdr;
+ const struct xt_ipcomp *compinfo = par->matchinfo;
+
+ /* Must not be a fragment. */
+ if (par->fragoff != 0)
+ return false;
+
+ chdr = skb_header_pointer(skb, par->thoff, sizeof(_comphdr), &_comphdr);
+ if (chdr == NULL) {
+ /* We've been asked to examine this packet, and we
+ * can't. Hence, no choice but to drop.
+ */
+ pr_debug("Dropping evil IPComp tinygram.\n");
+ par->hotdrop = true;
+		return false;
+ }
+
+ return spi_match(compinfo->spis[0], compinfo->spis[1],
+			 ntohs(chdr->cpi),
+ !!(compinfo->invflags & XT_IPCOMP_INV_SPI));
+}
+
+static int comp_mt_check(const struct xt_mtchk_param *par)
+{
+ const struct xt_ipcomp *compinfo = par->matchinfo;
+
+ /* Must specify no unknown invflags */
+ if (compinfo->invflags & ~XT_IPCOMP_INV_MASK) {
+ pr_err("unknown flags %X\n", compinfo->invflags);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct xt_match comp_mt_reg[] __read_mostly = {
+ {
+ .name = "ipcomp",
+ .family = NFPROTO_IPV4,
+ .match = comp_mt,
+ .matchsize = sizeof(struct xt_ipcomp),
+ .proto = IPPROTO_COMP,
+ .checkentry = comp_mt_check,
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "ipcomp",
+ .family = NFPROTO_IPV6,
+ .match = comp_mt,
+ .matchsize = sizeof(struct xt_ipcomp),
+ .proto = IPPROTO_COMP,
+ .checkentry = comp_mt_check,
+ .me = THIS_MODULE,
+ },
+};
+
+static int __init comp_mt_init(void)
+{
+ return xt_register_matches(comp_mt_reg, ARRAY_SIZE(comp_mt_reg));
+}
+
+static void __exit comp_mt_exit(void)
+{
+ xt_unregister_matches(comp_mt_reg, ARRAY_SIZE(comp_mt_reg));
+}
+
+module_init(comp_mt_init);
+module_exit(comp_mt_exit);
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index 647d989a01e6..7174611bd672 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
diff --git a/net/netlabel/netlabel_addrlist.c b/net/netlabel/netlabel_addrlist.c
index 6f1701322fb6..d0a3acfa5742 100644
--- a/net/netlabel/netlabel_addrlist.c
+++ b/net/netlabel/netlabel_addrlist.c
@@ -24,8 +24,7 @@
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/net/netlabel/netlabel_addrlist.h b/net/netlabel/netlabel_addrlist.h
index a1287ce18130..d0f38bc9af6d 100644
--- a/net/netlabel/netlabel_addrlist.h
+++ b/net/netlabel/netlabel_addrlist.h
@@ -24,8 +24,7 @@
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index 69345cebe3a3..c2f2a53a4879 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -23,8 +23,7 @@
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/net/netlabel/netlabel_cipso_v4.h b/net/netlabel/netlabel_cipso_v4.h
index d24d774bfd62..875826808b00 100644
--- a/net/netlabel/netlabel_cipso_v4.h
+++ b/net/netlabel/netlabel_cipso_v4.h
@@ -23,8 +23,7 @@
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index 85d842e6e431..f0cb92f3ddaf 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -24,8 +24,7 @@
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/net/netlabel/netlabel_domainhash.h b/net/netlabel/netlabel_domainhash.h
index b9be0eed8980..680caf4dff56 100644
--- a/net/netlabel/netlabel_domainhash.h
+++ b/net/netlabel/netlabel_domainhash.h
@@ -24,8 +24,7 @@
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index dce1bebf7aec..3045a964f39c 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -23,8 +23,7 @@
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index 8ef83ee97c6a..e66e977ef2fa 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -23,8 +23,7 @@
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/net/netlabel/netlabel_mgmt.h b/net/netlabel/netlabel_mgmt.h
index 5a9f31ce5799..8b6e1ab62b48 100644
--- a/net/netlabel/netlabel_mgmt.h
+++ b/net/netlabel/netlabel_mgmt.h
@@ -23,8 +23,7 @@
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 43817d73ccf9..78a63c18779e 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -23,8 +23,7 @@
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/net/netlabel/netlabel_unlabeled.h b/net/netlabel/netlabel_unlabeled.h
index 700af49022a0..3a9e5dc9511b 100644
--- a/net/netlabel/netlabel_unlabeled.h
+++ b/net/netlabel/netlabel_unlabeled.h
@@ -23,8 +23,7 @@
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/net/netlabel/netlabel_user.c b/net/netlabel/netlabel_user.c
index 9650c4ad5f88..1e779bb7fa43 100644
--- a/net/netlabel/netlabel_user.c
+++ b/net/netlabel/netlabel_user.c
@@ -23,8 +23,7 @@
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/net/netlabel/netlabel_user.h b/net/netlabel/netlabel_user.h
index 81969785e279..4a397cde1a48 100644
--- a/net/netlabel/netlabel_user.h
+++ b/net/netlabel/netlabel_user.h
@@ -23,8 +23,7 @@
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index bca50b95c182..34a656d90175 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -131,7 +131,7 @@ int netlink_add_tap(struct netlink_tap *nt)
}
EXPORT_SYMBOL_GPL(netlink_add_tap);
-int __netlink_remove_tap(struct netlink_tap *nt)
+static int __netlink_remove_tap(struct netlink_tap *nt)
{
bool found = false;
struct netlink_tap *tmp;
@@ -155,7 +155,6 @@ out:
return found ? 0 : -ENODEV;
}
-EXPORT_SYMBOL_GPL(__netlink_remove_tap);
int netlink_remove_tap(struct netlink_tap *nt)
{
@@ -204,6 +203,8 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
if (nskb) {
nskb->dev = dev;
nskb->protocol = htons((u16) sk->sk_protocol);
+ nskb->pkt_type = netlink_is_kernel(sk) ?
+ PACKET_KERNEL : PACKET_USER;
ret = dev_queue_xmit(nskb);
if (unlikely(ret > 0))
@@ -239,6 +240,13 @@ static void netlink_deliver_tap(struct sk_buff *skb)
rcu_read_unlock();
}
+static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
+ struct sk_buff *skb)
+{
+ if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
+ netlink_deliver_tap(skb);
+}
+
static void netlink_overrun(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
@@ -1697,14 +1705,10 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
ret = -ECONNREFUSED;
if (nlk->netlink_rcv != NULL) {
- /* We could do a netlink_deliver_tap(skb) here as well
- * but since this is intended for the kernel only, we
- * should rather let it stay under the hood.
- */
-
ret = skb->len;
netlink_skb_set_owner_r(skb, sk);
NETLINK_CB(skb).sk = ssk;
+ netlink_deliver_tap_kernel(sk, ssk, skb);
nlk->netlink_rcv(skb);
consume_skb(skb);
} else {
@@ -1769,6 +1773,9 @@ struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
if (ring->pg_vec == NULL)
goto out_put;
+ if (ring->frame_size - NL_MMAP_HDRLEN < size)
+ goto out_put;
+
skb = alloc_skb_head(gfp_mask);
if (skb == NULL)
goto err1;
@@ -1778,6 +1785,7 @@ struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
if (ring->pg_vec == NULL)
goto out_free;
+ /* check again under lock */
maxlen = ring->frame_size - NL_MMAP_HDRLEN;
if (maxlen < size)
goto out_free;
@@ -2535,21 +2543,6 @@ void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
netlink_update_socket_mc(nlk_sk(sk), group, 0);
}
-/**
- * netlink_clear_multicast_users - kick off multicast listeners
- *
- * This function removes all listeners from the given group.
- * @ksk: The kernel netlink socket, as returned by
- * netlink_kernel_create().
- * @group: The multicast group to clear.
- */
-void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
-{
- netlink_table_grab();
- __netlink_clear_multicast_users(ksk, group);
- netlink_table_ungrab();
-}
-
struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
{
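
With the netlink tap changes above, mirrored frames now carry skb->pkt_type set to PACKET_KERNEL or PACKET_USER according to the sender, and kernel-to-kernel unicasts are mirrored as well unless both endpoints are kernel sockets. As a rough illustration only (assuming an nlmon tap device named "nlmon0" already exists, and a kernel new enough to expose PACKET_KERNEL to user space), a capture program can read the direction from sll_pkttype:

    /* Sketch: read one mirrored netlink frame from an nlmon tap and
     * report which side sent it.  Error handling trimmed. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <linux/if_packet.h>
    #include <linux/if_ether.h>
    #include <net/if.h>
    #include <arpa/inet.h>

    int main(void)
    {
        int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
        struct sockaddr_ll sll;
        socklen_t alen = sizeof(sll);
        char buf[8192];

        memset(&sll, 0, sizeof(sll));
        sll.sll_family = AF_PACKET;
        sll.sll_protocol = htons(ETH_P_ALL);
        sll.sll_ifindex = if_nametoindex("nlmon0");
        bind(fd, (struct sockaddr *)&sll, sizeof(sll));

        recvfrom(fd, buf, sizeof(buf), 0, (struct sockaddr *)&sll, &alen);
        printf("sender: %s\n", sll.sll_pkttype == PACKET_KERNEL ?
               "kernel socket" : "user socket");
        return 0;
    }
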
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 4518a57aa5fe..b1dcdb932a86 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -74,9 +74,12 @@ static struct list_head family_ht[GENL_FAM_TAB_SIZE];
* Bit 17 is marked as already used since the VFS quota code
* also abused this API and relied on family == group ID, we
* cater to that by giving it a static family and group ID.
+ * Bit 18 is marked as already used since the PMCRAID driver
+ * did the same thing as the VFS quota code (maybe copied?)
*/
static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
- BIT(GENL_ID_VFS_DQUOT);
+ BIT(GENL_ID_VFS_DQUOT) |
+ BIT(GENL_ID_PMCRAID);
static unsigned long *mc_groups = &mc_group_start;
static unsigned long mc_groups_longs = 1;
@@ -139,6 +142,7 @@ static u16 genl_generate_id(void)
for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) {
if (id_gen_idx != GENL_ID_VFS_DQUOT &&
+ id_gen_idx != GENL_ID_PMCRAID &&
!genl_family_find_byid(id_gen_idx))
return id_gen_idx;
if (++id_gen_idx > GENL_MAX_ID)
@@ -214,7 +218,7 @@ static int genl_validate_assign_mc_groups(struct genl_family *family)
{
int first_id;
int n_groups = family->n_mcgrps;
- int err, i;
+ int err = 0, i;
bool groups_allocated = false;
if (!n_groups)
@@ -236,9 +240,12 @@ static int genl_validate_assign_mc_groups(struct genl_family *family)
} else if (strcmp(family->name, "NET_DM") == 0) {
first_id = 1;
BUG_ON(n_groups != 1);
- } else if (strcmp(family->name, "VFS_DQUOT") == 0) {
+ } else if (family->id == GENL_ID_VFS_DQUOT) {
first_id = GENL_ID_VFS_DQUOT;
BUG_ON(n_groups != 1);
+ } else if (family->id == GENL_ID_PMCRAID) {
+ first_id = GENL_ID_PMCRAID;
+ BUG_ON(n_groups != 1);
} else {
groups_allocated = true;
err = genl_allocate_reserve_groups(n_groups, &first_id);
@@ -454,6 +461,26 @@ int genl_unregister_family(struct genl_family *family)
EXPORT_SYMBOL(genl_unregister_family);
/**
+ * genlmsg_new_unicast - Allocate generic netlink message for unicast
+ * @payload: size of the message payload
+ * @info: information on destination
+ * @flags: the type of memory to allocate
+ *
+ * Allocates a new sk_buff large enough to cover the specified payload
+ * plus required Netlink headers. Will check receiving socket for
+ * memory mapped i/o capability and use it if enabled. Will fall back
+ * to non-mapped skb if message size exceeds the frame size of the ring.
+ */
+struct sk_buff *genlmsg_new_unicast(size_t payload, struct genl_info *info,
+ gfp_t flags)
+{
+ size_t len = nlmsg_total_size(genlmsg_total_size(payload));
+
+ return netlink_alloc_skb(info->dst_sk, len, info->snd_portid, flags);
+}
+EXPORT_SYMBOL_GPL(genlmsg_new_unicast);
+
+/**
* genlmsg_put - Add generic netlink header to netlink message
* @skb: socket buffer holding the message
* @portid: netlink portid the message is addressed to
@@ -593,6 +620,7 @@ static int genl_family_rcv_msg(struct genl_family *family,
info.genlhdr = nlmsg_data(nlh);
info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
info.attrs = attrbuf;
+ info.dst_sk = skb->sk;
genl_info_net_set(&info, net);
memset(&info.user_ptr, 0, sizeof(info.user_ptr));
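
Taken together, genl_family_rcv_msg() now records the receiving socket in info->dst_sk, so a doit handler can allocate its reply through genlmsg_new_unicast() and transparently land in a memory-mapped ring frame when the peer enabled one. A minimal sketch of such a handler; foo_family, FOO_CMD_GET and FOO_ATTR_VALUE are made-up names, not part of any real family:

    /* Sketch: hypothetical genl handler replying via genlmsg_new_unicast(). */
    static int foo_cmd_get(struct sk_buff *skb, struct genl_info *info)
    {
        struct sk_buff *msg;
        void *hdr;

        msg = genlmsg_new_unicast(nla_total_size(sizeof(u32)), info,
                                  GFP_KERNEL);
        if (!msg)
            return -ENOMEM;

        hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
                          &foo_family, 0, FOO_CMD_GET);
        if (!hdr || nla_put_u32(msg, FOO_ATTR_VALUE, 42)) {
            nlmsg_free(msg);
            return -EMSGSIZE;
        }

        genlmsg_end(msg, hdr);
        return genlmsg_reply(msg, info);
    }
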
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 65cfaa816075..716b7eebfe70 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -165,7 +165,7 @@ static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
}
csum_replace4(&nh->check, *addr, new_addr);
- skb->rxhash = 0;
+ skb_clear_hash(skb);
*addr = new_addr;
}
@@ -199,7 +199,7 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
if (recalculate_csum)
update_ipv6_checksum(skb, l4_proto, addr, new_addr);
- skb->rxhash = 0;
+ skb_clear_hash(skb);
memcpy(addr, new_addr, sizeof(__be32[4]));
}
@@ -296,7 +296,7 @@ static void set_tp_port(struct sk_buff *skb, __be16 *port,
{
inet_proto_csum_replace2(check, skb, *port, new_port, 0);
*port = new_port;
- skb->rxhash = 0;
+ skb_clear_hash(skb);
}
static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
@@ -310,7 +310,7 @@ static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
uh->check = CSUM_MANGLED_0;
} else {
*port = new_port;
- skb->rxhash = 0;
+ skb_clear_hash(skb);
}
}
@@ -381,7 +381,7 @@ static int set_sctp(struct sk_buff *skb,
/* Carry any checksum errors through. */
sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
- skb->rxhash = 0;
+ skb_clear_hash(skb);
}
return 0;
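
Each of the hunks above replaces a direct write of skb->rxhash with skb_clear_hash(), which additionally clears the flag recording that the cached value was a canonical L4 hash, so the next skb_get_hash() recomputes it from the rewritten headers. For reference, the helper of this era is roughly equivalent to the following (a sketch, not a verbatim copy of include/linux/skbuff.h):

    /* Approximation of skb_clear_hash(): drop the cached rxhash and
     * its l4 flag, forcing recomputation on the next skb_get_hash(). */
    static inline void skb_clear_hash(struct sk_buff *skb)
    {
        skb->rxhash = 0;
        skb->l4_rxhash = 0;
    }
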
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 6f5e1dd3be2d..df4692826ead 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -108,10 +108,9 @@ int lockdep_ovsl_is_held(void)
#endif
static struct vport *new_vport(const struct vport_parms *);
-static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
+static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
const struct dp_upcall_info *);
-static int queue_userspace_packet(struct net *, int dp_ifindex,
- struct sk_buff *,
+static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
const struct dp_upcall_info *);
/* Must be called with rcu_read_lock or ovs_mutex. */
@@ -133,7 +132,7 @@ static struct datapath *get_dp(struct net *net, int dp_ifindex)
}
/* Must be called with rcu_read_lock or ovs_mutex. */
-const char *ovs_dp_name(const struct datapath *dp)
+static const char *ovs_dp_name(const struct datapath *dp)
{
struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
return vport->ops->get_name(vport);
@@ -234,7 +233,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
}
/* Look up flow. */
- flow = ovs_flow_tbl_lookup(&dp->table, &key, &n_mask_hit);
+ flow = ovs_flow_tbl_lookup_stats(&dp->table, &key, &n_mask_hit);
if (unlikely(!flow)) {
struct dp_upcall_info upcall;
@@ -251,9 +250,9 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
OVS_CB(skb)->flow = flow;
OVS_CB(skb)->pkt_key = &key;
- stats_counter = &stats->n_hit;
- ovs_flow_used(OVS_CB(skb)->flow, skb);
+ ovs_flow_stats_update(OVS_CB(skb)->flow, skb);
ovs_execute_actions(dp, skb);
+ stats_counter = &stats->n_hit;
out:
/* Update datapath statistics. */
@@ -277,7 +276,6 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
const struct dp_upcall_info *upcall_info)
{
struct dp_stats_percpu *stats;
- int dp_ifindex;
int err;
if (upcall_info->portid == 0) {
@@ -285,16 +283,10 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
goto err;
}
- dp_ifindex = get_dpifindex(dp);
- if (!dp_ifindex) {
- err = -ENODEV;
- goto err;
- }
-
if (!skb_is_gso(skb))
- err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
+ err = queue_userspace_packet(dp, skb, upcall_info);
else
- err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
+ err = queue_gso_packets(dp, skb, upcall_info);
if (err)
goto err;
@@ -310,8 +302,7 @@ err:
return err;
}
-static int queue_gso_packets(struct net *net, int dp_ifindex,
- struct sk_buff *skb,
+static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
const struct dp_upcall_info *upcall_info)
{
unsigned short gso_type = skb_shinfo(skb)->gso_type;
@@ -320,14 +311,14 @@ static int queue_gso_packets(struct net *net, int dp_ifindex,
struct sk_buff *segs, *nskb;
int err;
- segs = __skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM, false);
+ segs = __skb_gso_segment(skb, NETIF_F_SG, false);
if (IS_ERR(segs))
return PTR_ERR(segs);
/* Queue all of the segments. */
skb = segs;
do {
- err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
+ err = queue_userspace_packet(dp, skb, upcall_info);
if (err)
break;
@@ -380,11 +371,11 @@ static size_t key_attr_size(void)
+ nla_total_size(28); /* OVS_KEY_ATTR_ND */
}
-static size_t upcall_msg_size(const struct sk_buff *skb,
- const struct nlattr *userdata)
+static size_t upcall_msg_size(const struct nlattr *userdata,
+ unsigned int hdrlen)
{
size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
- + nla_total_size(skb->len) /* OVS_PACKET_ATTR_PACKET */
+ + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
+ nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */
/* OVS_PACKET_ATTR_USERDATA */
@@ -394,15 +385,24 @@ static size_t upcall_msg_size(const struct sk_buff *skb,
return size;
}
-static int queue_userspace_packet(struct net *net, int dp_ifindex,
- struct sk_buff *skb,
+static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
const struct dp_upcall_info *upcall_info)
{
struct ovs_header *upcall;
struct sk_buff *nskb = NULL;
struct sk_buff *user_skb; /* to be queued to userspace */
struct nlattr *nla;
- int err;
+ struct genl_info info = {
+ .dst_sk = ovs_dp_get_net(dp)->genl_sock,
+ .snd_portid = upcall_info->portid,
+ };
+ size_t len;
+ unsigned int hlen;
+ int err, dp_ifindex;
+
+ dp_ifindex = get_dpifindex(dp);
+ if (!dp_ifindex)
+ return -ENODEV;
if (vlan_tx_tag_present(skb)) {
nskb = skb_clone(skb, GFP_ATOMIC);
@@ -422,7 +422,22 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex,
goto out;
}
- user_skb = genlmsg_new(upcall_msg_size(skb, upcall_info->userdata), GFP_ATOMIC);
+ /* Complete checksum if needed */
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ (err = skb_checksum_help(skb)))
+ goto out;
+
+ /* Older versions of OVS user space enforce alignment of the last
+ * Netlink attribute to NLA_ALIGNTO which would require extensive
+ * padding logic. Only perform zerocopy if padding is not required.
+ */
+ if (dp->user_features & OVS_DP_F_UNALIGNED)
+ hlen = skb_zerocopy_headlen(skb);
+ else
+ hlen = skb->len;
+
+ len = upcall_msg_size(upcall_info->userdata, hlen);
+ user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC);
if (!user_skb) {
err = -ENOMEM;
goto out;
@@ -441,26 +456,24 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex,
nla_len(upcall_info->userdata),
nla_data(upcall_info->userdata));
- nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);
+ /* Only reserve room for the attribute header; the packet data is
+ * added in skb_zerocopy(). */
+ if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
+ err = -ENOBUFS;
+ goto out;
+ }
+ nla->nla_len = nla_attr_size(skb->len);
- skb_copy_and_csum_dev(skb, nla_data(nla));
+ skb_zerocopy(user_skb, skb, skb->len, hlen);
- genlmsg_end(user_skb, upcall);
- err = genlmsg_unicast(net, user_skb, upcall_info->portid);
+ ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
+ err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
out:
kfree_skb(nskb);
return err;
}
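
Because the packet payload is now attached by skb_zerocopy() after an empty OVS_PACKET_ATTR_PACKET attribute was reserved, the usual genlmsg_end() accounting no longer applies; the attribute length and the outer nlmsg_len are patched by hand instead. The same pattern in isolation, as a hedged sketch mirroring the hunk above rather than a general-purpose API:

    /* Sketch: reserve an empty attribute, splice the payload in with
     * skb_zerocopy(), then fix up the netlink length fields manually. */
    static int attach_packet_zerocopy(struct sk_buff *msg, struct sk_buff *pkt,
                                      unsigned int hlen)
    {
        struct nlattr *nla = nla_reserve(msg, OVS_PACKET_ATTR_PACKET, 0);

        if (!nla)
            return -ENOBUFS;
        nla->nla_len = nla_attr_size(pkt->len);   /* claimed payload size */
        skb_zerocopy(msg, pkt, pkt->len, hlen);   /* copy hlen, frag the rest */
        ((struct nlmsghdr *)msg->data)->nlmsg_len = msg->len;
        return 0;
    }
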
-static void clear_stats(struct sw_flow *flow)
-{
- flow->used = 0;
- flow->tcp_flags = 0;
- flow->packet_count = 0;
- flow->byte_count = 0;
-}
-
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
struct ovs_header *ovs_header = info->userhdr;
@@ -499,7 +512,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
packet->protocol = htons(ETH_P_802_2);
/* Build an sw_flow for sending this packet. */
- flow = ovs_flow_alloc();
+ flow = ovs_flow_alloc(false);
err = PTR_ERR(flow);
if (IS_ERR(flow))
goto err_kfree_skb;
@@ -635,10 +648,10 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
const int skb_orig_len = skb->len;
struct nlattr *start;
struct ovs_flow_stats stats;
+ __be16 tcp_flags;
+ unsigned long used;
struct ovs_header *ovs_header;
struct nlattr *nla;
- unsigned long used;
- u8 tcp_flags;
int err;
ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
@@ -667,24 +680,17 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
nla_nest_end(skb, nla);
- spin_lock_bh(&flow->lock);
- used = flow->used;
- stats.n_packets = flow->packet_count;
- stats.n_bytes = flow->byte_count;
- tcp_flags = (u8)ntohs(flow->tcp_flags);
- spin_unlock_bh(&flow->lock);
-
+ ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
if (used &&
nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
goto nla_put_failure;
if (stats.n_packets &&
- nla_put(skb, OVS_FLOW_ATTR_STATS,
- sizeof(struct ovs_flow_stats), &stats))
+ nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
goto nla_put_failure;
- if (tcp_flags &&
- nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
+ if ((u8)ntohs(tcp_flags) &&
+ nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
goto nla_put_failure;
/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
@@ -701,8 +707,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
if (start) {
const struct sw_flow_actions *sf_acts;
- sf_acts = rcu_dereference_check(flow->sf_acts,
- lockdep_ovsl_is_held());
+ sf_acts = rcu_dereference_ovsl(flow->sf_acts);
err = ovs_nla_put_actions(sf_acts->actions,
sf_acts->actions_len, skb);
@@ -726,39 +731,34 @@ error:
return err;
}
-static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
+static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow,
+ struct genl_info *info)
{
- const struct sw_flow_actions *sf_acts;
+ size_t len;
- sf_acts = ovsl_dereference(flow->sf_acts);
+ len = ovs_flow_cmd_msg_size(ovsl_dereference(flow->sf_acts));
- return genlmsg_new(ovs_flow_cmd_msg_size(sf_acts), GFP_KERNEL);
+ return genlmsg_new_unicast(len, info, GFP_KERNEL);
}
static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
struct datapath *dp,
- u32 portid, u32 seq, u8 cmd)
+ struct genl_info *info,
+ u8 cmd)
{
struct sk_buff *skb;
int retval;
- skb = ovs_flow_cmd_alloc_info(flow);
+ skb = ovs_flow_cmd_alloc_info(flow, info);
if (!skb)
return ERR_PTR(-ENOMEM);
- retval = ovs_flow_cmd_fill_info(flow, dp, skb, portid, seq, 0, cmd);
+ retval = ovs_flow_cmd_fill_info(flow, dp, skb, info->snd_portid,
+ info->snd_seq, 0, cmd);
BUG_ON(retval < 0);
return skb;
}
-static struct sw_flow *__ovs_flow_tbl_lookup(struct flow_table *tbl,
- const struct sw_flow_key *key)
-{
- u32 __always_unused n_mask_hit;
-
- return ovs_flow_tbl_lookup(tbl, key, &n_mask_hit);
-}
-
static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
@@ -770,6 +770,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
struct datapath *dp;
struct sw_flow_actions *acts = NULL;
struct sw_flow_match match;
+ bool exact_5tuple;
int error;
/* Extract key. */
@@ -778,7 +779,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
goto error;
ovs_match_init(&match, &key, &mask);
- error = ovs_nla_get_match(&match,
+ error = ovs_nla_get_match(&match, &exact_5tuple,
a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
if (error)
goto error;
@@ -809,7 +810,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
goto err_unlock_ovs;
/* Check if this is a duplicate flow */
- flow = __ovs_flow_tbl_lookup(&dp->table, &key);
+ flow = ovs_flow_tbl_lookup(&dp->table, &key);
if (!flow) {
/* Bail out if we're not allowed to create a new flow. */
error = -ENOENT;
@@ -817,12 +818,11 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
goto err_unlock_ovs;
/* Allocate flow. */
- flow = ovs_flow_alloc();
+ flow = ovs_flow_alloc(!exact_5tuple);
if (IS_ERR(flow)) {
error = PTR_ERR(flow);
goto err_unlock_ovs;
}
- clear_stats(flow);
flow->key = masked_key;
flow->unmasked_key = key;
@@ -835,8 +835,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
goto err_flow_free;
}
- reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
- info->snd_seq, OVS_FLOW_CMD_NEW);
+ reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW);
} else {
/* We found a matching flow. */
struct sw_flow_actions *old_acts;
@@ -864,15 +863,11 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
rcu_assign_pointer(flow->sf_acts, acts);
ovs_nla_free_flow_actions(old_acts);
- reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
- info->snd_seq, OVS_FLOW_CMD_NEW);
+ reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW);
/* Clear stats. */
- if (a[OVS_FLOW_ATTR_CLEAR]) {
- spin_lock_bh(&flow->lock);
- clear_stats(flow);
- spin_unlock_bh(&flow->lock);
- }
+ if (a[OVS_FLOW_ATTR_CLEAR])
+ ovs_flow_stats_clear(flow);
}
ovs_unlock();
@@ -910,7 +905,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
}
ovs_match_init(&match, &key, NULL);
- err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
+ err = ovs_nla_get_match(&match, NULL, a[OVS_FLOW_ATTR_KEY], NULL);
if (err)
return err;
@@ -921,14 +916,13 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
goto unlock;
}
- flow = __ovs_flow_tbl_lookup(&dp->table, &key);
+ flow = ovs_flow_tbl_lookup(&dp->table, &key);
if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
err = -ENOENT;
goto unlock;
}
- reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
- info->snd_seq, OVS_FLOW_CMD_NEW);
+ reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW);
if (IS_ERR(reply)) {
err = PTR_ERR(reply);
goto unlock;
@@ -965,17 +959,17 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
}
ovs_match_init(&match, &key, NULL);
- err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
+ err = ovs_nla_get_match(&match, NULL, a[OVS_FLOW_ATTR_KEY], NULL);
if (err)
goto unlock;
- flow = __ovs_flow_tbl_lookup(&dp->table, &key);
+ flow = ovs_flow_tbl_lookup(&dp->table, &key);
if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
err = -ENOENT;
goto unlock;
}
- reply = ovs_flow_cmd_alloc_info(flow);
+ reply = ovs_flow_cmd_alloc_info(flow, info);
if (!reply) {
err = -ENOMEM;
goto unlock;
@@ -1061,6 +1055,7 @@ static const struct genl_ops dp_flow_genl_ops[] = {
static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
[OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
+ [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
};
static struct genl_family dp_datapath_genl_family = {
@@ -1119,6 +1114,9 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
&dp_megaflow_stats))
goto nla_put_failure;
+ if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
+ goto nla_put_failure;
+
return genlmsg_end(skb, ovs_header);
nla_put_failure:
@@ -1127,17 +1125,17 @@ error:
return -EMSGSIZE;
}
-static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 portid,
- u32 seq, u8 cmd)
+static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp,
+ struct genl_info *info, u8 cmd)
{
struct sk_buff *skb;
int retval;
- skb = genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
+ skb = genlmsg_new_unicast(ovs_dp_cmd_msg_size(), info, GFP_KERNEL);
if (!skb)
return ERR_PTR(-ENOMEM);
- retval = ovs_dp_cmd_fill_info(dp, skb, portid, seq, 0, cmd);
+ retval = ovs_dp_cmd_fill_info(dp, skb, info->snd_portid, info->snd_seq, 0, cmd);
if (retval < 0) {
kfree_skb(skb);
return ERR_PTR(retval);
@@ -1165,6 +1163,24 @@ static struct datapath *lookup_datapath(struct net *net,
return dp ? dp : ERR_PTR(-ENODEV);
}
+static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
+{
+ struct datapath *dp;
+
+ dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
+ if (!dp)
+ return;
+
+ WARN(dp->user_features, "Dropping previously announced user features\n");
+ dp->user_features = 0;
+}
+
+static void ovs_dp_change(struct datapath *dp, struct nlattr **a)
+{
+ if (a[OVS_DP_ATTR_USER_FEATURES])
+ dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
+}
+
static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
@@ -1223,17 +1239,27 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
parms.port_no = OVSP_LOCAL;
parms.upcall_portid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
+ ovs_dp_change(dp, a);
+
vport = new_vport(&parms);
if (IS_ERR(vport)) {
err = PTR_ERR(vport);
if (err == -EBUSY)
err = -EEXIST;
+ if (err == -EEXIST) {
+ /* An outdated user space instance that does not understand
+ * the concept of user_features has attempted to create a new
+ * datapath and is likely to reuse it. Drop all user features.
+ */
+ if (info->genlhdr->version < OVS_DP_VER_FEATURES)
+ ovs_dp_reset_user_features(skb, info);
+ }
+
goto err_destroy_ports_array;
}
- reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
- info->snd_seq, OVS_DP_CMD_NEW);
+ reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_NEW);
err = PTR_ERR(reply);
if (IS_ERR(reply))
goto err_destroy_local_port;
@@ -1299,8 +1325,7 @@ static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
if (IS_ERR(dp))
goto unlock;
- reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
- info->snd_seq, OVS_DP_CMD_DEL);
+ reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_DEL);
err = PTR_ERR(reply);
if (IS_ERR(reply))
goto unlock;
@@ -1328,8 +1353,9 @@ static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
if (IS_ERR(dp))
goto unlock;
- reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
- info->snd_seq, OVS_DP_CMD_NEW);
+ ovs_dp_change(dp, info->attrs);
+
+ reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_NEW);
if (IS_ERR(reply)) {
err = PTR_ERR(reply);
genl_set_err(&dp_datapath_genl_family, sock_net(skb->sk), 0,
@@ -1360,8 +1386,7 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
goto unlock;
}
- reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
- info->snd_seq, OVS_DP_CMD_NEW);
+ reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_NEW);
if (IS_ERR(reply)) {
err = PTR_ERR(reply);
goto unlock;
@@ -1441,7 +1466,7 @@ struct genl_family dp_vport_genl_family = {
.parallel_ops = true,
};
-struct genl_multicast_group ovs_dp_vport_multicast_group = {
+static struct genl_multicast_group ovs_dp_vport_multicast_group = {
.name = OVS_VPORT_MCGROUP
};
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 4067ea41be28..6be9fbb5e9cb 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -88,6 +88,8 @@ struct datapath {
/* Network namespace ref. */
struct net *net;
#endif
+
+ u32 user_features;
};
/**
@@ -145,6 +147,8 @@ int lockdep_ovsl_is_held(void);
#define ASSERT_OVSL() WARN_ON(unlikely(!lockdep_ovsl_is_held()))
#define ovsl_dereference(p) \
rcu_dereference_protected(p, lockdep_ovsl_is_held())
+#define rcu_dereference_ovsl(p) \
+ rcu_dereference_check(p, lockdep_ovsl_is_held())
static inline struct net *ovs_dp_get_net(struct datapath *dp)
{
@@ -178,14 +182,12 @@ static inline struct vport *ovs_vport_ovsl(const struct datapath *dp, int port_n
extern struct notifier_block ovs_dp_device_notifier;
extern struct genl_family dp_vport_genl_family;
-extern struct genl_multicast_group ovs_dp_vport_multicast_group;
void ovs_dp_process_received_packet(struct vport *, struct sk_buff *);
void ovs_dp_detach_port(struct vport *);
int ovs_dp_upcall(struct datapath *, struct sk_buff *,
const struct dp_upcall_info *);
-const char *ovs_dp_name(const struct datapath *dp);
struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq,
u8 cmd);
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index b409f5279601..16f4b46161d4 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -35,6 +35,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
+#include <linux/smp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
@@ -60,10 +61,16 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
-void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
+void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
{
+ struct flow_stats *stats;
__be16 tcp_flags = 0;
+ if (!flow->stats.is_percpu)
+ stats = flow->stats.stat;
+ else
+ stats = this_cpu_ptr(flow->stats.cpu_stats);
+
if ((flow->key.eth.type == htons(ETH_P_IP) ||
flow->key.eth.type == htons(ETH_P_IPV6)) &&
flow->key.ip.proto == IPPROTO_TCP &&
@@ -71,12 +78,87 @@ void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb));
}
- spin_lock(&flow->lock);
- flow->used = jiffies;
- flow->packet_count++;
- flow->byte_count += skb->len;
- flow->tcp_flags |= tcp_flags;
- spin_unlock(&flow->lock);
+ spin_lock(&stats->lock);
+ stats->used = jiffies;
+ stats->packet_count++;
+ stats->byte_count += skb->len;
+ stats->tcp_flags |= tcp_flags;
+ spin_unlock(&stats->lock);
+}
+
+static void stats_read(struct flow_stats *stats,
+ struct ovs_flow_stats *ovs_stats,
+ unsigned long *used, __be16 *tcp_flags)
+{
+ spin_lock(&stats->lock);
+ if (time_after(stats->used, *used))
+ *used = stats->used;
+ *tcp_flags |= stats->tcp_flags;
+ ovs_stats->n_packets += stats->packet_count;
+ ovs_stats->n_bytes += stats->byte_count;
+ spin_unlock(&stats->lock);
+}
+
+void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats,
+ unsigned long *used, __be16 *tcp_flags)
+{
+ int cpu, cur_cpu;
+
+ *used = 0;
+ *tcp_flags = 0;
+ memset(ovs_stats, 0, sizeof(*ovs_stats));
+
+ if (!flow->stats.is_percpu) {
+ stats_read(flow->stats.stat, ovs_stats, used, tcp_flags);
+ } else {
+ cur_cpu = get_cpu();
+ for_each_possible_cpu(cpu) {
+ struct flow_stats *stats;
+
+ if (cpu == cur_cpu)
+ local_bh_disable();
+
+ stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
+ stats_read(stats, ovs_stats, used, tcp_flags);
+
+ if (cpu == cur_cpu)
+ local_bh_enable();
+ }
+ put_cpu();
+ }
+}
+
+static void stats_reset(struct flow_stats *stats)
+{
+ spin_lock(&stats->lock);
+ stats->used = 0;
+ stats->packet_count = 0;
+ stats->byte_count = 0;
+ stats->tcp_flags = 0;
+ spin_unlock(&stats->lock);
+}
+
+void ovs_flow_stats_clear(struct sw_flow *flow)
+{
+ int cpu, cur_cpu;
+
+ if (!flow->stats.is_percpu) {
+ stats_reset(flow->stats.stat);
+ } else {
+ cur_cpu = get_cpu();
+
+ for_each_possible_cpu(cpu) {
+
+ if (cpu == cur_cpu)
+ local_bh_disable();
+
+ stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu));
+
+ if (cpu == cur_cpu)
+ local_bh_enable();
+ }
+ put_cpu();
+ }
}
static int check_header(struct sk_buff *skb, int len)
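
One subtlety in the per-CPU readers above: stats->lock is also taken from the packet path, which runs in softirq context on the CPU handling receive. Only the current CPU's copy can be contended by a local softirq, so bottom halves are masked just around that entry; remote copies are only contended across CPUs, where plain spinning is fine. The pattern, condensed into a sketch (field names as in the hunks above, full aggregation elided):

    /* Sketch: walk per-CPU flow stats, masking softirqs only while
     * touching this CPU's entry to avoid deadlock with the RX path. */
    static void fold_packet_count(struct sw_flow_stats *stats, u64 *packets)
    {
        int cpu, cur_cpu = get_cpu();

        for_each_possible_cpu(cpu) {
            struct flow_stats *s = per_cpu_ptr(stats->cpu_stats, cpu);

            if (cpu == cur_cpu)
                local_bh_disable();
            spin_lock(&s->lock);
            *packets += s->packet_count;
            spin_unlock(&s->lock);
            if (cpu == cur_cpu)
                local_bh_enable();
        }
        put_cpu();
    }
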
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 1510f51dbf74..2d770e28a3a3 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -19,6 +19,7 @@
#ifndef FLOW_H
#define FLOW_H 1
+#include <linux/cache.h>
#include <linux/kernel.h>
#include <linux/netlink.h>
#include <linux/openvswitch.h>
@@ -122,8 +123,8 @@ struct sw_flow_key {
} __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */
struct sw_flow_key_range {
- size_t start;
- size_t end;
+ unsigned short int start;
+ unsigned short int end;
};
struct sw_flow_mask {
@@ -146,6 +147,22 @@ struct sw_flow_actions {
struct nlattr actions[];
};
+struct flow_stats {
+ u64 packet_count; /* Number of packets matched. */
+ u64 byte_count; /* Number of bytes matched. */
+ unsigned long used; /* Last used time (in jiffies). */
+ spinlock_t lock; /* Lock for atomic stats update. */
+ __be16 tcp_flags; /* Union of seen TCP flags. */
+};
+
+struct sw_flow_stats {
+ bool is_percpu;
+ union {
+ struct flow_stats *stat;
+ struct flow_stats __percpu *cpu_stats;
+ };
+};
+
struct sw_flow {
struct rcu_head rcu;
struct hlist_node hash_node[2];
@@ -155,12 +172,7 @@ struct sw_flow {
struct sw_flow_key unmasked_key;
struct sw_flow_mask *mask;
struct sw_flow_actions __rcu *sf_acts;
-
- spinlock_t lock; /* Lock for values below. */
- unsigned long used; /* Last used time (in jiffies). */
- u64 packet_count; /* Number of packets matched. */
- u64 byte_count; /* Number of bytes matched. */
- __be16 tcp_flags; /* Union of seen TCP flags. */
+ struct sw_flow_stats stats;
};
struct arp_eth_header {
@@ -177,7 +189,10 @@ struct arp_eth_header {
unsigned char ar_tip[4]; /* target IP address */
} __packed;
-void ovs_flow_used(struct sw_flow *, struct sk_buff *);
+void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb);
+void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *stats,
+ unsigned long *used, __be16 *tcp_flags);
+void ovs_flow_stats_clear(struct sw_flow *flow);
u64 ovs_flow_used_time(unsigned long flow_jiffies);
int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *);
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 2bc1bc1aca3b..4d000acaed0d 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -266,6 +266,20 @@ static bool is_all_zero(const u8 *fp, size_t size)
return true;
}
+static bool is_all_set(const u8 *fp, size_t size)
+{
+ int i;
+
+ if (!fp)
+ return false;
+
+ for (i = 0; i < size; i++)
+ if (fp[i] != 0xff)
+ return false;
+
+ return true;
+}
+
static int __parse_flow_nlattrs(const struct nlattr *attr,
const struct nlattr *a[],
u64 *attrsp, bool nz)
@@ -487,8 +501,9 @@ static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
return 0;
}
-static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
- const struct nlattr **a, bool is_mask)
+static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple,
+ u64 attrs, const struct nlattr **a,
+ bool is_mask)
{
int err;
u64 orig_attrs = attrs;
@@ -545,6 +560,11 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
}
+ if (is_mask && exact_5tuple) {
+ if (match->mask->key.eth.type != htons(0xffff))
+ *exact_5tuple = false;
+ }
+
if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
const struct ovs_key_ipv4 *ipv4_key;
@@ -567,6 +587,13 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
ipv4_key->ipv4_dst, is_mask);
attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
+
+ if (is_mask && exact_5tuple && *exact_5tuple) {
+ if (ipv4_key->ipv4_proto != 0xff ||
+ ipv4_key->ipv4_src != htonl(0xffffffff) ||
+ ipv4_key->ipv4_dst != htonl(0xffffffff))
+ *exact_5tuple = false;
+ }
}
if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
@@ -598,6 +625,13 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
is_mask);
attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
+
+ if (is_mask && exact_5tuple && *exact_5tuple) {
+ if (ipv6_key->ipv6_proto != 0xff ||
+ !is_all_set((u8 *)ipv6_key->ipv6_src, sizeof(match->key->ipv6.addr.src)) ||
+ !is_all_set((u8 *)ipv6_key->ipv6_dst, sizeof(match->key->ipv6.addr.dst)))
+ *exact_5tuple = false;
+ }
}
if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
@@ -640,6 +674,11 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
tcp_key->tcp_dst, is_mask);
}
attrs &= ~(1 << OVS_KEY_ATTR_TCP);
+
+ if (is_mask && exact_5tuple && *exact_5tuple &&
+ (tcp_key->tcp_src != htons(0xffff) ||
+ tcp_key->tcp_dst != htons(0xffff)))
+ *exact_5tuple = false;
}
if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) {
@@ -671,6 +710,11 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
udp_key->udp_dst, is_mask);
}
attrs &= ~(1 << OVS_KEY_ATTR_UDP);
+
+ if (is_mask && exact_5tuple && *exact_5tuple &&
+ (udp_key->udp_src != htons(0xffff) ||
+ udp_key->udp_dst != htons(0xffff)))
+ *exact_5tuple = false;
}
if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
@@ -756,6 +800,7 @@ static void sw_flow_mask_set(struct sw_flow_mask *mask,
* attribute specifies the mask field of the wildcarded flow.
*/
int ovs_nla_get_match(struct sw_flow_match *match,
+ bool *exact_5tuple,
const struct nlattr *key,
const struct nlattr *mask)
{
@@ -803,10 +848,13 @@ int ovs_nla_get_match(struct sw_flow_match *match,
}
}
- err = ovs_key_from_nlattrs(match, key_attrs, a, false);
+ err = ovs_key_from_nlattrs(match, NULL, key_attrs, a, false);
if (err)
return err;
+ if (exact_5tuple)
+ *exact_5tuple = true;
+
if (mask) {
err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
if (err)
@@ -844,7 +892,7 @@ int ovs_nla_get_match(struct sw_flow_match *match,
}
}
- err = ovs_key_from_nlattrs(match, mask_attrs, a, true);
+ err = ovs_key_from_nlattrs(match, exact_5tuple, mask_attrs, a, true);
if (err)
return err;
} else {
@@ -1128,19 +1176,11 @@ struct sw_flow_actions *ovs_nla_alloc_flow_actions(int size)
return sfa;
}
-/* RCU callback used by ovs_nla_free_flow_actions. */
-static void rcu_free_acts_callback(struct rcu_head *rcu)
-{
- struct sw_flow_actions *sf_acts = container_of(rcu,
- struct sw_flow_actions, rcu);
- kfree(sf_acts);
-}
-
/* Schedules 'sf_acts' to be freed after the next RCU grace period.
* The caller must hold rcu_read_lock for this to be sensible. */
void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
{
- call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
+ kfree_rcu(sf_acts, rcu);
}
static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h
index 440151045d39..b31fbe28bc7a 100644
--- a/net/openvswitch/flow_netlink.h
+++ b/net/openvswitch/flow_netlink.h
@@ -45,6 +45,7 @@ int ovs_nla_put_flow(const struct sw_flow_key *,
int ovs_nla_get_flow_metadata(struct sw_flow *flow,
const struct nlattr *attr);
int ovs_nla_get_match(struct sw_flow_match *match,
+ bool *exact_5tuple,
const struct nlattr *,
const struct nlattr *);
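
The exact_5tuple flag threaded through ovs_nla_get_match() above feeds ovs_flow_alloc(): a mask that fully specifies the 5-tuple makes the flow use plain shared stats instead of per-CPU stats, on the assumption that traffic for an exact-match flow is typically steered to a single CPU by RSS anyway. Condensed into a single hypothetical helper for the IPv4/TCP case (the real code spreads these checks across ovs_key_from_nlattrs()):

    /* Sketch only: what "exact 5-tuple mask" means for IPv4 + TCP. */
    static bool mask_is_exact_5tuple(__be16 eth_type_mask,
                                     const struct ovs_key_ipv4 *ip_mask,
                                     const struct ovs_key_tcp *tcp_mask)
    {
        return eth_type_mask == htons(0xffff) &&
               ip_mask->ipv4_proto == 0xff &&
               ip_mask->ipv4_src == htonl(0xffffffff) &&
               ip_mask->ipv4_dst == htonl(0xffffffff) &&
               tcp_mask->tcp_src == htons(0xffff) &&
               tcp_mask->tcp_dst == htons(0xffff);
    }
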
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index e42542706087..c58a0fe3c889 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -25,7 +25,7 @@
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
-#include <linux/jhash.h>
+#include <linux/hash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
@@ -44,8 +44,6 @@
#include <net/ipv6.h>
#include <net/ndisc.h>
-#include "datapath.h"
-
#define TBL_MIN_BUCKETS 1024
#define REHASH_INTERVAL (10 * 60 * HZ)
@@ -72,19 +70,42 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
*d++ = *s++ & *m++;
}
-struct sw_flow *ovs_flow_alloc(void)
+struct sw_flow *ovs_flow_alloc(bool percpu_stats)
{
struct sw_flow *flow;
+ int cpu;
flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
if (!flow)
return ERR_PTR(-ENOMEM);
- spin_lock_init(&flow->lock);
flow->sf_acts = NULL;
flow->mask = NULL;
+ flow->stats.is_percpu = percpu_stats;
+
+ if (!percpu_stats) {
+ flow->stats.stat = kzalloc(sizeof(*flow->stats.stat), GFP_KERNEL);
+ if (!flow->stats.stat)
+ goto err;
+
+ spin_lock_init(&flow->stats.stat->lock);
+ } else {
+ flow->stats.cpu_stats = alloc_percpu(struct flow_stats);
+ if (!flow->stats.cpu_stats)
+ goto err;
+
+ for_each_possible_cpu(cpu) {
+ struct flow_stats *cpu_stats;
+
+ cpu_stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
+ spin_lock_init(&cpu_stats->lock);
+ }
+ }
return flow;
+err:
+ kmem_cache_free(flow_cache, flow);
+ return ERR_PTR(-ENOMEM);
}
int ovs_flow_tbl_count(struct flow_table *table)
@@ -118,6 +139,10 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
static void flow_free(struct sw_flow *flow)
{
kfree((struct sf_flow_acts __force *)flow->sf_acts);
+ if (flow->stats.is_percpu)
+ free_percpu(flow->stats.cpu_stats);
+ else
+ kfree(flow->stats.stat);
kmem_cache_free(flow_cache, flow);
}
@@ -128,13 +153,6 @@ static void rcu_free_flow_callback(struct rcu_head *rcu)
flow_free(flow);
}
-static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
-{
- struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);
-
- kfree(mask);
-}
-
static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
{
if (!mask)
@@ -146,7 +164,7 @@ static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
if (!mask->ref_count) {
list_del_rcu(&mask->list);
if (deferred)
- call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
+ kfree_rcu(mask, rcu);
else
kfree(mask);
}
@@ -362,7 +380,7 @@ static u32 flow_hash(const struct sw_flow_key *key, int key_start,
/* Make sure number of hash bytes are multiple of u32. */
BUILD_BUG_ON(sizeof(long) % sizeof(u32));
- return jhash2(hash_key, hash_u32s, 0);
+ return arch_fast_hash2(hash_key, hash_u32s, 0);
}
static int flow_key_start(const struct sw_flow_key *key)
@@ -429,11 +447,11 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
return NULL;
}
-struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
+struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
const struct sw_flow_key *key,
u32 *n_mask_hit)
{
- struct table_instance *ti = rcu_dereference(tbl->ti);
+ struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
struct sw_flow_mask *mask;
struct sw_flow *flow;
@@ -447,6 +465,14 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
return NULL;
}
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
+ const struct sw_flow_key *key)
+{
+ u32 __always_unused n_mask_hit;
+
+ return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
+}
+
int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
struct sw_flow_mask *mask;
@@ -514,11 +540,7 @@ static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
return NULL;
}
-/**
- * add a new mask into the mask list.
- * The caller needs to make sure that 'mask' is not the same
- * as any masks that are already on the list.
- */
+/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
struct sw_flow_mask *new)
{
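
Both conversions in this file, like the sw_flow_actions one in flow_netlink.c, drop hand-written RCU callbacks in favor of kfree_rcu(), which is equivalent whenever the callback does nothing but kfree() the enclosing object. The general shape, sketched with a made-up struct:

    /* Sketch: free an RCU-protected object after a grace period.
     * "struct foo" is illustrative; kfree_rcu() needs an rcu_head
     * member and replaces the call_rcu() + container_of() callback. */
    struct foo {
        int data;
        struct rcu_head rcu;
    };

    static void foo_release(struct foo *f)
    {
        kfree_rcu(f, rcu);  /* was: call_rcu(&f->rcu, foo_free_cb); */
    }
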
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index fbe45d5ad07d..1996e34c0fd8 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -55,7 +55,7 @@ struct flow_table {
int ovs_flow_init(void);
void ovs_flow_exit(void);
-struct sw_flow *ovs_flow_alloc(void);
+struct sw_flow *ovs_flow_alloc(bool percpu_stats);
void ovs_flow_free(struct sw_flow *, bool deferred);
int ovs_flow_tbl_init(struct flow_table *);
@@ -69,9 +69,11 @@ void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
int ovs_flow_tbl_num_masks(const struct flow_table *table);
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table,
u32 *bucket, u32 *idx);
-struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
+struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
const struct sw_flow_key *,
u32 *n_mask_hit);
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
+ const struct sw_flow_key *);
bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
struct sw_flow_match *match);
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index d830a95f03a4..208dd9a26dd1 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -33,6 +33,9 @@
#include "vport.h"
#include "vport-internal_dev.h"
+static void ovs_vport_record_error(struct vport *,
+ enum vport_err_type err_type);
+
/* List of statically compiled vport implementations. Don't forget to also
* add yours to the list at the bottom of vport.h. */
static const struct vport_ops *vport_ops_list[] = {
@@ -136,14 +139,14 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
vport->ops = ops;
INIT_HLIST_NODE(&vport->dp_hash_node);
- vport->percpu_stats = alloc_percpu(struct pcpu_tstats);
+ vport->percpu_stats = alloc_percpu(struct pcpu_sw_netstats);
if (!vport->percpu_stats) {
kfree(vport);
return ERR_PTR(-ENOMEM);
}
for_each_possible_cpu(i) {
- struct pcpu_tstats *vport_stats;
+ struct pcpu_sw_netstats *vport_stats;
vport_stats = per_cpu_ptr(vport->percpu_stats, i);
u64_stats_init(&vport_stats->syncp);
}
@@ -275,8 +278,8 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
spin_unlock_bh(&vport->stats_lock);
for_each_possible_cpu(i) {
- const struct pcpu_tstats *percpu_stats;
- struct pcpu_tstats local_stats;
+ const struct pcpu_sw_netstats *percpu_stats;
+ struct pcpu_sw_netstats local_stats;
unsigned int start;
percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
@@ -344,7 +347,7 @@ int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
struct ovs_key_ipv4_tunnel *tun_key)
{
- struct pcpu_tstats *stats;
+ struct pcpu_sw_netstats *stats;
stats = this_cpu_ptr(vport->percpu_stats);
u64_stats_update_begin(&stats->syncp);
@@ -370,7 +373,7 @@ int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
int sent = vport->ops->send(vport, skb);
if (likely(sent > 0)) {
- struct pcpu_tstats *stats;
+ struct pcpu_sw_netstats *stats;
stats = this_cpu_ptr(vport->percpu_stats);
@@ -396,7 +399,8 @@ int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
* If using the vport generic stats layer indicate that an error of the given
* type has occurred.
*/
-void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type)
+static void ovs_vport_record_error(struct vport *vport,
+ enum vport_err_type err_type)
{
spin_lock(&vport->stats_lock);
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index 1a9fbcec6e1b..d7e50a17396c 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -87,7 +87,7 @@ struct vport {
struct hlist_node dp_hash_node;
const struct vport_ops *ops;
- struct pcpu_tstats __percpu *percpu_stats;
+ struct pcpu_sw_netstats __percpu *percpu_stats;
spinlock_t stats_lock;
struct vport_err_stats err_stats;
@@ -192,7 +192,6 @@ static inline struct vport *vport_from_priv(const void *priv)
void ovs_vport_receive(struct vport *, struct sk_buff *,
struct ovs_key_ipv4_tunnel *);
-void ovs_vport_record_error(struct vport *, enum vport_err_type err_type);
/* List of statically compiled vport implementations. Don't forget to also
* add yours to the list at the top of vport.c. */
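
The pcpu_tstats rename above is mechanical: struct pcpu_sw_netstats carries the same rx/tx packet and byte counters plus a u64_stats_sync. The read protocol it implies is worth spelling out, though, since on 32-bit SMP the syncp sequence counter is what keeps the 64-bit counters tear-free. A sketch of a consistent snapshot, assuming the _bh fetch helpers of this kernel generation:

    /* Sketch: tear-free read of one CPU's counters.  The loop retries
     * if a writer ran in between; on 64-bit builds it compiles away. */
    const struct pcpu_sw_netstats *s = per_cpu_ptr(vport->percpu_stats, i);
    struct pcpu_sw_netstats snap;
    unsigned int start;

    do {
        start = u64_stats_fetch_begin_bh(&s->syncp);
        snap = *s;
    } while (u64_stats_fetch_retry_bh(&s->syncp, start));
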
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ac27c86ef6d1..279467b74eb7 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -237,6 +237,82 @@ struct packet_skb_cb {
static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);
+static int packet_direct_xmit(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ const struct net_device_ops *ops = dev->netdev_ops;
+ netdev_features_t features;
+ struct netdev_queue *txq;
+ u16 queue_map;
+ int ret;
+
+ if (unlikely(!netif_running(dev) ||
+ !netif_carrier_ok(dev))) {
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+ }
+
+ features = netif_skb_features(skb);
+ if (skb_needs_linearize(skb, features) &&
+ __skb_linearize(skb)) {
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+ }
+
+ queue_map = skb_get_queue_mapping(skb);
+ txq = netdev_get_tx_queue(dev, queue_map);
+
+ __netif_tx_lock_bh(txq);
+ if (unlikely(netif_xmit_frozen_or_stopped(txq))) {
+ ret = NETDEV_TX_BUSY;
+ kfree_skb(skb);
+ goto out;
+ }
+
+ ret = ops->ndo_start_xmit(skb, dev);
+ if (likely(dev_xmit_complete(ret)))
+ txq_trans_update(txq);
+ else
+ kfree_skb(skb);
+out:
+ __netif_tx_unlock_bh(txq);
+ return ret;
+}
+
+static struct net_device *packet_cached_dev_get(struct packet_sock *po)
+{
+ struct net_device *dev;
+
+ rcu_read_lock();
+ dev = rcu_dereference(po->cached_dev);
+ if (likely(dev))
+ dev_hold(dev);
+ rcu_read_unlock();
+
+ return dev;
+}
+
+static void packet_cached_dev_assign(struct packet_sock *po,
+ struct net_device *dev)
+{
+ rcu_assign_pointer(po->cached_dev, dev);
+}
+
+static void packet_cached_dev_reset(struct packet_sock *po)
+{
+ RCU_INIT_POINTER(po->cached_dev, NULL);
+}
+
+static bool packet_use_direct_xmit(const struct packet_sock *po)
+{
+ return po->xmit == packet_direct_xmit;
+}
+
+static u16 packet_pick_tx_queue(struct net_device *dev)
+{
+ return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
+}
+
/* register_prot_hook must be invoked with the po->bind_lock held,
* or from a context in which asynchronous accesses to the packet
* socket are not possible (packet_create()).
@@ -246,12 +322,10 @@ static void register_prot_hook(struct sock *sk)
struct packet_sock *po = pkt_sk(sk);
if (!po->running) {
- if (po->fanout) {
+ if (po->fanout)
__fanout_link(sk, po);
- } else {
+ else
dev_add_pack(&po->prot_hook);
- rcu_assign_pointer(po->cached_dev, po->prot_hook.dev);
- }
sock_hold(sk);
po->running = 1;
@@ -270,12 +344,11 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
struct packet_sock *po = pkt_sk(sk);
po->running = 0;
- if (po->fanout) {
+
+ if (po->fanout)
__fanout_unlink(sk, po);
- } else {
+ else
__dev_remove_pack(&po->prot_hook);
- RCU_INIT_POINTER(po->cached_dev, NULL);
- }
__sock_put(sk);
@@ -437,11 +510,12 @@ static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
{
struct tpacket_kbdq_core *pkc;
- pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
+ pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
+ GET_PBDQC_FROM_RB(&po->rx_ring);
- spin_lock(&rb_queue->lock);
+ spin_lock_bh(&rb_queue->lock);
pkc->delete_blk_timer = 1;
- spin_unlock(&rb_queue->lock);
+ spin_unlock_bh(&rb_queue->lock);
prb_del_retire_blk_timer(pkc);
}
@@ -463,7 +537,8 @@ static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
if (tx_ring)
BUG();
- pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
+ pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
+ GET_PBDQC_FROM_RB(&po->rx_ring);
prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
}
@@ -521,7 +596,7 @@ static void init_prb_bdqc(struct packet_sock *po,
struct pgv *pg_vec,
union tpacket_req_u *req_u, int tx_ring)
{
- struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
+ struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
struct tpacket_block_desc *pbd;
memset(p1, 0x0, sizeof(*p1));
@@ -585,7 +660,7 @@ static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
static void prb_retire_rx_blk_timer_expired(unsigned long data)
{
struct packet_sock *po = (struct packet_sock *)data;
- struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
+ struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
unsigned int frozen;
struct tpacket_block_desc *pbd;
@@ -888,7 +963,7 @@ static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
struct tpacket3_hdr *ppd)
{
- ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
+ ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}
static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
@@ -902,9 +977,11 @@ static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
{
if (vlan_tx_tag_present(pkc->skb)) {
ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
- ppd->tp_status = TP_STATUS_VLAN_VALID;
+ ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
+ ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
} else {
ppd->hv1.tp_vlan_tci = 0;
+ ppd->hv1.tp_vlan_tpid = 0;
ppd->tp_status = TP_STATUS_AVAILABLE;
}
}
@@ -912,6 +989,7 @@ static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
struct tpacket3_hdr *ppd)
{
+ ppd->hv1.tp_padding = 0;
prb_fill_vlan_info(pkc, ppd);
if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
@@ -1220,7 +1298,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
if (!skb)
return 0;
}
- skb_get_rxhash(skb);
+ skb_get_hash(skb);
idx = fanout_demux_hash(f, skb, num);
break;
case PACKET_FANOUT_LB:
@@ -1278,9 +1356,9 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
spin_unlock(&f->lock);
}
-static bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
+static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
- if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout)
+ if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout)
return true;
return false;
@@ -1737,6 +1815,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
struct timespec ts;
__u32 ts_status;
+ /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
+ * We may add members to them up to the current aligned size without forcing
+ * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
+ */
+ BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
+ BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
+
if (skb->pkt_type == PACKET_LOOPBACK)
goto drop;
@@ -1843,11 +1928,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
h.h2->tp_nsec = ts.tv_nsec;
if (vlan_tx_tag_present(skb)) {
h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
- status |= TP_STATUS_VLAN_VALID;
+ h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
+ status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
} else {
h.h2->tp_vlan_tci = 0;
+ h.h2->tp_vlan_tpid = 0;
}
- h.h2->tp_padding = 0;
+ memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
hdrlen = sizeof(*h.h2);
break;
case TPACKET_V3:
@@ -1861,6 +1948,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
h.h3->tp_net = netoff;
h.h3->tp_sec = ts.tv_sec;
h.h3->tp_nsec = ts.tv_nsec;
+ memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
hdrlen = sizeof(*h.h3);
break;
default:
@@ -1971,9 +2059,10 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
skb_reserve(skb, hlen);
skb_reset_network_header(skb);
- skb_probe_transport_header(skb, 0);
- if (po->tp_tx_has_off) {
+ if (!packet_use_direct_xmit(po))
+ skb_probe_transport_header(skb, 0);
+ if (unlikely(po->tp_tx_has_off)) {
int off_min, off_max, off;
off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
off_max = po->tx_ring.frame_size - tp_len;
@@ -2059,19 +2148,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
return tp_len;
}
-static struct net_device *packet_cached_dev_get(struct packet_sock *po)
-{
- struct net_device *dev;
-
- rcu_read_lock();
- dev = rcu_dereference(po->cached_dev);
- if (dev)
- dev_hold(dev);
- rcu_read_unlock();
-
- return dev;
-}
-
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
{
struct sk_buff *skb;
@@ -2088,7 +2164,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
mutex_lock(&po->pg_vec_lock);
- if (saddr == NULL) {
+ if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
proto = po->num;
addr = NULL;
@@ -2156,12 +2232,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
}
}
+ skb_set_queue_mapping(skb, packet_pick_tx_queue(dev));
skb->destructor = tpacket_destruct_skb;
__packet_set_status(po, ph, TP_STATUS_SENDING);
atomic_inc(&po->tx_ring.pending);
status = TP_STATUS_SEND_REQUEST;
- err = dev_queue_xmit(skb);
+ err = po->xmit(skb);
if (unlikely(err > 0)) {
err = net_xmit_errno(err);
if (err && __packet_get_status(po, ph) ==
@@ -2220,8 +2297,7 @@ static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
return skb;
}
-static int packet_snd(struct socket *sock,
- struct msghdr *msg, size_t len)
+static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
@@ -2242,7 +2318,7 @@ static int packet_snd(struct socket *sock,
* Get and verify the address.
*/
- if (saddr == NULL) {
+ if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
proto = po->num;
addr = NULL;
@@ -2366,6 +2442,7 @@ static int packet_snd(struct socket *sock,
skb->dev = dev;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
+ skb_set_queue_mapping(skb, packet_pick_tx_queue(dev));
if (po->has_vnet_hdr) {
if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
@@ -2386,16 +2463,12 @@ static int packet_snd(struct socket *sock,
len += vnet_hdr_len;
}
- skb_probe_transport_header(skb, reserve);
-
+ if (!packet_use_direct_xmit(po))
+ skb_probe_transport_header(skb, reserve);
if (unlikely(extra_len == 4))
skb->no_fcs = 1;
- /*
- * Now send it
- */
-
- err = dev_queue_xmit(skb);
+ err = po->xmit(skb);
if (err > 0 && (err = net_xmit_errno(err)) != 0)
goto out_unlock;
@@ -2417,6 +2490,7 @@ static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
{
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
+
if (po->tx_ring.pg_vec)
return tpacket_snd(po, msg);
else
@@ -2451,6 +2525,8 @@ static int packet_release(struct socket *sock)
spin_lock(&po->bind_lock);
unregister_prot_hook(sk, false);
+ packet_cached_dev_reset(po);
+
if (po->prot_hook.dev) {
dev_put(po->prot_hook.dev);
po->prot_hook.dev = NULL;
@@ -2506,14 +2582,17 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc
spin_lock(&po->bind_lock);
unregister_prot_hook(sk, true);
+
po->num = protocol;
po->prot_hook.type = protocol;
if (po->prot_hook.dev)
dev_put(po->prot_hook.dev);
- po->prot_hook.dev = dev;
+ po->prot_hook.dev = dev;
po->ifindex = dev ? dev->ifindex : 0;
+ packet_cached_dev_assign(po, dev);
+
if (protocol == 0)
goto out_unlock;
@@ -2626,7 +2705,9 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
po = pkt_sk(sk);
sk->sk_family = PF_PACKET;
po->num = proto;
- RCU_INIT_POINTER(po->cached_dev, NULL);
+ po->xmit = dev_queue_xmit;
+
+ packet_cached_dev_reset(po);
sk->sk_destruct = packet_sock_destruct;
sk_refcnt_debug_inc(sk);
@@ -2799,11 +2880,12 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
aux.tp_net = skb_network_offset(skb);
if (vlan_tx_tag_present(skb)) {
aux.tp_vlan_tci = vlan_tx_tag_get(skb);
- aux.tp_status |= TP_STATUS_VLAN_VALID;
+ aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
+ aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
} else {
aux.tp_vlan_tci = 0;
+ aux.tp_vlan_tpid = 0;
}
- aux.tp_padding = 0;
put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
}
@@ -3204,6 +3286,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
po->tp_tx_has_off = !!val;
return 0;
}
+ case PACKET_QDISC_BYPASS:
+ {
+ int val;
+
+ if (optlen != sizeof(val))
+ return -EINVAL;
+ if (copy_from_user(&val, optval, sizeof(val)))
+ return -EFAULT;
+
+ po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
+ return 0;
+ }
default:
return -ENOPROTOOPT;
}
@@ -3296,6 +3390,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
case PACKET_TX_HAS_OFF:
val = po->tp_tx_has_off;
break;
+ case PACKET_QDISC_BYPASS:
+ val = packet_use_direct_xmit(po);
+ break;
default:
return -ENOPROTOOPT;
}
@@ -3337,6 +3434,7 @@ static int packet_notifier(struct notifier_block *this,
sk->sk_error_report(sk);
}
if (msg == NETDEV_UNREGISTER) {
+ packet_cached_dev_reset(po);
po->ifindex = -1;
if (po->prot_hook.dev)
dev_put(po->prot_hook.dev);
diff --git a/net/packet/internal.h b/net/packet/internal.h
index 1035fa2d909c..0a87d7b36c9e 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -114,6 +114,7 @@ struct packet_sock {
unsigned int tp_tx_has_off:1;
unsigned int tp_tstamp;
struct net_device __rcu *cached_dev;
+ int (*xmit)(struct sk_buff *skb);
struct packet_type prot_hook ____cacheline_aligned_in_smp;
};
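
The af_packet changes above add a per-socket transmit hook: po->xmit defaults to dev_queue_xmit() and can be pointed at packet_direct_xmit(), which hands frames to the driver queue directly, skipping both the qdisc layer and taps. From user space this is toggled with the new PACKET_QDISC_BYPASS socket option introduced by this series. A minimal sketch:

    /* Sketch: opt a packet socket into qdisc bypass.  Only effective
     * on kernels shipping this series; error handling trimmed. */
    #include <sys/socket.h>
    #include <linux/if_packet.h>
    #include <linux/if_ether.h>
    #include <arpa/inet.h>

    int open_bypass_socket(void)
    {
        int one = 1;
        int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

        if (fd < 0)
            return -1;
        if (setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS,
                       &one, sizeof(one)) < 0) {
            /* Older kernel: the socket keeps the default
             * dev_queue_xmit() path. */
        }
        return fd;
    }
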
diff --git a/net/rds/ib.c b/net/rds/ib.c
index b4c8b0022fee..ba2dffeff608 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -338,7 +338,8 @@ static int rds_ib_laddr_check(__be32 addr)
ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
/* due to this, we will claim to support iWARP devices unless we
check node_type. */
- if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA)
+ if (ret || !cm_id->device ||
+ cm_id->device->node_type != RDMA_NODE_IB_CA)
ret = -EADDRNOTAVAIL;
rdsdebug("addr %pI4 ret %d node type %d\n",
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index e59094981175..37be6e226d1b 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -552,9 +552,8 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
&& rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
scat = &rm->data.op_sg[sg];
- ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
- ret = min_t(int, ret, scat->length - conn->c_xmit_data_off);
- return ret;
+ ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
+ return sizeof(struct rds_header) + ret;
}
/* FIXME we may overallocate here */
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 33af77246bfe..d080eb4b0d29 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1012,7 +1012,7 @@ int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct ros
make_rose->source_call = facilities.source_call;
make_rose->source_ndigis = facilities.source_ndigis;
for (n = 0 ; n < facilities.source_ndigis ; n++)
- make_rose->source_digis[n]= facilities.source_digis[n];
+ make_rose->source_digis[n] = facilities.source_digis[n];
make_rose->neighbour = neigh;
make_rose->device = dev;
make_rose->facilities = facilities;
@@ -1253,6 +1253,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
if (msg->msg_name) {
struct sockaddr_rose *srose;
+ struct full_sockaddr_rose *full_srose = msg->msg_name;
memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
srose = msg->msg_name;
@@ -1260,18 +1261,9 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
srose->srose_addr = rose->dest_addr;
srose->srose_call = rose->dest_call;
srose->srose_ndigis = rose->dest_ndigis;
- if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
- struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
- for (n = 0 ; n < rose->dest_ndigis ; n++)
- full_srose->srose_digis[n] = rose->dest_digis[n];
- msg->msg_namelen = sizeof(struct full_sockaddr_rose);
- } else {
- if (rose->dest_ndigis >= 1) {
- srose->srose_ndigis = 1;
- srose->srose_digi = rose->dest_digis[0];
- }
- msg->msg_namelen = sizeof(struct sockaddr_rose);
- }
+ for (n = 0 ; n < rose->dest_ndigis ; n++)
+ full_srose->srose_digis[n] = rose->dest_digis[n];
+ msg->msg_namelen = sizeof(struct full_sockaddr_rose);
}
skb_free_datagram(sk, skb);
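With this change rose_recvmsg() unconditionally fills a struct full_sockaddr_rose and sets msg_namelen to match, instead of falling back to the short sockaddr_rose form for small buffers. A hedged userspace sketch of receiving with the full address (fd is assumed to be an already-bound AF_ROSE socket; glibc exports the structure via <netrose/rose.h>):

#include <sys/socket.h>
#include <netrose/rose.h>
#include <stdio.h>

static ssize_t rose_recv_with_digis(int fd, char *buf, size_t len)
{
	struct full_sockaddr_rose from;
	socklen_t fromlen = sizeof(from);
	ssize_t n;

	n = recvfrom(fd, buf, len, 0,
		     (struct sockaddr *)&from, &fromlen);
	if (n >= 0)
		/* on kernels with this change, fromlen is always
		 * sizeof(struct full_sockaddr_rose); digipeaters are
		 * in from.srose_digis[0..from.srose_ndigis-1]
		 */
		printf("%d digipeater(s) reported\n", from.srose_ndigis);
	return n;
}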
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c
index 28dbdb911b85..50005888be57 100644
--- a/net/rose/rose_dev.c
+++ b/net/rose/rose_dev.c
@@ -146,7 +146,7 @@ static netdev_tx_t rose_xmit(struct sk_buff *skb, struct net_device *dev)
static const struct header_ops rose_header_ops = {
.create = rose_header,
- .rebuild= rose_rebuild_header,
+ .rebuild = rose_rebuild_header,
};
static const struct net_device_ops rose_netdev_ops = {
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index ad1f1d819203..a1a8e29e5fc9 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -286,6 +286,28 @@ config NET_SCH_FQ
If unsure, say N.
+config NET_SCH_HHF
+ tristate "Heavy-Hitter Filter (HHF)"
+ help
+ Say Y here if you want to use the Heavy-Hitter Filter (HHF)
+ packet scheduling algorithm.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sch_hhf.
+
+config NET_SCH_PIE
+ tristate "Proportional Integral controller Enhanced (PIE) scheduler"
+ help
+ Say Y here if you want to use the Proportional Integral controller
+ Enhanced (PIE) packet scheduling algorithm.
+ For more information, please see
+ http://tools.ietf.org/html/draft-pan-tsvwg-pie-00
+
+ To compile this driver as a module, choose M here: the module
+ will be called sch_pie.
+
+ If unsure, say N.
+
config NET_SCH_INGRESS
tristate "Ingress Qdisc"
depends on NET_CLS_ACT
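The PIE help text only points at the IETF draft; the heart of the algorithm is a periodic update of a drop probability driven by the measured queueing delay. A floating-point sketch of that control law, following draft-pan-tsvwg-pie-00 (names are illustrative; the in-kernel sch_pie implementation uses scaled fixed-point arithmetic):

/* Hedged sketch of PIE's periodic update:
 *   p += alpha * (delay - target) + beta * (delay - old_delay)
 */
struct pie_state {
	double p;		/* drop probability, 0.0..1.0 */
	double old_delay;	/* queue delay at last update, seconds */
};

static void pie_update(struct pie_state *s, double delay,
		       double target, double alpha, double beta)
{
	s->p += alpha * (delay - target) + beta * (delay - s->old_delay);
	if (s->p < 0.0)
		s->p = 0.0;
	if (s->p > 1.0)
		s->p = 1.0;
	s->old_delay = delay;
}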
@@ -435,6 +457,7 @@ config NET_CLS_FLOW
config NET_CLS_CGROUP
tristate "Control Group Classifier"
select NET_CLS
+ select CGROUP_NET_CLASSID
depends on CGROUPS
---help---
Say Y here if you want to classify packets based on the control
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 35fa47a494ab..0a869a11f3e6 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -40,6 +40,8 @@ obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o
obj-$(CONFIG_NET_SCH_FQ_CODEL) += sch_fq_codel.o
obj-$(CONFIG_NET_SCH_FQ) += sch_fq.o
+obj-$(CONFIG_NET_SCH_HHF) += sch_hhf.o
+obj-$(CONFIG_NET_SCH_PIE) += sch_pie.o
obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index fd7072827a40..f63e1467cd3a 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -29,25 +29,16 @@
void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
{
- unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
- struct tcf_common **p1p;
-
- for (p1p = &hinfo->htab[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
- if (*p1p == p) {
- write_lock_bh(hinfo->lock);
- *p1p = p->tcfc_next;
- write_unlock_bh(hinfo->lock);
- gen_kill_estimator(&p->tcfc_bstats,
- &p->tcfc_rate_est);
- /*
- * gen_estimator est_timer() might access p->tcfc_lock
- * or bstats, wait a RCU grace period before freeing p
- */
- kfree_rcu(p, tcfc_rcu);
- return;
- }
- }
- WARN_ON(1);
+ spin_lock_bh(&hinfo->lock);
+ hlist_del(&p->tcfc_head);
+ spin_unlock_bh(&hinfo->lock);
+ gen_kill_estimator(&p->tcfc_bstats,
+ &p->tcfc_rate_est);
+ /*
+ * gen_estimator est_timer() might access p->tcfc_lock
+ * or bstats, wait an RCU grace period before freeing p
+ */
+ kfree_rcu(p, tcfc_rcu);
}
EXPORT_SYMBOL(tcf_hash_destroy);
@@ -73,18 +64,19 @@ EXPORT_SYMBOL(tcf_hash_release);
static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
struct tc_action *a, struct tcf_hashinfo *hinfo)
{
+ struct hlist_head *head;
struct tcf_common *p;
int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
struct nlattr *nest;
- read_lock_bh(hinfo->lock);
+ spin_lock_bh(&hinfo->lock);
s_i = cb->args[0];
for (i = 0; i < (hinfo->hmask + 1); i++) {
- p = hinfo->htab[tcf_hash(i, hinfo->hmask)];
+ head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
- for (; p; p = p->tcfc_next) {
+ hlist_for_each_entry_rcu(p, head, tcfc_head) {
index++;
if (index < s_i)
continue;
@@ -107,7 +99,7 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
}
}
done:
- read_unlock_bh(hinfo->lock);
+ spin_unlock_bh(&hinfo->lock);
if (n_i)
cb->args[0] += n_i;
return n_i;
@@ -120,7 +112,9 @@ nla_put_failure:
static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
struct tcf_hashinfo *hinfo)
{
- struct tcf_common *p, *s_p;
+ struct hlist_head *head;
+ struct hlist_node *n;
+ struct tcf_common *p;
struct nlattr *nest;
int i = 0, n_i = 0;
@@ -130,14 +124,12 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
if (nla_put_string(skb, TCA_KIND, a->ops->kind))
goto nla_put_failure;
for (i = 0; i < (hinfo->hmask + 1); i++) {
- p = hinfo->htab[tcf_hash(i, hinfo->hmask)];
-
- while (p != NULL) {
- s_p = p->tcfc_next;
- if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo))
+ head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
+ hlist_for_each_entry_safe(p, n, head, tcfc_head) {
+ if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo)) {
module_put(a->ops->owner);
- n_i++;
- p = s_p;
+ n_i++;
+ }
}
}
if (nla_put_u32(skb, TCA_FCNT, n_i))
@@ -150,8 +142,8 @@ nla_put_failure:
return -EINVAL;
}
-int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
- int type, struct tc_action *a)
+static int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
+ int type, struct tc_action *a)
{
struct tcf_hashinfo *hinfo = a->ops->hinfo;
@@ -164,19 +156,18 @@ int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
return -EINVAL;
}
}
-EXPORT_SYMBOL(tcf_generic_walker);
struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo)
{
- struct tcf_common *p;
+ struct tcf_common *p = NULL;
+ struct hlist_head *head;
- read_lock_bh(hinfo->lock);
- for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
- p = p->tcfc_next) {
+ spin_lock_bh(&hinfo->lock);
+ head = &hinfo->htab[tcf_hash(index, hinfo->hmask)];
+ hlist_for_each_entry_rcu(p, head, tcfc_head)
if (p->tcfc_index == index)
break;
- }
- read_unlock_bh(hinfo->lock);
+ spin_unlock_bh(&hinfo->lock);
return p;
}
@@ -191,11 +182,12 @@ u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo)
val = 1;
} while (tcf_hash_lookup(val, hinfo));
- return (*idx_gen = val);
+ *idx_gen = val;
+ return val;
}
EXPORT_SYMBOL(tcf_hash_new_index);
-int tcf_hash_search(struct tc_action *a, u32 index)
+static int tcf_hash_search(struct tc_action *a, u32 index)
{
struct tcf_hashinfo *hinfo = a->ops->hinfo;
struct tcf_common *p = tcf_hash_lookup(index, hinfo);
@@ -206,7 +198,6 @@ int tcf_hash_search(struct tc_action *a, u32 index)
}
return 0;
}
-EXPORT_SYMBOL(tcf_hash_search);
struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind,
struct tcf_hashinfo *hinfo)
@@ -235,6 +226,7 @@ struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
p->tcfc_bindcnt = 1;
spin_lock_init(&p->tcfc_lock);
+ INIT_HLIST_NODE(&p->tcfc_head);
p->tcfc_index = index ? index : tcf_hash_new_index(idx_gen, hinfo);
p->tcfc_tm.install = jiffies;
p->tcfc_tm.lastuse = jiffies;
@@ -256,29 +248,37 @@ void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo)
{
unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
- write_lock_bh(hinfo->lock);
- p->tcfc_next = hinfo->htab[h];
- hinfo->htab[h] = p;
- write_unlock_bh(hinfo->lock);
+ spin_lock_bh(&hinfo->lock);
+ hlist_add_head(&p->tcfc_head, &hinfo->htab[h]);
+ spin_unlock_bh(&hinfo->lock);
}
EXPORT_SYMBOL(tcf_hash_insert);
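The conversions above replace the open-coded tcfc_next chains with standard hlists under a spinlock, which makes removal O(1) instead of a walk-and-relink. A generic sketch of the same pattern with a hypothetical item type, using only the stock <linux/list.h> API:

#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical example type; mirrors the tcfc_head/htab usage above. */
struct item {
	u32 index;
	struct hlist_node node;
};

static DEFINE_SPINLOCK(tab_lock);
static struct hlist_head tab[16];	/* zeroed == empty heads */

static void item_insert(struct item *it)
{
	spin_lock_bh(&tab_lock);
	hlist_add_head(&it->node, &tab[it->index & 15]);
	spin_unlock_bh(&tab_lock);
}

static struct item *item_lookup(u32 index)
{
	struct item *it;

	hlist_for_each_entry(it, &tab[index & 15], node)
		if (it->index == index)
			return it;
	return NULL;
}

static void item_remove(struct item *it)
{
	spin_lock_bh(&tab_lock);
	hlist_del(&it->node);	/* no chain walk needed */
	spin_unlock_bh(&tab_lock);
}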
-static struct tc_action_ops *act_base = NULL;
+static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);
int tcf_register_action(struct tc_action_ops *act)
{
- struct tc_action_ops *a, **ap;
+ struct tc_action_ops *a;
+
+ /* Must supply act, dump, cleanup and init */
+ if (!act->act || !act->dump || !act->cleanup || !act->init)
+ return -EINVAL;
+
+ /* Supply defaults */
+ if (!act->lookup)
+ act->lookup = tcf_hash_search;
+ if (!act->walk)
+ act->walk = tcf_generic_walker;
write_lock(&act_mod_lock);
- for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) {
+ list_for_each_entry(a, &act_base, head) {
if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
write_unlock(&act_mod_lock);
return -EEXIST;
}
}
- act->next = NULL;
- *ap = act;
+ list_add_tail(&act->head, &act_base);
write_unlock(&act_mod_lock);
return 0;
}
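With the mandatory-callback check and the lookup/walk defaults in place, a new action module can register a much smaller ops table; the act_* diffs below all drop their explicit .lookup and .walk lines. A hypothetical skeleton (all foo_* symbols invented for illustration):

/* Hypothetical action skeleton; only the four mandatory ops are
 * set, so tcf_register_action() fills in tcf_hash_search and
 * tcf_generic_walker automatically.
 */
static struct tc_action_ops act_foo_ops = {
	.kind		= "foo",
	.hinfo		= &foo_hash_info,
	.type		= TCA_ACT_FOO,	/* hypothetical id */
	.owner		= THIS_MODULE,
	.act		= tcf_foo,
	.dump		= tcf_foo_dump,
	.cleanup	= tcf_foo_cleanup,
	.init		= tcf_foo_init,
	/* .lookup and .walk intentionally left NULL */
};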
@@ -286,17 +286,16 @@ EXPORT_SYMBOL(tcf_register_action);
int tcf_unregister_action(struct tc_action_ops *act)
{
- struct tc_action_ops *a, **ap;
+ struct tc_action_ops *a;
int err = -ENOENT;
write_lock(&act_mod_lock);
- for (ap = &act_base; (a = *ap) != NULL; ap = &a->next)
- if (a == act)
+ list_for_each_entry(a, &act_base, head) {
+ if (a == act) {
+ list_del(&act->head);
+ err = 0;
break;
- if (a) {
- *ap = a->next;
- a->next = NULL;
- err = 0;
+ }
}
write_unlock(&act_mod_lock);
return err;
@@ -306,69 +305,42 @@ EXPORT_SYMBOL(tcf_unregister_action);
/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
- struct tc_action_ops *a = NULL;
+ struct tc_action_ops *a, *res = NULL;
if (kind) {
read_lock(&act_mod_lock);
- for (a = act_base; a; a = a->next) {
+ list_for_each_entry(a, &act_base, head) {
if (strcmp(kind, a->kind) == 0) {
- if (!try_module_get(a->owner)) {
- read_unlock(&act_mod_lock);
- return NULL;
- }
+ if (try_module_get(a->owner))
+ res = a;
break;
}
}
read_unlock(&act_mod_lock);
}
- return a;
+ return res;
}
/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
- struct tc_action_ops *a = NULL;
+ struct tc_action_ops *a, *res = NULL;
if (kind) {
read_lock(&act_mod_lock);
- for (a = act_base; a; a = a->next) {
+ list_for_each_entry(a, &act_base, head) {
if (nla_strcmp(kind, a->kind) == 0) {
- if (!try_module_get(a->owner)) {
- read_unlock(&act_mod_lock);
- return NULL;
- }
+ if (try_module_get(a->owner))
+ res = a;
break;
}
}
read_unlock(&act_mod_lock);
}
- return a;
-}
-
-#if 0
-/* lookup by id */
-static struct tc_action_ops *tc_lookup_action_id(u32 type)
-{
- struct tc_action_ops *a = NULL;
-
- if (type) {
- read_lock(&act_mod_lock);
- for (a = act_base; a; a = a->next) {
- if (a->type == type) {
- if (!try_module_get(a->owner)) {
- read_unlock(&act_mod_lock);
- return NULL;
- }
- break;
- }
- }
- read_unlock(&act_mod_lock);
- }
- return a;
+ return res;
}
-#endif
-int tcf_action_exec(struct sk_buff *skb, const struct tc_action *act,
+int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
struct tcf_result *res)
{
const struct tc_action *a;
@@ -379,53 +351,39 @@ int tcf_action_exec(struct sk_buff *skb, const struct tc_action *act,
ret = TC_ACT_OK;
goto exec_done;
}
- while ((a = act) != NULL) {
+ list_for_each_entry(a, actions, list) {
repeat:
- if (a->ops && a->ops->act) {
- ret = a->ops->act(skb, a, res);
- if (TC_MUNGED & skb->tc_verd) {
- /* copied already, allow trampling */
- skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
- skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
- }
- if (ret == TC_ACT_REPEAT)
- goto repeat; /* we need a ttl - JHS */
- if (ret != TC_ACT_PIPE)
- goto exec_done;
+ ret = a->ops->act(skb, a, res);
+ if (TC_MUNGED & skb->tc_verd) {
+ /* copied already, allow trampling */
+ skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
+ skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
}
- act = a->next;
+ if (ret == TC_ACT_REPEAT)
+ goto repeat; /* we need a ttl - JHS */
+ if (ret != TC_ACT_PIPE)
+ goto exec_done;
}
exec_done:
return ret;
}
EXPORT_SYMBOL(tcf_action_exec);
-void tcf_action_destroy(struct tc_action *act, int bind)
+void tcf_action_destroy(struct list_head *actions, int bind)
{
- struct tc_action *a;
+ struct tc_action *a, *tmp;
- for (a = act; a; a = act) {
- if (a->ops && a->ops->cleanup) {
- if (a->ops->cleanup(a, bind) == ACT_P_DELETED)
- module_put(a->ops->owner);
- act = act->next;
- kfree(a);
- } else {
- /*FIXME: Remove later - catch insertion bugs*/
- WARN(1, "tcf_action_destroy: BUG? destroying NULL ops\n");
- act = act->next;
- kfree(a);
- }
+ list_for_each_entry_safe(a, tmp, actions, list) {
+ if (a->ops->cleanup(a, bind) == ACT_P_DELETED)
+ module_put(a->ops->owner);
+ list_del(&a->list);
+ kfree(a);
}
}
int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
- int err = -EINVAL;
-
- if (a->ops == NULL || a->ops->dump == NULL)
- return err;
return a->ops->dump(skb, a, bind, ref);
}
@@ -436,9 +394,6 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
- if (a->ops == NULL || a->ops->dump == NULL)
- return err;
-
if (nla_put_string(skb, TCA_KIND, a->ops->kind))
goto nla_put_failure;
if (tcf_action_copy_stats(skb, a, 0))
@@ -459,14 +414,13 @@ nla_put_failure:
EXPORT_SYMBOL(tcf_action_dump_1);
int
-tcf_action_dump(struct sk_buff *skb, struct tc_action *act, int bind, int ref)
+tcf_action_dump(struct sk_buff *skb, struct list_head *actions, int bind, int ref)
{
struct tc_action *a;
int err = -EINVAL;
struct nlattr *nest;
- while ((a = act) != NULL) {
- act = a->next;
+ list_for_each_entry(a, actions, list) {
nest = nla_nest_start(skb, a->order);
if (nest == NULL)
goto nla_put_failure;
@@ -541,6 +495,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
if (a == NULL)
goto err_mod;
+ INIT_LIST_HEAD(&a->list);
/* backward compatibility for policer */
if (name == NULL)
err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, a, ovr, bind);
@@ -567,37 +522,33 @@ err_out:
return ERR_PTR(err);
}
-struct tc_action *tcf_action_init(struct net *net, struct nlattr *nla,
+int tcf_action_init(struct net *net, struct nlattr *nla,
struct nlattr *est, char *name, int ovr,
- int bind)
+ int bind, struct list_head *actions)
{
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
- struct tc_action *head = NULL, *act, *act_prev = NULL;
+ struct tc_action *act;
int err;
int i;
err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
if (err < 0)
- return ERR_PTR(err);
+ return err;
for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
act = tcf_action_init_1(net, tb[i], est, name, ovr, bind);
- if (IS_ERR(act))
+ if (IS_ERR(act)) {
+ err = PTR_ERR(act);
goto err;
+ }
act->order = i;
-
- if (head == NULL)
- head = act;
- else
- act_prev->next = act;
- act_prev = act;
+ list_add_tail(&act->list, actions);
}
- return head;
+ return 0;
err:
- if (head != NULL)
- tcf_action_destroy(head, bind);
- return act;
+ tcf_action_destroy(actions, bind);
+ return err;
}
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
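Callers of the reworked tcf_action_init() now own the list head, and error handling is symmetric: on failure the function has already destroyed whatever it managed to add. A sketch of the kernel-side calling convention, mirroring tcf_action_add() further down:

LIST_HEAD(actions);
int err;

err = tcf_action_init(net, nla, est, NULL, ovr, bind, &actions);
if (err)
	return err;	/* partial list already destroyed */

/* ... attach or dump the actions ... */

tcf_action_destroy(&actions, bind);	/* or cleanup_a() if only freeing */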
@@ -626,10 +577,6 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
if (err < 0)
goto errout;
- if (a->ops != NULL && a->ops->get_stats != NULL)
- if (a->ops->get_stats(skb, a) < 0)
- goto errout;
-
if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 ||
gnet_stats_copy_rate_est(&d, &h->tcf_bstats,
&h->tcf_rate_est) < 0 ||
@@ -646,7 +593,7 @@ errout:
}
static int
-tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 portid, u32 seq,
+tca_get_fill(struct sk_buff *skb, struct list_head *actions, u32 portid, u32 seq,
u16 flags, int event, int bind, int ref)
{
struct tcamsg *t;
@@ -666,7 +613,7 @@ tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 portid, u32 seq,
if (nest == NULL)
goto out_nlmsg_trim;
- if (tcf_action_dump(skb, a, bind, ref) < 0)
+ if (tcf_action_dump(skb, actions, bind, ref) < 0)
goto out_nlmsg_trim;
nla_nest_end(skb, nest);
@@ -681,14 +628,14 @@ out_nlmsg_trim:
static int
act_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
- struct tc_action *a, int event)
+ struct list_head *actions, int event)
{
struct sk_buff *skb;
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb)
return -ENOBUFS;
- if (tca_get_fill(skb, a, portid, n->nlmsg_seq, 0, event, 0, 0) <= 0) {
+ if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event, 0, 0) <= 0) {
kfree_skb(skb);
return -EINVAL;
}
@@ -719,12 +666,11 @@ tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 portid)
if (a == NULL)
goto err_out;
+ INIT_LIST_HEAD(&a->list);
err = -EINVAL;
a->ops = tc_lookup_action(tb[TCA_ACT_KIND]);
- if (a->ops == NULL)
+ if (a->ops == NULL) /* could happen in a batch of actions */
goto err_free;
- if (a->ops->lookup == NULL)
- goto err_mod;
err = -ENOENT;
if (a->ops->lookup(a, index) == 0)
goto err_mod;
@@ -740,12 +686,12 @@ err_out:
return ERR_PTR(err);
}
-static void cleanup_a(struct tc_action *act)
+static void cleanup_a(struct list_head *actions)
{
- struct tc_action *a;
+ struct tc_action *a, *tmp;
- for (a = act; a; a = act) {
- act = a->next;
+ list_for_each_entry_safe(a, tmp, actions, list) {
+ list_del(&a->list);
kfree(a);
}
}
@@ -760,6 +706,7 @@ static struct tc_action *create_a(int i)
return NULL;
}
act->order = i;
+ INIT_LIST_HEAD(&act->list);
return act;
}
@@ -798,7 +745,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
err = -EINVAL;
kind = tb[TCA_ACT_KIND];
a->ops = tc_lookup_action(kind);
- if (a->ops == NULL)
+ if (a->ops == NULL) /* someone trying to flush an unknown action */
goto err_out;
nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0);
@@ -847,7 +794,8 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
{
int i, ret;
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
- struct tc_action *head = NULL, *act, *act_prev = NULL;
+ struct tc_action *act;
+ LIST_HEAD(actions);
ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
if (ret < 0)
@@ -867,16 +815,11 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
goto err;
}
act->order = i;
-
- if (head == NULL)
- head = act;
- else
- act_prev->next = act;
- act_prev = act;
+ list_add_tail(&act->list, &actions);
}
if (event == RTM_GETACTION)
- ret = act_get_notify(net, portid, n, head, event);
+ ret = act_get_notify(net, portid, n, &actions, event);
else { /* delete */
struct sk_buff *skb;
@@ -886,7 +829,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
goto err;
}
- if (tca_get_fill(skb, head, portid, n->nlmsg_seq, 0, event,
+ if (tca_get_fill(skb, &actions, portid, n->nlmsg_seq, 0, event,
0, 1) <= 0) {
kfree_skb(skb);
ret = -EINVAL;
@@ -894,7 +837,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
}
/* now do the delete */
- tcf_action_destroy(head, 0);
+ tcf_action_destroy(&actions, 0);
ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
n->nlmsg_flags & NLM_F_ECHO);
if (ret > 0)
@@ -902,11 +845,11 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
return ret;
}
err:
- cleanup_a(head);
+ cleanup_a(&actions);
return ret;
}
-static int tcf_add_notify(struct net *net, struct tc_action *a,
+static int tcf_add_notify(struct net *net, struct list_head *actions,
u32 portid, u32 seq, int event, u16 flags)
{
struct tcamsg *t;
@@ -934,7 +877,7 @@ static int tcf_add_notify(struct net *net, struct tc_action *a,
if (nest == NULL)
goto out_kfree_skb;
- if (tcf_action_dump(skb, a, 0, 0) < 0)
+ if (tcf_action_dump(skb, actions, 0, 0) < 0)
goto out_kfree_skb;
nla_nest_end(skb, nest);
@@ -958,26 +901,18 @@ tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
u32 portid, int ovr)
{
int ret = 0;
- struct tc_action *act;
- struct tc_action *a;
+ LIST_HEAD(actions);
u32 seq = n->nlmsg_seq;
- act = tcf_action_init(net, nla, NULL, NULL, ovr, 0);
- if (act == NULL)
- goto done;
- if (IS_ERR(act)) {
- ret = PTR_ERR(act);
+ ret = tcf_action_init(net, nla, NULL, NULL, ovr, 0, &actions);
+ if (ret)
goto done;
- }
/* dump then free all the actions after update; inserted policy
* stays intact
*/
- ret = tcf_add_notify(net, act, portid, seq, RTM_NEWACTION, n->nlmsg_flags);
- for (a = act; a; a = act) {
- act = a->next;
- kfree(a);
- }
+ ret = tcf_add_notify(net, &actions, portid, seq, RTM_NEWACTION, n->nlmsg_flags);
+ cleanup_a(&actions);
done:
return ret;
}
@@ -1084,12 +1019,6 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
memset(&a, 0, sizeof(struct tc_action));
a.ops = a_o;
- if (a_o->walk == NULL) {
- WARN(1, "tc_dump_action: %s !capable of dumping table\n",
- a_o->kind);
- goto out_module_put;
- }
-
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
cb->nlh->nlmsg_type, sizeof(*t), 0);
if (!nlh)
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 3a4c0caa1f7d..8b1d65772a8d 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -37,15 +37,8 @@
#include <net/tc_act/tc_csum.h>
#define CSUM_TAB_MASK 15
-static struct tcf_common *tcf_csum_ht[CSUM_TAB_MASK + 1];
static u32 csum_idx_gen;
-static DEFINE_RWLOCK(csum_lock);
-
-static struct tcf_hashinfo csum_hash_info = {
- .htab = tcf_csum_ht,
- .hmask = CSUM_TAB_MASK,
- .lock = &csum_lock,
-};
+static struct tcf_hashinfo csum_hash_info;
static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
@@ -77,16 +70,16 @@ static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
&csum_idx_gen, &csum_hash_info);
if (IS_ERR(pc))
return PTR_ERR(pc);
- p = to_tcf_csum(pc);
ret = ACT_P_CREATED;
} else {
- p = to_tcf_csum(pc);
- if (!ovr) {
- tcf_hash_release(pc, bind, &csum_hash_info);
+ if (bind) /* don't override defaults */
+ return 0;
+ tcf_hash_release(pc, bind, &csum_hash_info);
+ if (!ovr)
return -EEXIST;
- }
}
+ p = to_tcf_csum(pc);
spin_lock_bh(&p->tcf_lock);
p->tcf_action = parm->action;
p->update_flags = parm->update_flags;
@@ -585,9 +578,7 @@ static struct tc_action_ops act_csum_ops = {
.act = tcf_csum,
.dump = tcf_csum_dump,
.cleanup = tcf_csum_cleanup,
- .lookup = tcf_hash_search,
.init = tcf_csum_init,
- .walk = tcf_generic_walker
};
MODULE_DESCRIPTION("Checksum updating actions");
@@ -595,6 +586,10 @@ MODULE_LICENSE("GPL");
static int __init csum_init_module(void)
{
+ int err = tcf_hashinfo_init(&csum_hash_info, CSUM_TAB_MASK);
+ if (err)
+ return err;
+
return tcf_register_action(&act_csum_ops);
}
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index fd2b3cff5fa2..af5641c290fa 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -24,15 +24,8 @@
#include <net/tc_act/tc_gact.h>
#define GACT_TAB_MASK 15
-static struct tcf_common *tcf_gact_ht[GACT_TAB_MASK + 1];
static u32 gact_idx_gen;
-static DEFINE_RWLOCK(gact_lock);
-
-static struct tcf_hashinfo gact_hash_info = {
- .htab = tcf_gact_ht,
- .hmask = GACT_TAB_MASK,
- .lock = &gact_lock,
-};
+static struct tcf_hashinfo gact_hash_info;
#ifdef CONFIG_GACT_PROB
static int gact_net_rand(struct tcf_gact *gact)
@@ -102,10 +95,11 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
return PTR_ERR(pc);
ret = ACT_P_CREATED;
} else {
- if (!ovr) {
- tcf_hash_release(pc, bind, &gact_hash_info);
+ if (bind) /* don't override defaults */
+ return 0;
+ tcf_hash_release(pc, bind, &gact_hash_info);
+ if (!ovr)
return -EEXIST;
- }
}
gact = to_gact(pc);
@@ -206,9 +200,7 @@ static struct tc_action_ops act_gact_ops = {
.act = tcf_gact,
.dump = tcf_gact_dump,
.cleanup = tcf_gact_cleanup,
- .lookup = tcf_hash_search,
.init = tcf_gact_init,
- .walk = tcf_generic_walker
};
MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
@@ -217,6 +209,9 @@ MODULE_LICENSE("GPL");
static int __init gact_init_module(void)
{
+ int err = tcf_hashinfo_init(&gact_hash_info, GACT_TAB_MASK);
+ if (err)
+ return err;
#ifdef CONFIG_GACT_PROB
pr_info("GACT probability on\n");
#else
@@ -228,6 +223,7 @@ static int __init gact_init_module(void)
static void __exit gact_cleanup_module(void)
{
tcf_unregister_action(&act_gact_ops);
+ tcf_hashinfo_destroy(&gact_hash_info);
}
module_init(gact_init_module);
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 60d88b6b9560..242636950ea5 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -29,15 +29,8 @@
#define IPT_TAB_MASK 15
-static struct tcf_common *tcf_ipt_ht[IPT_TAB_MASK + 1];
static u32 ipt_idx_gen;
-static DEFINE_RWLOCK(ipt_lock);
-
-static struct tcf_hashinfo ipt_hash_info = {
- .htab = tcf_ipt_ht,
- .hmask = IPT_TAB_MASK,
- .lock = &ipt_lock,
-};
+static struct tcf_hashinfo ipt_hash_info;
static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int hook)
{
@@ -141,10 +134,12 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
return PTR_ERR(pc);
ret = ACT_P_CREATED;
} else {
- if (!ovr) {
- tcf_ipt_release(to_ipt(pc), bind);
+ if (bind) /* don't override defaults */
+ return 0;
+ tcf_ipt_release(to_ipt(pc), bind);
+
+ if (!ovr)
return -EEXIST;
- }
}
ipt = to_ipt(pc);
@@ -298,9 +293,7 @@ static struct tc_action_ops act_ipt_ops = {
.act = tcf_ipt,
.dump = tcf_ipt_dump,
.cleanup = tcf_ipt_cleanup,
- .lookup = tcf_hash_search,
.init = tcf_ipt_init,
- .walk = tcf_generic_walker
};
static struct tc_action_ops act_xt_ops = {
@@ -312,9 +305,7 @@ static struct tc_action_ops act_xt_ops = {
.act = tcf_ipt,
.dump = tcf_ipt_dump,
.cleanup = tcf_ipt_cleanup,
- .lookup = tcf_hash_search,
.init = tcf_ipt_init,
- .walk = tcf_generic_walker
};
MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");
@@ -324,7 +315,11 @@ MODULE_ALIAS("act_xt");
static int __init ipt_init_module(void)
{
- int ret1, ret2;
+ int ret1, ret2, err;
+ err = tcf_hashinfo_init(&ipt_hash_info, IPT_TAB_MASK);
+ if (err)
+ return err;
+
ret1 = tcf_register_action(&act_xt_ops);
if (ret1 < 0)
printk("Failed to load xt action\n");
@@ -332,9 +327,10 @@ static int __init ipt_init_module(void)
if (ret2 < 0)
printk("Failed to load ipt action\n");
- if (ret1 < 0 && ret2 < 0)
+ if (ret1 < 0 && ret2 < 0) {
+ tcf_hashinfo_destroy(&ipt_hash_info);
return ret1;
- else
+ } else
return 0;
}
@@ -342,6 +338,7 @@ static void __exit ipt_cleanup_module(void)
{
tcf_unregister_action(&act_xt_ops);
tcf_unregister_action(&act_ipt_ops);
+ tcf_hashinfo_destroy(&ipt_hash_info);
}
module_init(ipt_init_module);
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 977c10e0631b..9dbb8cd64cb0 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -30,16 +30,9 @@
#include <linux/if_arp.h>
#define MIRRED_TAB_MASK 7
-static struct tcf_common *tcf_mirred_ht[MIRRED_TAB_MASK + 1];
static u32 mirred_idx_gen;
-static DEFINE_RWLOCK(mirred_lock);
static LIST_HEAD(mirred_list);
-
-static struct tcf_hashinfo mirred_hash_info = {
- .htab = tcf_mirred_ht,
- .hmask = MIRRED_TAB_MASK,
- .lock = &mirred_lock,
-};
+static struct tcf_hashinfo mirred_hash_info;
static int tcf_mirred_release(struct tcf_mirred *m, int bind)
{
@@ -261,7 +254,6 @@ static struct notifier_block mirred_device_notifier = {
.notifier_call = mirred_device_event,
};
-
static struct tc_action_ops act_mirred_ops = {
.kind = "mirred",
.hinfo = &mirred_hash_info,
@@ -271,9 +263,7 @@ static struct tc_action_ops act_mirred_ops = {
.act = tcf_mirred,
.dump = tcf_mirred_dump,
.cleanup = tcf_mirred_cleanup,
- .lookup = tcf_hash_search,
.init = tcf_mirred_init,
- .walk = tcf_generic_walker
};
MODULE_AUTHOR("Jamal Hadi Salim(2002)");
@@ -286,14 +276,20 @@ static int __init mirred_init_module(void)
if (err)
return err;
+ err = tcf_hashinfo_init(&mirred_hash_info, MIRRED_TAB_MASK);
+ if (err) {
+ unregister_netdevice_notifier(&mirred_device_notifier);
+ return err;
+ }
pr_info("Mirror/redirect action on\n");
return tcf_register_action(&act_mirred_ops);
}
static void __exit mirred_cleanup_module(void)
{
- unregister_netdevice_notifier(&mirred_device_notifier);
tcf_unregister_action(&act_mirred_ops);
+ tcf_hashinfo_destroy(&mirred_hash_info);
+ unregister_netdevice_notifier(&mirred_device_notifier);
}
module_init(mirred_init_module);
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 876f0ef29694..584e65503edb 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -30,15 +30,9 @@
#define NAT_TAB_MASK 15
-static struct tcf_common *tcf_nat_ht[NAT_TAB_MASK + 1];
static u32 nat_idx_gen;
-static DEFINE_RWLOCK(nat_lock);
-static struct tcf_hashinfo nat_hash_info = {
- .htab = tcf_nat_ht,
- .hmask = NAT_TAB_MASK,
- .lock = &nat_lock,
-};
+static struct tcf_hashinfo nat_hash_info;
static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
[TCA_NAT_PARMS] = { .len = sizeof(struct tc_nat) },
@@ -70,15 +64,15 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
&nat_idx_gen, &nat_hash_info);
if (IS_ERR(pc))
return PTR_ERR(pc);
- p = to_tcf_nat(pc);
ret = ACT_P_CREATED;
} else {
- p = to_tcf_nat(pc);
- if (!ovr) {
- tcf_hash_release(pc, bind, &nat_hash_info);
+ if (bind)
+ return 0;
+ tcf_hash_release(pc, bind, &nat_hash_info);
+ if (!ovr)
return -EEXIST;
- }
}
+ p = to_tcf_nat(pc);
spin_lock_bh(&p->tcf_lock);
p->old_addr = parm->old_addr;
@@ -308,9 +302,7 @@ static struct tc_action_ops act_nat_ops = {
.act = tcf_nat,
.dump = tcf_nat_dump,
.cleanup = tcf_nat_cleanup,
- .lookup = tcf_hash_search,
.init = tcf_nat_init,
- .walk = tcf_generic_walker
};
MODULE_DESCRIPTION("Stateless NAT actions");
@@ -318,12 +310,16 @@ MODULE_LICENSE("GPL");
static int __init nat_init_module(void)
{
+ int err = tcf_hashinfo_init(&nat_hash_info, NAT_TAB_MASK);
+ if (err)
+ return err;
return tcf_register_action(&act_nat_ops);
}
static void __exit nat_cleanup_module(void)
{
tcf_unregister_action(&act_nat_ops);
+ tcf_hashinfo_destroy(&nat_hash_info);
}
module_init(nat_init_module);
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 7ed78c9e505c..729189341933 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -24,15 +24,9 @@
#include <net/tc_act/tc_pedit.h>
#define PEDIT_TAB_MASK 15
-static struct tcf_common *tcf_pedit_ht[PEDIT_TAB_MASK + 1];
static u32 pedit_idx_gen;
-static DEFINE_RWLOCK(pedit_lock);
-static struct tcf_hashinfo pedit_hash_info = {
- .htab = tcf_pedit_ht,
- .hmask = PEDIT_TAB_MASK,
- .lock = &pedit_lock,
-};
+static struct tcf_hashinfo pedit_hash_info;
static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = {
[TCA_PEDIT_PARMS] = { .len = sizeof(struct tc_pedit) },
@@ -84,10 +78,12 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
ret = ACT_P_CREATED;
} else {
p = to_pedit(pc);
- if (!ovr) {
- tcf_hash_release(pc, bind, &pedit_hash_info);
+ tcf_hash_release(pc, bind, &pedit_hash_info);
+ if (bind)
+ return 0;
+ if (!ovr)
return -EEXIST;
- }
+
if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
keys = kmalloc(ksize, GFP_KERNEL);
if (keys == NULL)
@@ -243,9 +239,7 @@ static struct tc_action_ops act_pedit_ops = {
.act = tcf_pedit,
.dump = tcf_pedit_dump,
.cleanup = tcf_pedit_cleanup,
- .lookup = tcf_hash_search,
.init = tcf_pedit_init,
- .walk = tcf_generic_walker
};
MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
@@ -254,11 +248,15 @@ MODULE_LICENSE("GPL");
static int __init pedit_init_module(void)
{
+ int err = tcf_hashinfo_init(&pedit_hash_info, PEDIT_TAB_MASK);
+ if (err)
+ return err;
return tcf_register_action(&act_pedit_ops);
}
static void __exit pedit_cleanup_module(void)
{
+ tcf_hashinfo_destroy(&pedit_hash_info);
tcf_unregister_action(&act_pedit_ops);
}
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 272d8e924cf6..9295b86d5319 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -41,15 +41,8 @@ struct tcf_police {
container_of(pc, struct tcf_police, common)
#define POL_TAB_MASK 15
-static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
static u32 police_idx_gen;
-static DEFINE_RWLOCK(police_lock);
-
-static struct tcf_hashinfo police_hash_info = {
- .htab = tcf_police_ht,
- .hmask = POL_TAB_MASK,
- .lock = &police_lock,
-};
+static struct tcf_hashinfo police_hash_info;
/* old policer structure from before tc actions */
struct tc_police_compat {
@@ -67,18 +60,19 @@ struct tc_police_compat {
static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb,
int type, struct tc_action *a)
{
+ struct hlist_head *head;
struct tcf_common *p;
int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
struct nlattr *nest;
- read_lock_bh(&police_lock);
+ spin_lock_bh(&police_hash_info.lock);
s_i = cb->args[0];
for (i = 0; i < (POL_TAB_MASK + 1); i++) {
- p = tcf_police_ht[tcf_hash(i, POL_TAB_MASK)];
+ head = &police_hash_info.htab[tcf_hash(i, POL_TAB_MASK)];
- for (; p; p = p->tcfc_next) {
+ hlist_for_each_entry_rcu(p, head, tcfc_head) {
index++;
if (index < s_i)
continue;
@@ -101,7 +95,7 @@ static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *c
}
}
done:
- read_unlock_bh(&police_lock);
+ spin_unlock_bh(&police_hash_info.lock);
if (n_i)
cb->args[0] += n_i;
return n_i;
@@ -113,25 +107,16 @@ nla_put_failure:
static void tcf_police_destroy(struct tcf_police *p)
{
- unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
- struct tcf_common **p1p;
-
- for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
- if (*p1p == &p->common) {
- write_lock_bh(&police_lock);
- *p1p = p->tcf_next;
- write_unlock_bh(&police_lock);
- gen_kill_estimator(&p->tcf_bstats,
- &p->tcf_rate_est);
- /*
- * gen_estimator est_timer() might access p->tcf_lock
- * or bstats, wait a RCU grace period before freeing p
- */
- kfree_rcu(p, tcf_rcu);
- return;
- }
- }
- WARN_ON(1);
+ spin_lock_bh(&police_hash_info.lock);
+ hlist_del(&p->tcf_head);
+ spin_unlock_bh(&police_hash_info.lock);
+ gen_kill_estimator(&p->tcf_bstats,
+ &p->tcf_rate_est);
+ /*
+ * gen_estimator est_timer() might access p->tcf_lock
+ * or bstats, wait an RCU grace period before freeing p
+ */
+ kfree_rcu(p, tcf_rcu);
}
static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
@@ -177,10 +162,12 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
if (bind) {
police->tcf_bindcnt += 1;
police->tcf_refcnt += 1;
+ return 0;
}
if (ovr)
goto override;
- return ret;
+ /* not replacing */
+ return -EEXIST;
}
}
@@ -266,10 +253,9 @@ override:
police->tcf_index = parm->index ? parm->index :
tcf_hash_new_index(&police_idx_gen, &police_hash_info);
h = tcf_hash(police->tcf_index, POL_TAB_MASK);
- write_lock_bh(&police_lock);
- police->tcf_next = tcf_police_ht[h];
- tcf_police_ht[h] = &police->common;
- write_unlock_bh(&police_lock);
+ spin_lock_bh(&police_hash_info.lock);
+ hlist_add_head(&police->tcf_head, &police_hash_info.htab[h]);
+ spin_unlock_bh(&police_hash_info.lock);
a->priv = police;
return ret;
@@ -277,10 +263,8 @@ override:
failure_unlock:
spin_unlock_bh(&police->tcf_lock);
failure:
- if (P_tab)
- qdisc_put_rtab(P_tab);
- if (R_tab)
- qdisc_put_rtab(R_tab);
+ qdisc_put_rtab(P_tab);
+ qdisc_put_rtab(R_tab);
if (ret == ACT_P_CREATED)
kfree(police);
return err;
@@ -407,7 +391,6 @@ static struct tc_action_ops act_police_ops = {
.act = tcf_act_police,
.dump = tcf_act_police_dump,
.cleanup = tcf_act_police_cleanup,
- .lookup = tcf_hash_search,
.init = tcf_act_police_locate,
.walk = tcf_act_police_walker
};
@@ -415,12 +398,19 @@ static struct tc_action_ops act_police_ops = {
static int __init
police_init_module(void)
{
- return tcf_register_action(&act_police_ops);
+ int err = tcf_hashinfo_init(&police_hash_info, POL_TAB_MASK);
+ if (err)
+ return err;
+ err = tcf_register_action(&act_police_ops);
+ if (err)
+ tcf_hashinfo_destroy(&police_hash_info);
+ return err;
}
static void __exit
police_cleanup_module(void)
{
+ tcf_hashinfo_destroy(&police_hash_info);
tcf_unregister_action(&act_police_ops);
}
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 7725eb4ab756..b44491e3ec17 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -25,15 +25,8 @@
#include <net/tc_act/tc_defact.h>
#define SIMP_TAB_MASK 7
-static struct tcf_common *tcf_simp_ht[SIMP_TAB_MASK + 1];
static u32 simp_idx_gen;
-static DEFINE_RWLOCK(simp_lock);
-
-static struct tcf_hashinfo simp_hash_info = {
- .htab = tcf_simp_ht,
- .hmask = SIMP_TAB_MASK,
- .lock = &simp_lock,
-};
+static struct tcf_hashinfo simp_hash_info;
#define SIMP_MAX_DATA 32
static int tcf_simp(struct sk_buff *skb, const struct tc_action *a,
@@ -142,10 +135,13 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
ret = ACT_P_CREATED;
} else {
d = to_defact(pc);
- if (!ovr) {
- tcf_simp_release(d, bind);
+
+ if (bind)
+ return 0;
+ tcf_simp_release(d, bind);
+ if (!ovr)
return -EEXIST;
- }
+
reset_policy(d, defdata, parm);
}
@@ -201,7 +197,6 @@ static struct tc_action_ops act_simp_ops = {
.dump = tcf_simp_dump,
.cleanup = tcf_simp_cleanup,
.init = tcf_simp_init,
- .walk = tcf_generic_walker,
};
MODULE_AUTHOR("Jamal Hadi Salim(2005)");
@@ -210,14 +205,23 @@ MODULE_LICENSE("GPL");
static int __init simp_init_module(void)
{
- int ret = tcf_register_action(&act_simp_ops);
+ int err, ret;
+ err = tcf_hashinfo_init(&simp_hash_info, SIMP_TAB_MASK);
+ if (err)
+ return err;
+
+ ret = tcf_register_action(&act_simp_ops);
if (!ret)
pr_info("Simple TC action Loaded\n");
+ else
+ tcf_hashinfo_destroy(&simp_hash_info);
+
return ret;
}
static void __exit simp_cleanup_module(void)
{
+ tcf_hashinfo_destroy(&simp_hash_info);
tcf_unregister_action(&act_simp_ops);
}
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index cb4221171f93..0fa1aad6e204 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -11,8 +11,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*
* Author: Alexander Duyck <alexander.h.duyck@intel.com>
*/
@@ -29,15 +28,8 @@
#include <net/tc_act/tc_skbedit.h>
#define SKBEDIT_TAB_MASK 15
-static struct tcf_common *tcf_skbedit_ht[SKBEDIT_TAB_MASK + 1];
static u32 skbedit_idx_gen;
-static DEFINE_RWLOCK(skbedit_lock);
-
-static struct tcf_hashinfo skbedit_hash_info = {
- .htab = tcf_skbedit_ht,
- .hmask = SKBEDIT_TAB_MASK,
- .lock = &skbedit_lock,
-};
+static struct tcf_hashinfo skbedit_hash_info;
static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
@@ -120,10 +112,11 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
ret = ACT_P_CREATED;
} else {
d = to_skbedit(pc);
- if (!ovr) {
- tcf_hash_release(pc, bind, &skbedit_hash_info);
+ if (bind)
+ return 0;
+ tcf_hash_release(pc, bind, &skbedit_hash_info);
+ if (!ovr)
return -EEXIST;
- }
}
spin_lock_bh(&d->tcf_lock);
@@ -203,7 +196,6 @@ static struct tc_action_ops act_skbedit_ops = {
.dump = tcf_skbedit_dump,
.cleanup = tcf_skbedit_cleanup,
.init = tcf_skbedit_init,
- .walk = tcf_generic_walker,
};
MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>");
@@ -212,11 +204,15 @@ MODULE_LICENSE("GPL");
static int __init skbedit_init_module(void)
{
+ int err = tcf_hashinfo_init(&skbedit_hash_info, SKBEDIT_TAB_MASK);
+ if (err)
+ return err;
return tcf_register_action(&act_skbedit_ops);
}
static void __exit skbedit_cleanup_module(void)
{
+ tcf_hashinfo_destroy(&skbedit_hash_info);
tcf_unregister_action(&act_skbedit_ops);
}
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 8e118af90973..d8c42b1b88e5 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -31,8 +31,7 @@
#include <net/pkt_cls.h>
/* The list of all installed classifier types */
-
-static struct tcf_proto_ops *tcf_proto_base __read_mostly;
+static LIST_HEAD(tcf_proto_base);
/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);
@@ -41,36 +40,35 @@ static DEFINE_RWLOCK(cls_mod_lock);
static const struct tcf_proto_ops *tcf_proto_lookup_ops(struct nlattr *kind)
{
- const struct tcf_proto_ops *t = NULL;
+ const struct tcf_proto_ops *t, *res = NULL;
if (kind) {
read_lock(&cls_mod_lock);
- for (t = tcf_proto_base; t; t = t->next) {
+ list_for_each_entry(t, &tcf_proto_base, head) {
if (nla_strcmp(kind, t->kind) == 0) {
- if (!try_module_get(t->owner))
- t = NULL;
+ if (try_module_get(t->owner))
+ res = t;
break;
}
}
read_unlock(&cls_mod_lock);
}
- return t;
+ return res;
}
/* Register(unregister) new classifier type */
int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
- struct tcf_proto_ops *t, **tp;
+ struct tcf_proto_ops *t;
int rc = -EEXIST;
write_lock(&cls_mod_lock);
- for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next)
+ list_for_each_entry(t, &tcf_proto_base, head)
if (!strcmp(ops->kind, t->kind))
goto out;
- ops->next = NULL;
- *tp = ops;
+ list_add_tail(&ops->head, &tcf_proto_base);
rc = 0;
out:
write_unlock(&cls_mod_lock);
@@ -80,19 +78,17 @@ EXPORT_SYMBOL(register_tcf_proto_ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
- struct tcf_proto_ops *t, **tp;
+ struct tcf_proto_ops *t;
int rc = -ENOENT;
write_lock(&cls_mod_lock);
- for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next)
- if (t == ops)
+ list_for_each_entry(t, &tcf_proto_base, head) {
+ if (t == ops) {
+ list_del(&t->head);
+ rc = 0;
break;
-
- if (!t)
- goto out;
- *tp = t->next;
- rc = 0;
-out:
+ }
+ }
write_unlock(&cls_mod_lock);
return rc;
}
@@ -500,46 +496,41 @@ out:
void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
- if (exts->action) {
- tcf_action_destroy(exts->action, TCA_ACT_UNBIND);
- exts->action = NULL;
- }
+ tcf_action_destroy(&exts->actions, TCA_ACT_UNBIND);
+ INIT_LIST_HEAD(&exts->actions);
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);
int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
- struct nlattr *rate_tlv, struct tcf_exts *exts,
- const struct tcf_ext_map *map)
+ struct nlattr *rate_tlv, struct tcf_exts *exts)
{
- memset(exts, 0, sizeof(*exts));
-
#ifdef CONFIG_NET_CLS_ACT
{
struct tc_action *act;
- if (map->police && tb[map->police]) {
- act = tcf_action_init_1(net, tb[map->police], rate_tlv,
+ INIT_LIST_HEAD(&exts->actions);
+ if (exts->police && tb[exts->police]) {
+ act = tcf_action_init_1(net, tb[exts->police], rate_tlv,
"police", TCA_ACT_NOREPLACE,
TCA_ACT_BIND);
if (IS_ERR(act))
return PTR_ERR(act);
- act->type = TCA_OLD_COMPAT;
- exts->action = act;
- } else if (map->action && tb[map->action]) {
- act = tcf_action_init(net, tb[map->action], rate_tlv,
+ act->type = exts->type = TCA_OLD_COMPAT;
+ list_add(&act->list, &exts->actions);
+ } else if (exts->action && tb[exts->action]) {
+ int err;
+ err = tcf_action_init(net, tb[exts->action], rate_tlv,
NULL, TCA_ACT_NOREPLACE,
- TCA_ACT_BIND);
- if (IS_ERR(act))
- return PTR_ERR(act);
-
- exts->action = act;
+ TCA_ACT_BIND, &exts->actions);
+ if (err)
+ return err;
}
}
#else
- if ((map->action && tb[map->action]) ||
- (map->police && tb[map->police]))
+ if ((exts->action && tb[exts->action]) ||
+ (exts->police && tb[exts->police]))
return -EOPNOTSUPP;
#endif
@@ -551,43 +542,44 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
- if (src->action) {
- struct tc_action *act;
+ if (!list_empty(&src->actions)) {
+ LIST_HEAD(tmp);
tcf_tree_lock(tp);
- act = dst->action;
- dst->action = src->action;
+ list_splice_init(&dst->actions, &tmp);
+ list_splice(&src->actions, &dst->actions);
tcf_tree_unlock(tp);
- if (act)
- tcf_action_destroy(act, TCA_ACT_UNBIND);
+ tcf_action_destroy(&tmp, TCA_ACT_UNBIND);
}
#endif
}
EXPORT_SYMBOL(tcf_exts_change);
-int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts,
- const struct tcf_ext_map *map)
+#define tcf_exts_first_act(ext) \
+ list_first_entry(&(exts)->actions, struct tc_action, list)
+
+int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
- if (map->action && exts->action) {
+ if (exts->action && !list_empty(&exts->actions)) {
/*
* again for backward compatible mode - we want
* to work with both old and new modes of entering
* tc data even if iproute2 was newer - jhs
*/
struct nlattr *nest;
-
- if (exts->action->type != TCA_OLD_COMPAT) {
- nest = nla_nest_start(skb, map->action);
+ if (exts->type != TCA_OLD_COMPAT) {
+ nest = nla_nest_start(skb, exts->action);
if (nest == NULL)
goto nla_put_failure;
- if (tcf_action_dump(skb, exts->action, 0, 0) < 0)
+ if (tcf_action_dump(skb, &exts->actions, 0, 0) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest);
- } else if (map->police) {
- nest = nla_nest_start(skb, map->police);
- if (nest == NULL)
+ } else if (exts->police) {
+ struct tc_action *act = tcf_exts_first_act(exts);
+ nest = nla_nest_start(skb, exts->police);
+ if (nest == NULL || !act)
goto nla_put_failure;
- if (tcf_action_dump_old(skb, exts->action, 0, 0) < 0)
+ if (tcf_action_dump_old(skb, act, 0, 0) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest);
}
@@ -600,17 +592,14 @@ nla_put_failure: __attribute__ ((unused))
EXPORT_SYMBOL(tcf_exts_dump);
-int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts,
- const struct tcf_ext_map *map)
+int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
- if (exts->action)
- if (tcf_action_copy_stats(skb, exts->action, 1) < 0)
- goto nla_put_failure;
+ struct tc_action *a = tcf_exts_first_act(exts);
+ if (tcf_action_copy_stats(skb, a, 1) < 0)
+ return -1;
#endif
return 0;
-nla_put_failure: __attribute__ ((unused))
- return -1;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);
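For classifier authors the net effect is that the static tcf_ext_map tables disappear and the action/police attribute ids travel inside the exts themselves. The pattern every cls_* change below repeats, shown here with hypothetical TCA_FOO_* ids:

/* Inside a classifier's change/set_parms path; TCA_FOO_ACT and
 * TCA_FOO_POLICE are hypothetical attribute ids.
 */
struct tcf_exts e;
int err;

tcf_exts_init(&e, TCA_FOO_ACT, TCA_FOO_POLICE);
err = tcf_exts_validate(net, tp, tb, est, &e);
if (err < 0)
	return err;
/* ... on success, commit with tcf_exts_change(tp, &f->exts, &e); */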
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 636d9131d870..b6552035d1f4 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -34,11 +34,6 @@ struct basic_filter {
struct list_head link;
};
-static const struct tcf_ext_map basic_ext_map = {
- .action = TCA_BASIC_ACT,
- .police = TCA_BASIC_POLICE
-};
-
static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res)
{
@@ -141,7 +136,8 @@ static int basic_set_parms(struct net *net, struct tcf_proto *tp,
struct tcf_exts e;
struct tcf_ematch_tree t;
- err = tcf_exts_validate(net, tp, tb, est, &e, &basic_ext_map);
+ tcf_exts_init(&e, TCA_BASIC_ACT, TCA_BASIC_POLICE);
+ err = tcf_exts_validate(net, tp, tb, est, &e);
if (err < 0)
return err;
@@ -191,6 +187,7 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
if (f == NULL)
goto errout;
+ tcf_exts_init(&f->exts, TCA_BASIC_ACT, TCA_BASIC_POLICE);
err = -EINVAL;
if (handle)
f->handle = handle;
@@ -263,13 +260,13 @@ static int basic_dump(struct tcf_proto *tp, unsigned long fh,
nla_put_u32(skb, TCA_BASIC_CLASSID, f->res.classid))
goto nla_put_failure;
- if (tcf_exts_dump(skb, &f->exts, &basic_ext_map) < 0 ||
+ if (tcf_exts_dump(skb, &f->exts) < 0 ||
tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest);
- if (tcf_exts_dump_stats(skb, &f->exts, &basic_ext_map) < 0)
+ if (tcf_exts_dump_stats(skb, &f->exts) < 0)
goto nla_put_failure;
return skb->len;
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 1002a8226281..00a5a585e5f1 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -46,11 +46,6 @@ static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
.len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};
-static const struct tcf_ext_map bpf_ext_map = {
- .action = TCA_BPF_ACT,
- .police = TCA_BPF_POLICE,
-};
-
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res)
{
@@ -174,7 +169,8 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
if (!tb[TCA_BPF_OPS_LEN] || !tb[TCA_BPF_OPS] || !tb[TCA_BPF_CLASSID])
return -EINVAL;
- ret = tcf_exts_validate(net, tp, tb, est, &exts, &bpf_ext_map);
+ tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
+ ret = tcf_exts_validate(net, tp, tb, est, &exts);
if (ret < 0)
return ret;
@@ -271,6 +267,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
if (prog == NULL)
return -ENOBUFS;
+ tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
if (handle == 0)
prog->handle = cls_bpf_grab_new_handle(tp, head);
else
@@ -323,14 +320,14 @@ static int cls_bpf_dump(struct tcf_proto *tp, unsigned long fh,
if (nla == NULL)
goto nla_put_failure;
- memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));
+ memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));
- if (tcf_exts_dump(skb, &prog->exts, &bpf_ext_map) < 0)
+ if (tcf_exts_dump(skb, &prog->exts) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest);
- if (tcf_exts_dump_stats(skb, &prog->exts, &bpf_ext_map) < 0)
+ if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
goto nla_put_failure;
return skb->len;
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 16006c92c3fd..8349fcdc50f3 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -11,109 +11,13 @@
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/errno.h>
#include <linux/skbuff.h>
-#include <linux/cgroup.h>
#include <linux/rcupdate.h>
-#include <linux/fdtable.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/cls_cgroup.h>
-static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state *css)
-{
- return css ? container_of(css, struct cgroup_cls_state, css) : NULL;
-}
-
-static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
-{
- return css_cls_state(task_css(p, net_cls_subsys_id));
-}
-
-static struct cgroup_subsys_state *
-cgrp_css_alloc(struct cgroup_subsys_state *parent_css)
-{
- struct cgroup_cls_state *cs;
-
- cs = kzalloc(sizeof(*cs), GFP_KERNEL);
- if (!cs)
- return ERR_PTR(-ENOMEM);
- return &cs->css;
-}
-
-static int cgrp_css_online(struct cgroup_subsys_state *css)
-{
- struct cgroup_cls_state *cs = css_cls_state(css);
- struct cgroup_cls_state *parent = css_cls_state(css_parent(css));
-
- if (parent)
- cs->classid = parent->classid;
- return 0;
-}
-
-static void cgrp_css_free(struct cgroup_subsys_state *css)
-{
- kfree(css_cls_state(css));
-}
-
-static int update_classid(const void *v, struct file *file, unsigned n)
-{
- int err;
- struct socket *sock = sock_from_file(file, &err);
- if (sock)
- sock->sk->sk_classid = (u32)(unsigned long)v;
- return 0;
-}
-
-static void cgrp_attach(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset)
-{
- struct task_struct *p;
- struct cgroup_cls_state *cs = css_cls_state(css);
- void *v = (void *)(unsigned long)cs->classid;
-
- cgroup_taskset_for_each(p, css, tset) {
- task_lock(p);
- iterate_fd(p->files, 0, update_classid, v);
- task_unlock(p);
- }
-}
-
-static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
-{
- return css_cls_state(css)->classid;
-}
-
-static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
- u64 value)
-{
- css_cls_state(css)->classid = (u32) value;
- return 0;
-}
-
-static struct cftype ss_files[] = {
- {
- .name = "classid",
- .read_u64 = read_classid,
- .write_u64 = write_classid,
- },
- { } /* terminate */
-};
-
-struct cgroup_subsys net_cls_subsys = {
- .name = "net_cls",
- .css_alloc = cgrp_css_alloc,
- .css_online = cgrp_css_online,
- .css_free = cgrp_css_free,
- .attach = cgrp_attach,
- .subsys_id = net_cls_subsys_id,
- .base_cftypes = ss_files,
- .module = THIS_MODULE,
-};
-
struct cls_cgroup_head {
u32 handle;
struct tcf_exts exts;
@@ -172,11 +76,6 @@ static int cls_cgroup_init(struct tcf_proto *tp)
return 0;
}
-static const struct tcf_ext_map cgroup_ext_map = {
- .action = TCA_CGROUP_ACT,
- .police = TCA_CGROUP_POLICE,
-};
-
static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
[TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED },
};
@@ -203,6 +102,7 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
if (head == NULL)
return -ENOBUFS;
+ tcf_exts_init(&head->exts, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
head->handle = handle;
tcf_tree_lock(tp);
@@ -218,8 +118,8 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
if (err < 0)
return err;
- err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e,
- &cgroup_ext_map);
+ tcf_exts_init(&e, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
+ err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e);
if (err < 0)
return err;
@@ -277,13 +177,13 @@ static int cls_cgroup_dump(struct tcf_proto *tp, unsigned long fh,
if (nest == NULL)
goto nla_put_failure;
- if (tcf_exts_dump(skb, &head->exts, &cgroup_ext_map) < 0 ||
+ if (tcf_exts_dump(skb, &head->exts) < 0 ||
tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest);
- if (tcf_exts_dump_stats(skb, &head->exts, &cgroup_ext_map) < 0)
+ if (tcf_exts_dump_stats(skb, &head->exts) < 0)
goto nla_put_failure;
return skb->len;
@@ -309,25 +209,12 @@ static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
static int __init init_cgroup_cls(void)
{
- int ret;
-
- ret = cgroup_load_subsys(&net_cls_subsys);
- if (ret)
- goto out;
-
- ret = register_tcf_proto_ops(&cls_cgroup_ops);
- if (ret)
- cgroup_unload_subsys(&net_cls_subsys);
-
-out:
- return ret;
+ return register_tcf_proto_ops(&cls_cgroup_ops);
}
static void __exit exit_cgroup_cls(void)
{
unregister_tcf_proto_ops(&cls_cgroup_ops);
-
- cgroup_unload_subsys(&net_cls_subsys);
}
module_init(init_cgroup_cls);
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 7881e2fccbc2..dfd18a5c3e81 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -56,11 +56,6 @@ struct flow_filter {
u32 hashrnd;
};
-static const struct tcf_ext_map flow_ext_map = {
- .action = TCA_FLOW_ACT,
- .police = TCA_FLOW_POLICE,
-};
-
static inline u32 addr_fold(void *addr)
{
unsigned long a = (unsigned long)addr;
@@ -220,7 +215,7 @@ static u32 flow_get_vlan_tag(const struct sk_buff *skb)
static u32 flow_get_rxhash(struct sk_buff *skb)
{
- return skb_get_rxhash(skb);
+ return skb_get_hash(skb);
}
static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
@@ -397,7 +392,8 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
return -EOPNOTSUPP;
}
- err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, &flow_ext_map);
+ tcf_exts_init(&e, TCA_FLOW_ACT, TCA_FLOW_POLICE);
+ err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e);
if (err < 0)
return err;
@@ -455,6 +451,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
f->handle = handle;
f->mask = ~0U;
+ tcf_exts_init(&f->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
get_random_bytes(&f->hashrnd, 4);
f->perturb_timer.function = flow_perturbation;
@@ -608,7 +605,7 @@ static int flow_dump(struct tcf_proto *tp, unsigned long fh,
nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
goto nla_put_failure;
- if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0)
+ if (tcf_exts_dump(skb, &f->exts) < 0)
goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
if (f->ematches.hdr.nmatches &&
@@ -617,7 +614,7 @@ static int flow_dump(struct tcf_proto *tp, unsigned long fh,
#endif
nla_nest_end(skb, nest);
- if (tcf_exts_dump_stats(skb, &f->exts, &flow_ext_map) < 0)
+ if (tcf_exts_dump_stats(skb, &f->exts) < 0)
goto nla_put_failure;
return skb->len;
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 9b97172db84a..3f9cece13807 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -46,11 +46,6 @@ struct fw_filter {
struct tcf_exts exts;
};
-static const struct tcf_ext_map fw_ext_map = {
- .action = TCA_FW_ACT,
- .police = TCA_FW_POLICE
-};
-
static inline int fw_hash(u32 handle)
{
if (HTSIZE == 4096)
@@ -200,7 +195,8 @@ fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,
u32 mask;
int err;
- err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, &fw_ext_map);
+ tcf_exts_init(&e, TCA_FW_ACT, TCA_FW_POLICE);
+ err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e);
if (err < 0)
return err;
@@ -280,6 +276,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
if (f == NULL)
return -ENOBUFS;
+ tcf_exts_init(&f->exts, TCA_FW_ACT, TCA_FW_POLICE);
f->id = handle;
err = fw_change_attrs(net, tp, f, tb, tca, base);
@@ -359,12 +356,12 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh,
nla_put_u32(skb, TCA_FW_MASK, head->mask))
goto nla_put_failure;
- if (tcf_exts_dump(skb, &f->exts, &fw_ext_map) < 0)
+ if (tcf_exts_dump(skb, &f->exts) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest);
- if (tcf_exts_dump_stats(skb, &f->exts, &fw_ext_map) < 0)
+ if (tcf_exts_dump_stats(skb, &f->exts) < 0)
goto nla_put_failure;
return skb->len;
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 37da567d833e..2473953a5948 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -59,11 +59,6 @@ struct route4_filter {
#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
-static const struct tcf_ext_map route_ext_map = {
- .police = TCA_ROUTE4_POLICE,
- .action = TCA_ROUTE4_ACT
-};
-
static inline int route4_fastmap_hash(u32 id, int iif)
{
return id & 0xF;
@@ -347,7 +342,8 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
struct route4_bucket *b;
struct tcf_exts e;
- err = tcf_exts_validate(net, tp, tb, est, &e, &route_ext_map);
+ tcf_exts_init(&e, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
+ err = tcf_exts_validate(net, tp, tb, est, &e);
if (err < 0)
return err;
@@ -481,6 +477,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
if (f == NULL)
goto errout;
+ tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
err = route4_set_parms(net, tp, base, f, handle, head, tb,
tca[TCA_RATE], 1);
if (err < 0)
@@ -589,12 +586,12 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh,
nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
goto nla_put_failure;
- if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
+ if (tcf_exts_dump(skb, &f->exts) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest);
- if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
+ if (tcf_exts_dump_stats(skb, &f->exts) < 0)
goto nla_put_failure;
return skb->len;
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 252d8b05872e..4f25c2ac825b 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -116,11 +116,6 @@ static inline unsigned int hash_src(__be32 *src)
return h & 0xF;
}
-static struct tcf_ext_map rsvp_ext_map = {
- .police = TCA_RSVP_POLICE,
- .action = TCA_RSVP_ACT
-};
-
#define RSVP_APPLY_RESULT() \
{ \
int r = tcf_exts_exec(skb, &f->exts, res); \
@@ -440,7 +435,8 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
if (err < 0)
return err;
- err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, &rsvp_ext_map);
+ tcf_exts_init(&e, TCA_RSVP_ACT, TCA_RSVP_POLICE);
+ err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e);
if (err < 0)
return err;
@@ -471,6 +467,7 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
if (f == NULL)
goto errout2;
+ tcf_exts_init(&f->exts, TCA_RSVP_ACT, TCA_RSVP_POLICE);
h2 = 16;
if (tb[TCA_RSVP_SRC]) {
memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
@@ -633,12 +630,12 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src))
goto nla_put_failure;
- if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0)
+ if (tcf_exts_dump(skb, &f->exts) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest);
- if (tcf_exts_dump_stats(skb, &f->exts, &rsvp_ext_map) < 0)
+ if (tcf_exts_dump_stats(skb, &f->exts) < 0)
goto nla_put_failure;
return skb->len;
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index b86535a40169..ffad18791c93 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -50,11 +50,6 @@ struct tcindex_data {
int fall_through; /* 0: only classify if explicit match */
};
-static const struct tcf_ext_map tcindex_ext_map = {
- .police = TCA_TCINDEX_POLICE,
- .action = TCA_TCINDEX_ACT
-};
-
static inline int
tcindex_filter_is_set(struct tcindex_filter_result *r)
{
@@ -209,17 +204,21 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
struct tcindex_filter *f = NULL; /* make gcc behave */
struct tcf_exts e;
- err = tcf_exts_validate(net, tp, tb, est, &e, &tcindex_ext_map);
+ tcf_exts_init(&e, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+ err = tcf_exts_validate(net, tp, tb, est, &e);
if (err < 0)
return err;
memcpy(&cp, p, sizeof(cp));
memset(&new_filter_result, 0, sizeof(new_filter_result));
+ tcf_exts_init(&new_filter_result.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
if (old_r)
memcpy(&cr, r, sizeof(cr));
- else
+ else {
memset(&cr, 0, sizeof(cr));
+ tcf_exts_init(&cr.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+ }
if (tb[TCA_TCINDEX_HASH])
cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
@@ -468,11 +467,11 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh,
nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
goto nla_put_failure;
- if (tcf_exts_dump(skb, &r->exts, &tcindex_ext_map) < 0)
+ if (tcf_exts_dump(skb, &r->exts) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest);
- if (tcf_exts_dump_stats(skb, &r->exts, &tcindex_ext_map) < 0)
+ if (tcf_exts_dump_stats(skb, &r->exts) < 0)
goto nla_put_failure;
}
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index eb07a1e536e6..20f2fb79c747 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -79,11 +79,6 @@ struct tc_u_common {
u32 hgenerator;
};
-static const struct tcf_ext_map u32_ext_map = {
- .action = TCA_U32_ACT,
- .police = TCA_U32_POLICE
-};
-
static inline unsigned int u32_hash_fold(__be32 key,
const struct tc_u32_sel *sel,
u8 fshift)
@@ -352,7 +347,7 @@ static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
return 0;
}
-static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
+static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
struct tc_u_knode **kp;
struct tc_u_hnode *ht = key->ht_up;
@@ -496,7 +491,8 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
int err;
struct tcf_exts e;
- err = tcf_exts_validate(net, tp, tb, est, &e, &u32_ext_map);
+ tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE);
+ err = tcf_exts_validate(net, tp, tb, est, &e);
if (err < 0)
return err;
@@ -646,6 +642,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
n->ht_up = ht;
n->handle = handle;
n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
+ tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
#ifdef CONFIG_CLS_U32_MARK
if (tb[TCA_U32_MARK]) {
@@ -759,7 +756,7 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
goto nla_put_failure;
#endif
- if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0)
+ if (tcf_exts_dump(skb, &n->exts) < 0)
goto nla_put_failure;
#ifdef CONFIG_NET_CLS_IND
@@ -778,7 +775,7 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
nla_nest_end(skb, nest);
if (TC_U32_KEY(n->handle))
- if (tcf_exts_dump_stats(skb, &n->exts, &u32_ext_map) < 0)
+ if (tcf_exts_dump_stats(skb, &n->exts) < 0)
goto nla_put_failure;
return skb->len;
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index e5cef9567225..9b8c0b0e60d7 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -222,7 +222,7 @@ META_COLLECTOR(int_maclen)
META_COLLECTOR(int_rxhash)
{
- dst->value = skb_get_rxhash(skb);
+ dst->value = skb_get_hash(skb);
}
/**************************************************************************
@@ -271,40 +271,52 @@ META_COLLECTOR(int_rtiif)
* Socket Attributes
**************************************************************************/
-#define SKIP_NONLOCAL(skb) \
- if (unlikely(skb->sk == NULL)) { \
- *err = -1; \
- return; \
- }
+#define skip_nonlocal(skb) \
+ (unlikely(skb->sk == NULL))
META_COLLECTOR(int_sk_family)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = skb->sk->sk_family;
}
META_COLLECTOR(int_sk_state)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = skb->sk->sk_state;
}
META_COLLECTOR(int_sk_reuse)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = skb->sk->sk_reuse;
}
META_COLLECTOR(int_sk_bound_if)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
/* No error if bound_dev_if is 0, legal userspace check */
dst->value = skb->sk->sk_bound_dev_if;
}
META_COLLECTOR(var_sk_bound_if)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
if (skb->sk->sk_bound_dev_if == 0) {
dst->value = (unsigned long) "any";
@@ -322,151 +334,226 @@ META_COLLECTOR(var_sk_bound_if)
META_COLLECTOR(int_sk_refcnt)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = atomic_read(&skb->sk->sk_refcnt);
}
META_COLLECTOR(int_sk_rcvbuf)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = skb->sk->sk_rcvbuf;
}
META_COLLECTOR(int_sk_shutdown)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = skb->sk->sk_shutdown;
}
META_COLLECTOR(int_sk_proto)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = skb->sk->sk_protocol;
}
META_COLLECTOR(int_sk_type)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = skb->sk->sk_type;
}
META_COLLECTOR(int_sk_rmem_alloc)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = sk_rmem_alloc_get(skb->sk);
}
META_COLLECTOR(int_sk_wmem_alloc)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = sk_wmem_alloc_get(skb->sk);
}
META_COLLECTOR(int_sk_omem_alloc)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = atomic_read(&skb->sk->sk_omem_alloc);
}
META_COLLECTOR(int_sk_rcv_qlen)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = skb->sk->sk_receive_queue.qlen;
}
META_COLLECTOR(int_sk_snd_qlen)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = skb->sk->sk_write_queue.qlen;
}
META_COLLECTOR(int_sk_wmem_queued)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = skb->sk->sk_wmem_queued;
}
META_COLLECTOR(int_sk_fwd_alloc)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = skb->sk->sk_forward_alloc;
}
META_COLLECTOR(int_sk_sndbuf)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = skb->sk->sk_sndbuf;
}
META_COLLECTOR(int_sk_alloc)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = (__force int) skb->sk->sk_allocation;
}
META_COLLECTOR(int_sk_hash)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = skb->sk->sk_hash;
}
META_COLLECTOR(int_sk_lingertime)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = skb->sk->sk_lingertime / HZ;
}
META_COLLECTOR(int_sk_err_qlen)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = skb->sk->sk_error_queue.qlen;
}
META_COLLECTOR(int_sk_ack_bl)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = skb->sk->sk_ack_backlog;
}
META_COLLECTOR(int_sk_max_ack_bl)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = skb->sk->sk_max_ack_backlog;
}
META_COLLECTOR(int_sk_prio)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = skb->sk->sk_priority;
}
META_COLLECTOR(int_sk_rcvlowat)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = skb->sk->sk_rcvlowat;
}
META_COLLECTOR(int_sk_rcvtimeo)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = skb->sk->sk_rcvtimeo / HZ;
}
META_COLLECTOR(int_sk_sndtimeo)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = skb->sk->sk_sndtimeo / HZ;
}
META_COLLECTOR(int_sk_sendmsg_off)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = skb->sk->sk_frag.offset;
}
META_COLLECTOR(int_sk_write_pend)
{
- SKIP_NONLOCAL(skb);
+ if (skip_nonlocal(skb)) {
+ *err = -1;
+ return;
+ }
dst->value = skb->sk->sk_write_pending;
}
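
The em_meta rewrite is mechanical, but worth a note: the old SKIP_NONLOCAL hid a return statement inside a macro, so the control flow of every META_COLLECTOR body was invisible at the call site. The replacement reduces the macro to a plain predicate and moves the early return into the caller. A stand-alone C model of the two styles (illustrative names, not kernel APIs):

#include <stdio.h>

struct sock { int family; };
struct pkt { struct sock *sk; };

/* Old style: control flow (the return) is hidden in the macro. */
#define SKIP_NONLOCAL(p, err)          \
        if ((p)->sk == NULL) {         \
                *(err) = -1;           \
                return;                \
        }

/* New style: the macro is only a predicate; callers own the return. */
#define skip_nonlocal(p) ((p)->sk == NULL)

static void get_family(struct pkt *p, int *dst, int *err)
{
        if (skip_nonlocal(p)) {
                *err = -1;
                return;
        }
        *dst = p->sk->family;
}

int main(void)
{
        struct pkt orphan = { .sk = NULL };
        int dst = 0, err = 0;

        get_family(&orphan, &dst, &err);
        printf("err=%d\n", err);        /* -1: no socket attached */
        return 0;
}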
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index cd81505662b8..1313145e3b86 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -135,7 +135,7 @@ static DEFINE_RWLOCK(qdisc_mod_lock);
static struct Qdisc_ops *qdisc_base;
-/* Register/uregister queueing discipline */
+/* Register/unregister queueing discipline */
int register_qdisc(struct Qdisc_ops *qops)
{
@@ -271,11 +271,15 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
return NULL;
}
-static void qdisc_list_add(struct Qdisc *q)
+void qdisc_list_add(struct Qdisc *q)
{
+ struct Qdisc *root = qdisc_dev(q)->qdisc;
+
+ WARN_ON_ONCE(root == &noop_qdisc);
if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
- list_add_tail(&q->list, &qdisc_dev(q)->qdisc->list);
+ list_add_tail(&q->list, &root->list);
}
+EXPORT_SYMBOL(qdisc_list_add);
void qdisc_list_del(struct Qdisc *q)
{
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 7a42c81a19eb..2f80d01d42a6 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1058,9 +1058,10 @@ static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
q->quanta[prio];
}
- if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) {
- pr_warning("CBQ: class %08x has bad quantum==%ld, repaired.\n",
- cl->common.classid, cl->quantum);
+ if (cl->quantum <= 0 ||
+ cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
+ pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
+ cl->common.classid, cl->quantum);
cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
}
}
@@ -1782,8 +1783,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]);
if (err) {
- if (rtab)
- qdisc_put_rtab(rtab);
+ qdisc_put_rtab(rtab);
return err;
}
}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 3886365cc207..49d6ef338b55 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -47,7 +47,7 @@ struct dsmark_qdisc_data {
static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
{
- return (index <= p->indices && index > 0);
+ return index <= p->indices && index > 0;
}
/* ------------------------- Class/flow operations ------------------------- */
@@ -57,8 +57,8 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
{
struct dsmark_qdisc_data *p = qdisc_priv(sch);
- pr_debug("dsmark_graft(sch %p,[qdisc %p],new %p,old %p)\n",
- sch, p, new, old);
+ pr_debug("%s(sch %p,[qdisc %p],new %p,old %p)\n",
+ __func__, sch, p, new, old);
if (new == NULL) {
new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
@@ -85,8 +85,8 @@ static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
static unsigned long dsmark_get(struct Qdisc *sch, u32 classid)
{
- pr_debug("dsmark_get(sch %p,[qdisc %p],classid %x)\n",
- sch, qdisc_priv(sch), classid);
+ pr_debug("%s(sch %p,[qdisc %p],classid %x)\n",
+ __func__, sch, qdisc_priv(sch), classid);
return TC_H_MIN(classid) + 1;
}
@@ -118,8 +118,8 @@ static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
int err = -EINVAL;
u8 mask = 0;
- pr_debug("dsmark_change(sch %p,[qdisc %p],classid %x,parent %x),"
- "arg 0x%lx\n", sch, p, classid, parent, *arg);
+ pr_debug("%s(sch %p,[qdisc %p],classid %x,parent %x), arg 0x%lx\n",
+ __func__, sch, p, classid, parent, *arg);
if (!dsmark_valid_index(p, *arg)) {
err = -ENOENT;
@@ -166,7 +166,8 @@ static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
struct dsmark_qdisc_data *p = qdisc_priv(sch);
int i;
- pr_debug("dsmark_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);
+ pr_debug("%s(sch %p,[qdisc %p],walker %p)\n",
+ __func__, sch, p, walker);
if (walker->stop)
return;
@@ -199,7 +200,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
struct dsmark_qdisc_data *p = qdisc_priv(sch);
int err;
- pr_debug("dsmark_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
+ pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);
if (p->set_tc_index) {
switch (skb->protocol) {
@@ -275,7 +276,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
struct sk_buff *skb;
u32 index;
- pr_debug("dsmark_dequeue(sch %p,[qdisc %p])\n", sch, p);
+ pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
skb = p->q->ops->dequeue(p->q);
if (skb == NULL)
@@ -303,8 +304,8 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
* and don't need yet another qdisc as a bypass.
*/
if (p->mask[index] != 0xff || p->value[index])
- pr_warning("dsmark_dequeue: unsupported protocol %d\n",
- ntohs(skb->protocol));
+ pr_warn("%s: unsupported protocol %d\n",
+ __func__, ntohs(skb->protocol));
break;
}
@@ -315,7 +316,7 @@ static struct sk_buff *dsmark_peek(struct Qdisc *sch)
{
struct dsmark_qdisc_data *p = qdisc_priv(sch);
- pr_debug("dsmark_peek(sch %p,[qdisc %p])\n", sch, p);
+ pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
return p->q->ops->peek(p->q);
}
@@ -325,7 +326,7 @@ static unsigned int dsmark_drop(struct Qdisc *sch)
struct dsmark_qdisc_data *p = qdisc_priv(sch);
unsigned int len;
- pr_debug("dsmark_reset(sch %p,[qdisc %p])\n", sch, p);
+ pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
if (p->q->ops->drop == NULL)
return 0;
@@ -346,7 +347,7 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
u16 indices;
u8 *mask;
- pr_debug("dsmark_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
+ pr_debug("%s(sch %p,[qdisc %p],opt %p)\n", __func__, sch, p, opt);
if (!opt)
goto errout;
@@ -384,7 +385,7 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
if (p->q == NULL)
p->q = &noop_qdisc;
- pr_debug("dsmark_init: qdisc %p\n", p->q);
+ pr_debug("%s: qdisc %p\n", __func__, p->q);
err = 0;
errout:
@@ -395,7 +396,7 @@ static void dsmark_reset(struct Qdisc *sch)
{
struct dsmark_qdisc_data *p = qdisc_priv(sch);
- pr_debug("dsmark_reset(sch %p,[qdisc %p])\n", sch, p);
+ pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
qdisc_reset(p->q);
sch->q.qlen = 0;
}
@@ -404,7 +405,7 @@ static void dsmark_destroy(struct Qdisc *sch)
{
struct dsmark_qdisc_data *p = qdisc_priv(sch);
- pr_debug("dsmark_destroy(sch %p,[qdisc %p])\n", sch, p);
+ pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
tcf_destroy_chain(&p->filter_list);
qdisc_destroy(p->q);
@@ -417,7 +418,7 @@ static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
struct dsmark_qdisc_data *p = qdisc_priv(sch);
struct nlattr *opts = NULL;
- pr_debug("dsmark_dump_class(sch %p,[qdisc %p],class %ld\n", sch, p, cl);
+ pr_debug("%s(sch %p,[qdisc %p],class %ld\n", __func__, sch, p, cl);
if (!dsmark_valid_index(p, cl))
return -EINVAL;
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 95d843961907..08ef7a42c0e4 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -47,6 +47,7 @@
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
+#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
@@ -225,7 +226,7 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
/* By forcing low order bit to 1, we make sure to not
* collide with a local flow (socket pointers are word aligned)
*/
- sk = (struct sock *)(skb_get_rxhash(skb) | 1L);
+ sk = (struct sock *)(skb_get_hash(skb) | 1L);
}
root = &q->fq_root[hash_32((u32)(long)sk, q->fq_trees_log)];
@@ -578,15 +579,36 @@ static void fq_rehash(struct fq_sched_data *q,
q->stat_gc_flows += fcnt;
}
-static int fq_resize(struct fq_sched_data *q, u32 log)
+static void *fq_alloc_node(size_t sz, int node)
{
+ void *ptr;
+
+ ptr = kmalloc_node(sz, GFP_KERNEL | __GFP_REPEAT | __GFP_NOWARN, node);
+ if (!ptr)
+ ptr = vmalloc_node(sz, node);
+ return ptr;
+}
+
+static void fq_free(void *addr)
+{
+ if (addr && is_vmalloc_addr(addr))
+ vfree(addr);
+ else
+ kfree(addr);
+}
+
+static int fq_resize(struct Qdisc *sch, u32 log)
+{
+ struct fq_sched_data *q = qdisc_priv(sch);
struct rb_root *array;
u32 idx;
if (q->fq_root && log == q->fq_trees_log)
return 0;
- array = kmalloc(sizeof(struct rb_root) << log, GFP_KERNEL);
+ /* If XPS was setup, we can allocate memory on right NUMA node */
+ array = fq_alloc_node(sizeof(struct rb_root) << log,
+ netdev_queue_numa_node_read(sch->dev_queue));
if (!array)
return -ENOMEM;
@@ -595,7 +617,7 @@ static int fq_resize(struct fq_sched_data *q, u32 log)
if (q->fq_root) {
fq_rehash(q, q->fq_root, q->fq_trees_log, array, log);
- kfree(q->fq_root);
+ fq_free(q->fq_root);
}
q->fq_root = array;
q->fq_trees_log = log;
@@ -676,7 +698,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
}
if (!err)
- err = fq_resize(q, fq_log);
+ err = fq_resize(sch, fq_log);
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = fq_dequeue(sch);
@@ -697,7 +719,7 @@ static void fq_destroy(struct Qdisc *sch)
struct fq_sched_data *q = qdisc_priv(sch);
fq_reset(sch);
- kfree(q->fq_root);
+ fq_free(q->fq_root);
qdisc_watchdog_cancel(&q->watchdog);
}
@@ -723,7 +745,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt)
if (opt)
err = fq_change(sch, opt);
else
- err = fq_resize(q, q->fq_trees_log);
+ err = fq_resize(sch, q->fq_trees_log);
return err;
}
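
The fq_alloc_node()/fq_free() pair is a stock kernel idiom: try a physically contiguous kmalloc first (__GFP_NOWARN keeps expected large-order failures quiet), fall back to vmalloc, and let the free side dispatch on is_vmalloc_addr(). Passing fq_resize() the Qdisc instead of the private data is what lets it read the queue's NUMA node via netdev_queue_numa_node_read(), so with XPS configured the rb-tree array is allocated near the CPUs serving that queue. A hedged generic sketch of the idiom (kernel context assumed; later kernels package this very pattern as kvmalloc_node()/kvfree()):

static void *big_alloc_node(size_t sz, int node)
{
        void *ptr;

        /* Contiguous memory is cheaper to walk, so prefer kmalloc,
         * but stay silent when a large allocation fails.
         */
        ptr = kmalloc_node(sz, GFP_KERNEL | __GFP_NOWARN, node);
        if (!ptr)
                ptr = vmalloc_node(sz, node);   /* virtually contiguous */
        return ptr;
}

static void big_free(void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);    /* kfree(NULL) is a no-op */
}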
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 922a09406ba7..32bb942d2faa 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -338,13 +338,13 @@ EXPORT_SYMBOL(netif_carrier_off);
cheaper.
*/
-static int noop_enqueue(struct sk_buff *skb, struct Qdisc * qdisc)
+static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
kfree_skb(skb);
return NET_XMIT_CN;
}
-static struct sk_buff *noop_dequeue(struct Qdisc * qdisc)
+static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
return NULL;
}
@@ -718,8 +718,8 @@ static void attach_default_qdiscs(struct net_device *dev)
} else {
qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
if (qdisc) {
- qdisc->ops->attach(qdisc);
dev->qdisc = qdisc;
+ qdisc->ops->attach(qdisc);
}
}
}
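
The two-line swap in attach_default_qdiscs() pairs with the qdisc_list_add() change in sch_api.c above: attach callbacks (see sch_mq.c and sch_mqprio.c below) may now call qdisc_list_add(), which resolves the device root through qdisc_dev(q)->qdisc. The root therefore has to be published before ->attach() runs, or children would be linked onto noop_qdisc; the new WARN_ON_ONCE catches exactly that ordering bug. Compressed to its essence (kernel-style sketch, not stand-alone):

        dev->qdisc = qdisc;             /* 1: publish the real root */
        qdisc->ops->attach(qdisc);      /* 2: may qdisc_list_add(child),
                                         *    which reads that root
                                         */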
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index d42234c0f13b..12cbc09157fc 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -370,8 +370,8 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
for (i = table->DPs; i < MAX_DPs; i++) {
if (table->tab[i]) {
- pr_warning("GRED: Warning: Destroying "
- "shadowed VQ 0x%x\n", i);
+ pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
+ i);
gred_destroy_vq(table->tab[i]);
table->tab[i] = NULL;
}
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
new file mode 100644
index 000000000000..cf7f614e841b
--- /dev/null
+++ b/net/sched/sch_hhf.c
@@ -0,0 +1,745 @@
+/* net/sched/sch_hhf.c Heavy-Hitter Filter (HHF)
+ *
+ * Copyright (C) 2013 Terry Lam <vtlam@google.com>
+ * Copyright (C) 2013 Nandita Dukkipati <nanditad@google.com>
+ */
+
+#include <linux/jhash.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
+#include <net/flow_keys.h>
+#include <net/pkt_sched.h>
+#include <net/sock.h>
+
+/* Heavy-Hitter Filter (HHF)
+ *
+ * Principles :
+ * Flows are classified into two buckets: non-heavy-hitter and heavy-hitter
+ * buckets. Initially, a new flow starts as non-heavy-hitter. Once classified
+ * as heavy-hitter, it is immediately switched to the heavy-hitter bucket.
+ * The buckets are dequeued by a Weighted Deficit Round Robin (WDRR) scheduler,
+ * in which the heavy-hitter bucket is served with less weight.
+ * In other words, non-heavy-hitters (e.g., short bursts of critical traffic)
+ * are isolated from heavy-hitters (e.g., persistent bulk traffic) and also have
+ * a higher share of bandwidth.
+ *
+ * To capture heavy-hitters, we use the "multi-stage filter" algorithm in the
+ * following paper:
+ * [EV02] C. Estan and G. Varghese, "New Directions in Traffic Measurement and
+ * Accounting", in ACM SIGCOMM, 2002.
+ *
+ * Conceptually, a multi-stage filter comprises k independent hash functions
+ * and k counter arrays. Packets are indexed into k counter arrays by k hash
+ * functions, respectively. The counters are then increased by the packet sizes.
+ * Therefore,
+ * - For a heavy-hitter flow: *all* of its k array counters must be large.
+ * - For a non-heavy-hitter flow: some of its k array counters can be large
+ * due to hash collision with other small flows; however, with high
+ * probability, not *all* k counters are large.
+ *
+ * By the design of the multi-stage filter algorithm, the false negative rate
+ * (heavy-hitters getting away uncaptured) is zero. However, the algorithm is
+ * susceptible to false positives (non-heavy-hitters mistakenly classified as
+ * heavy-hitters).
+ * Therefore, we also implement the following optimizations to reduce false
+ * positives by avoiding unnecessary increment of the counter values:
+ * - Optimization O1: once a heavy-hitter is identified, its bytes are not
+ * accounted in the array counters. This technique is called "shielding"
+ * in Section 3.3.1 of [EV02].
+ * - Optimization O2: conservative update of counters
+ * (Section 3.3.2 of [EV02]),
+ * New counter value = max {old counter value,
+ * smallest counter value + packet bytes}
+ *
+ * Finally, we refresh the counters periodically since otherwise the counter
+ * values will keep accumulating.
+ *
+ * Once a flow is classified as heavy-hitter, we also save its per-flow state
+ * in an exact-matching flow table so that its subsequent packets can be
+ * dispatched to the heavy-hitter bucket accordingly.
+ *
+ *
+ * At a high level, this qdisc works as follows:
+ * Given a packet p:
+ * - If the flow-id of p (e.g., TCP 5-tuple) is already in the exact-matching
+ * heavy-hitter flow table, denoted table T, then send p to the heavy-hitter
+ * bucket.
+ * - Otherwise, forward p to the multi-stage filter, denoted filter F
+ * + If F decides that p belongs to a non-heavy-hitter flow, then send p
+ * to the non-heavy-hitter bucket.
+ * + Otherwise, if F decides that p belongs to a new heavy-hitter flow,
+ * then set up a new flow entry for the flow-id of p in the table T and
+ * send p to the heavy-hitter bucket.
+ *
+ * In this implementation:
+ * - T is a fixed-size hash-table with 1024 entries. Hash collision is
+ * resolved by linked-list chaining.
+ * - F has four counter arrays, each array containing 1024 32-bit counters.
+ * That means 4 * 1024 * 32 bits = 16KB of memory.
+ * - Since each array in F contains 1024 counters, 10 bits are sufficient to
+ * index into each array.
+ * Hence, instead of having four hash functions, we chop the 32-bit
+ * skb-hash into three 10-bit chunks, and the remaining 10-bit chunk is
+ * computed as the XOR sum of those three chunks.
+ * - We need to clear the counter arrays periodically; however, directly
+ * memsetting 16KB of memory can lead to cache eviction and unwanted delay.
+ * So by representing each counter by a valid bit, we only need to reset
+ * 4K single-bit flags (i.e. 512 bytes) instead of 16KB of memory.
+ * - The Deficit Round Robin engine is taken from fq_codel implementation
+ * (net/sched/sch_fq_codel.c). Note that wdrr_bucket corresponds to
+ * fq_codel_flow in fq_codel implementation.
+ *
+ */
+
+/* Non-configurable parameters */
+#define HH_FLOWS_CNT 1024 /* number of entries in exact-matching table T */
+#define HHF_ARRAYS_CNT 4 /* number of arrays in multi-stage filter F */
+#define HHF_ARRAYS_LEN 1024 /* number of counters in each array of F */
+#define HHF_BIT_MASK_LEN 10 /* masking 10 bits */
+#define HHF_BIT_MASK 0x3FF /* bitmask of 10 bits */
+
+#define WDRR_BUCKET_CNT 2 /* two buckets for Weighted DRR */
+enum wdrr_bucket_idx {
+ WDRR_BUCKET_FOR_HH = 0, /* bucket id for heavy-hitters */
+ WDRR_BUCKET_FOR_NON_HH = 1 /* bucket id for non-heavy-hitters */
+};
+
+#define hhf_time_before(a, b) \
+ (typecheck(u32, a) && typecheck(u32, b) && ((s32)((a) - (b)) < 0))
+
+/* Heavy-hitter per-flow state */
+struct hh_flow_state {
+ u32 hash_id; /* hash of flow-id (e.g. TCP 5-tuple) */
+ u32 hit_timestamp; /* last time heavy-hitter was seen */
+ struct list_head flowchain; /* chaining under hash collision */
+};
+
+/* Weighted Deficit Round Robin (WDRR) scheduler */
+struct wdrr_bucket {
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ struct list_head bucketchain;
+ int deficit;
+};
+
+struct hhf_sched_data {
+ struct wdrr_bucket buckets[WDRR_BUCKET_CNT];
+ u32 perturbation; /* hash perturbation */
+ u32 quantum; /* psched_mtu(qdisc_dev(sch)); */
+ u32 drop_overlimit; /* number of times max qdisc packet
+ * limit was hit
+ */
+ struct list_head *hh_flows; /* table T (currently active HHs) */
+ u32 hh_flows_limit; /* max active HH allocs */
+ u32 hh_flows_overlimit; /* num of disallowed HH allocs */
+ u32 hh_flows_total_cnt; /* total admitted HHs */
+ u32 hh_flows_current_cnt; /* total current HHs */
+ u32 *hhf_arrays[HHF_ARRAYS_CNT]; /* HH filter F */
+ u32 hhf_arrays_reset_timestamp; /* last time hhf_arrays
+ * was reset
+ */
+ unsigned long *hhf_valid_bits[HHF_ARRAYS_CNT]; /* shadow valid bits
+ * of hhf_arrays
+ */
+ /* Similar to the "new_flows" vs. "old_flows" concept in fq_codel DRR */
+ struct list_head new_buckets; /* list of new buckets */
+ struct list_head old_buckets; /* list of old buckets */
+
+ /* Configurable HHF parameters */
+ u32 hhf_reset_timeout; /* interval to reset counter
+ * arrays in filter F
+ * (default 40ms)
+ */
+ u32 hhf_admit_bytes; /* counter thresh to classify as
+ * HH (default 128KB).
+ * With these default values,
+ * 128KB / 40ms = 25 Mbps
+ * i.e., we expect to capture HHs
+ * sending > 25 Mbps.
+ */
+ u32 hhf_evict_timeout; /* aging threshold to evict idle
+ * HHs out of table T. This should
+ * be large enough to avoid
+ * reordering during HH eviction.
+ * (default 1s)
+ */
+ u32 hhf_non_hh_weight; /* WDRR weight for non-HHs
+ * (default 2,
+ * i.e., non-HH : HH = 2 : 1)
+ */
+};
+
+static u32 hhf_time_stamp(void)
+{
+ return jiffies;
+}
+
+static unsigned int skb_hash(const struct hhf_sched_data *q,
+ const struct sk_buff *skb)
+{
+ struct flow_keys keys;
+ unsigned int hash;
+
+ if (skb->sk && skb->sk->sk_hash)
+ return skb->sk->sk_hash;
+
+ skb_flow_dissect(skb, &keys);
+ hash = jhash_3words((__force u32)keys.dst,
+ (__force u32)keys.src ^ keys.ip_proto,
+ (__force u32)keys.ports, q->perturbation);
+ return hash;
+}
+
+/* Looks up a heavy-hitter flow in a chaining list of table T. */
+static struct hh_flow_state *seek_list(const u32 hash,
+ struct list_head *head,
+ struct hhf_sched_data *q)
+{
+ struct hh_flow_state *flow, *next;
+ u32 now = hhf_time_stamp();
+
+ if (list_empty(head))
+ return NULL;
+
+ list_for_each_entry_safe(flow, next, head, flowchain) {
+ u32 prev = flow->hit_timestamp + q->hhf_evict_timeout;
+
+ if (hhf_time_before(prev, now)) {
+ /* Delete expired heavy-hitters, but preserve one entry
+ * to avoid kzalloc() the next time this slot is hit.
+ */
+ if (list_is_last(&flow->flowchain, head))
+ return NULL;
+ list_del(&flow->flowchain);
+ kfree(flow);
+ q->hh_flows_current_cnt--;
+ } else if (flow->hash_id == hash) {
+ return flow;
+ }
+ }
+ return NULL;
+}
+
+/* Returns a flow state entry for a new heavy-hitter. Either reuses an expired
+ * entry or dynamically allocates a new one.
+ */
+static struct hh_flow_state *alloc_new_hh(struct list_head *head,
+ struct hhf_sched_data *q)
+{
+ struct hh_flow_state *flow;
+ u32 now = hhf_time_stamp();
+
+ if (!list_empty(head)) {
+ /* Find an expired heavy-hitter flow entry. */
+ list_for_each_entry(flow, head, flowchain) {
+ u32 prev = flow->hit_timestamp + q->hhf_evict_timeout;
+
+ if (hhf_time_before(prev, now))
+ return flow;
+ }
+ }
+
+ if (q->hh_flows_current_cnt >= q->hh_flows_limit) {
+ q->hh_flows_overlimit++;
+ return NULL;
+ }
+ /* Create new entry. */
+ flow = kzalloc(sizeof(struct hh_flow_state), GFP_ATOMIC);
+ if (!flow)
+ return NULL;
+
+ q->hh_flows_current_cnt++;
+ INIT_LIST_HEAD(&flow->flowchain);
+ list_add_tail(&flow->flowchain, head);
+
+ return flow;
+}
+
+/* Assigns packets to WDRR buckets. Implements a multi-stage filter to
+ * classify heavy-hitters.
+ */
+static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch)
+{
+ struct hhf_sched_data *q = qdisc_priv(sch);
+ u32 tmp_hash, hash;
+ u32 xorsum, filter_pos[HHF_ARRAYS_CNT], flow_pos;
+ struct hh_flow_state *flow;
+ u32 pkt_len, min_hhf_val;
+ int i;
+ u32 prev;
+ u32 now = hhf_time_stamp();
+
+ /* Reset the HHF counter arrays if this is the right time. */
+ prev = q->hhf_arrays_reset_timestamp + q->hhf_reset_timeout;
+ if (hhf_time_before(prev, now)) {
+ for (i = 0; i < HHF_ARRAYS_CNT; i++)
+ bitmap_zero(q->hhf_valid_bits[i], HHF_ARRAYS_LEN);
+ q->hhf_arrays_reset_timestamp = now;
+ }
+
+ /* Get hashed flow-id of the skb. */
+ hash = skb_hash(q, skb);
+
+ /* Check if this packet belongs to an already established HH flow. */
+ flow_pos = hash & HHF_BIT_MASK;
+ flow = seek_list(hash, &q->hh_flows[flow_pos], q);
+ if (flow) { /* found its HH flow */
+ flow->hit_timestamp = now;
+ return WDRR_BUCKET_FOR_HH;
+ }
+
+ /* Now pass the packet through the multi-stage filter. */
+ tmp_hash = hash;
+ xorsum = 0;
+ for (i = 0; i < HHF_ARRAYS_CNT - 1; i++) {
+ /* Split the skb_hash into three 10-bit chunks. */
+ filter_pos[i] = tmp_hash & HHF_BIT_MASK;
+ xorsum ^= filter_pos[i];
+ tmp_hash >>= HHF_BIT_MASK_LEN;
+ }
+ /* The last chunk is computed as XOR sum of other chunks. */
+ filter_pos[HHF_ARRAYS_CNT - 1] = xorsum ^ tmp_hash;
+
+ pkt_len = qdisc_pkt_len(skb);
+ min_hhf_val = ~0U;
+ for (i = 0; i < HHF_ARRAYS_CNT; i++) {
+ u32 val;
+
+ if (!test_bit(filter_pos[i], q->hhf_valid_bits[i])) {
+ q->hhf_arrays[i][filter_pos[i]] = 0;
+ __set_bit(filter_pos[i], q->hhf_valid_bits[i]);
+ }
+
+ val = q->hhf_arrays[i][filter_pos[i]] + pkt_len;
+ if (min_hhf_val > val)
+ min_hhf_val = val;
+ }
+
+ /* Found a new HH iff all counter values > HH admit threshold. */
+ if (min_hhf_val > q->hhf_admit_bytes) {
+ /* Just captured a new heavy-hitter. */
+ flow = alloc_new_hh(&q->hh_flows[flow_pos], q);
+ if (!flow) /* memory alloc problem */
+ return WDRR_BUCKET_FOR_NON_HH;
+ flow->hash_id = hash;
+ flow->hit_timestamp = now;
+ q->hh_flows_total_cnt++;
+
+ /* By returning without updating counters in q->hhf_arrays,
+ * we implicitly implement "shielding" (see Optimization O1).
+ */
+ return WDRR_BUCKET_FOR_HH;
+ }
+
+ /* Conservative update of HHF arrays (see Optimization O2). */
+ for (i = 0; i < HHF_ARRAYS_CNT; i++) {
+ if (q->hhf_arrays[i][filter_pos[i]] < min_hhf_val)
+ q->hhf_arrays[i][filter_pos[i]] = min_hhf_val;
+ }
+ return WDRR_BUCKET_FOR_NON_HH;
+}
+
+/* Removes one skb from head of bucket. */
+static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket)
+{
+ struct sk_buff *skb = bucket->head;
+
+ bucket->head = skb->next;
+ skb->next = NULL;
+ return skb;
+}
+
+/* Tail-adds skb to bucket. */
+static void bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb)
+{
+ if (bucket->head == NULL)
+ bucket->head = skb;
+ else
+ bucket->tail->next = skb;
+ bucket->tail = skb;
+ skb->next = NULL;
+}
+
+static unsigned int hhf_drop(struct Qdisc *sch)
+{
+ struct hhf_sched_data *q = qdisc_priv(sch);
+ struct wdrr_bucket *bucket;
+
+ /* Always try to drop from heavy-hitters first. */
+ bucket = &q->buckets[WDRR_BUCKET_FOR_HH];
+ if (!bucket->head)
+ bucket = &q->buckets[WDRR_BUCKET_FOR_NON_HH];
+
+ if (bucket->head) {
+ struct sk_buff *skb = dequeue_head(bucket);
+
+ sch->q.qlen--;
+ sch->qstats.drops++;
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ kfree_skb(skb);
+ }
+
+ /* Return id of the bucket from which the packet was dropped. */
+ return bucket - q->buckets;
+}
+
+static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ struct hhf_sched_data *q = qdisc_priv(sch);
+ enum wdrr_bucket_idx idx;
+ struct wdrr_bucket *bucket;
+
+ idx = hhf_classify(skb, sch);
+
+ bucket = &q->buckets[idx];
+ bucket_add(bucket, skb);
+ sch->qstats.backlog += qdisc_pkt_len(skb);
+
+ if (list_empty(&bucket->bucketchain)) {
+ unsigned int weight;
+
+ /* The logic of new_buckets vs. old_buckets is the same as
+ * new_flows vs. old_flows in the implementation of fq_codel,
+ * i.e., short bursts of non-HHs should have strict priority.
+ */
+ if (idx == WDRR_BUCKET_FOR_HH) {
+ /* Always move heavy-hitters to old bucket. */
+ weight = 1;
+ list_add_tail(&bucket->bucketchain, &q->old_buckets);
+ } else {
+ weight = q->hhf_non_hh_weight;
+ list_add_tail(&bucket->bucketchain, &q->new_buckets);
+ }
+ bucket->deficit = weight * q->quantum;
+ }
+ if (++sch->q.qlen < sch->limit)
+ return NET_XMIT_SUCCESS;
+
+ q->drop_overlimit++;
+ /* Return Congestion Notification only if we dropped a packet from this
+ * bucket.
+ */
+ if (hhf_drop(sch) == idx)
+ return NET_XMIT_CN;
+
+ /* As we dropped a packet, better let the upper stack know. */
+ qdisc_tree_decrease_qlen(sch, 1);
+ return NET_XMIT_SUCCESS;
+}
+
+static struct sk_buff *hhf_dequeue(struct Qdisc *sch)
+{
+ struct hhf_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb = NULL;
+ struct wdrr_bucket *bucket;
+ struct list_head *head;
+
+begin:
+ head = &q->new_buckets;
+ if (list_empty(head)) {
+ head = &q->old_buckets;
+ if (list_empty(head))
+ return NULL;
+ }
+ bucket = list_first_entry(head, struct wdrr_bucket, bucketchain);
+
+ if (bucket->deficit <= 0) {
+ int weight = (bucket - q->buckets == WDRR_BUCKET_FOR_HH) ?
+ 1 : q->hhf_non_hh_weight;
+
+ bucket->deficit += weight * q->quantum;
+ list_move_tail(&bucket->bucketchain, &q->old_buckets);
+ goto begin;
+ }
+
+ if (bucket->head) {
+ skb = dequeue_head(bucket);
+ sch->q.qlen--;
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ }
+
+ if (!skb) {
+ /* Force a pass through old_buckets to prevent starvation. */
+ if ((head == &q->new_buckets) && !list_empty(&q->old_buckets))
+ list_move_tail(&bucket->bucketchain, &q->old_buckets);
+ else
+ list_del_init(&bucket->bucketchain);
+ goto begin;
+ }
+ qdisc_bstats_update(sch, skb);
+ bucket->deficit -= qdisc_pkt_len(skb);
+
+ return skb;
+}
+
+static void hhf_reset(struct Qdisc *sch)
+{
+ struct sk_buff *skb;
+
+ while ((skb = hhf_dequeue(sch)) != NULL)
+ kfree_skb(skb);
+}
+
+static void *hhf_zalloc(size_t sz)
+{
+ void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
+
+ if (!ptr)
+ ptr = vzalloc(sz);
+
+ return ptr;
+}
+
+static void hhf_free(void *addr)
+{
+ if (addr) {
+ if (is_vmalloc_addr(addr))
+ vfree(addr);
+ else
+ kfree(addr);
+ }
+}
+
+static void hhf_destroy(struct Qdisc *sch)
+{
+ int i;
+ struct hhf_sched_data *q = qdisc_priv(sch);
+
+ for (i = 0; i < HHF_ARRAYS_CNT; i++) {
+ hhf_free(q->hhf_arrays[i]);
+ hhf_free(q->hhf_valid_bits[i]);
+ }
+
+ for (i = 0; i < HH_FLOWS_CNT; i++) {
+ struct hh_flow_state *flow, *next;
+ struct list_head *head = &q->hh_flows[i];
+
+ if (list_empty(head))
+ continue;
+ list_for_each_entry_safe(flow, next, head, flowchain) {
+ list_del(&flow->flowchain);
+ kfree(flow);
+ }
+ }
+ hhf_free(q->hh_flows);
+}
+
+static const struct nla_policy hhf_policy[TCA_HHF_MAX + 1] = {
+ [TCA_HHF_BACKLOG_LIMIT] = { .type = NLA_U32 },
+ [TCA_HHF_QUANTUM] = { .type = NLA_U32 },
+ [TCA_HHF_HH_FLOWS_LIMIT] = { .type = NLA_U32 },
+ [TCA_HHF_RESET_TIMEOUT] = { .type = NLA_U32 },
+ [TCA_HHF_ADMIT_BYTES] = { .type = NLA_U32 },
+ [TCA_HHF_EVICT_TIMEOUT] = { .type = NLA_U32 },
+ [TCA_HHF_NON_HH_WEIGHT] = { .type = NLA_U32 },
+};
+
+static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct hhf_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_HHF_MAX + 1];
+ unsigned int qlen;
+ int err;
+ u64 non_hh_quantum;
+ u32 new_quantum = q->quantum;
+ u32 new_hhf_non_hh_weight = q->hhf_non_hh_weight;
+
+ if (!opt)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_HHF_MAX, opt, hhf_policy);
+ if (err < 0)
+ return err;
+
+ sch_tree_lock(sch);
+
+ if (tb[TCA_HHF_BACKLOG_LIMIT])
+ sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]);
+
+ if (tb[TCA_HHF_QUANTUM])
+ new_quantum = nla_get_u32(tb[TCA_HHF_QUANTUM]);
+
+ if (tb[TCA_HHF_NON_HH_WEIGHT])
+ new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);
+
+ non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
+ if (non_hh_quantum > INT_MAX)
+ return -EINVAL;
+ q->quantum = new_quantum;
+ q->hhf_non_hh_weight = new_hhf_non_hh_weight;
+
+ if (tb[TCA_HHF_HH_FLOWS_LIMIT])
+ q->hh_flows_limit = nla_get_u32(tb[TCA_HHF_HH_FLOWS_LIMIT]);
+
+ if (tb[TCA_HHF_RESET_TIMEOUT]) {
+ u32 ms = nla_get_u32(tb[TCA_HHF_RESET_TIMEOUT]);
+
+ q->hhf_reset_timeout = msecs_to_jiffies(ms);
+ }
+
+ if (tb[TCA_HHF_ADMIT_BYTES])
+ q->hhf_admit_bytes = nla_get_u32(tb[TCA_HHF_ADMIT_BYTES]);
+
+ if (tb[TCA_HHF_EVICT_TIMEOUT]) {
+ u32 ms = nla_get_u32(tb[TCA_HHF_EVICT_TIMEOUT]);
+
+ q->hhf_evict_timeout = msecs_to_jiffies(ms);
+ }
+
+ qlen = sch->q.qlen;
+ while (sch->q.qlen > sch->limit) {
+ struct sk_buff *skb = hhf_dequeue(sch);
+
+ kfree_skb(skb);
+ }
+ qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+
+ sch_tree_unlock(sch);
+ return 0;
+}
+
+static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct hhf_sched_data *q = qdisc_priv(sch);
+ int i;
+
+ sch->limit = 1000;
+ q->quantum = psched_mtu(qdisc_dev(sch));
+ q->perturbation = net_random();
+ INIT_LIST_HEAD(&q->new_buckets);
+ INIT_LIST_HEAD(&q->old_buckets);
+
+ /* Configurable HHF parameters */
+ q->hhf_reset_timeout = HZ / 25; /* 40 ms */
+ q->hhf_admit_bytes = 131072; /* 128 KB */
+ q->hhf_evict_timeout = HZ; /* 1 sec */
+ q->hhf_non_hh_weight = 2;
+
+ if (opt) {
+ int err = hhf_change(sch, opt);
+
+ if (err)
+ return err;
+ }
+
+ if (!q->hh_flows) {
+ /* Initialize heavy-hitter flow table. */
+ q->hh_flows = hhf_zalloc(HH_FLOWS_CNT *
+ sizeof(struct list_head));
+ if (!q->hh_flows)
+ return -ENOMEM;
+ for (i = 0; i < HH_FLOWS_CNT; i++)
+ INIT_LIST_HEAD(&q->hh_flows[i]);
+
+ /* Cap max active HHs at twice len of hh_flows table. */
+ q->hh_flows_limit = 2 * HH_FLOWS_CNT;
+ q->hh_flows_overlimit = 0;
+ q->hh_flows_total_cnt = 0;
+ q->hh_flows_current_cnt = 0;
+
+ /* Initialize heavy-hitter filter arrays. */
+ for (i = 0; i < HHF_ARRAYS_CNT; i++) {
+ q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN *
+ sizeof(u32));
+ if (!q->hhf_arrays[i]) {
+ hhf_destroy(sch);
+ return -ENOMEM;
+ }
+ }
+ q->hhf_arrays_reset_timestamp = hhf_time_stamp();
+
+ /* Initialize valid bits of heavy-hitter filter arrays. */
+ for (i = 0; i < HHF_ARRAYS_CNT; i++) {
+ q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN /
+ BITS_PER_BYTE);
+ if (!q->hhf_valid_bits[i]) {
+ hhf_destroy(sch);
+ return -ENOMEM;
+ }
+ }
+
+ /* Initialize Weighted DRR buckets. */
+ for (i = 0; i < WDRR_BUCKET_CNT; i++) {
+ struct wdrr_bucket *bucket = q->buckets + i;
+
+ INIT_LIST_HEAD(&bucket->bucketchain);
+ }
+ }
+
+ return 0;
+}
+
+static int hhf_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct hhf_sched_data *q = qdisc_priv(sch);
+ struct nlattr *opts;
+
+ opts = nla_nest_start(skb, TCA_OPTIONS);
+ if (opts == NULL)
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_HHF_BACKLOG_LIMIT, sch->limit) ||
+ nla_put_u32(skb, TCA_HHF_QUANTUM, q->quantum) ||
+ nla_put_u32(skb, TCA_HHF_HH_FLOWS_LIMIT, q->hh_flows_limit) ||
+ nla_put_u32(skb, TCA_HHF_RESET_TIMEOUT,
+ jiffies_to_msecs(q->hhf_reset_timeout)) ||
+ nla_put_u32(skb, TCA_HHF_ADMIT_BYTES, q->hhf_admit_bytes) ||
+ nla_put_u32(skb, TCA_HHF_EVICT_TIMEOUT,
+ jiffies_to_msecs(q->hhf_evict_timeout)) ||
+ nla_put_u32(skb, TCA_HHF_NON_HH_WEIGHT, q->hhf_non_hh_weight))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, opts);
+ return skb->len;
+
+nla_put_failure:
+ return -1;
+}
+
+static int hhf_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+ struct hhf_sched_data *q = qdisc_priv(sch);
+ struct tc_hhf_xstats st = {
+ .drop_overlimit = q->drop_overlimit,
+ .hh_overlimit = q->hh_flows_overlimit,
+ .hh_tot_count = q->hh_flows_total_cnt,
+ .hh_cur_count = q->hh_flows_current_cnt,
+ };
+
+ return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static struct Qdisc_ops hhf_qdisc_ops __read_mostly = {
+ .id = "hhf",
+ .priv_size = sizeof(struct hhf_sched_data),
+
+ .enqueue = hhf_enqueue,
+ .dequeue = hhf_dequeue,
+ .peek = qdisc_peek_dequeued,
+ .drop = hhf_drop,
+ .init = hhf_init,
+ .reset = hhf_reset,
+ .destroy = hhf_destroy,
+ .change = hhf_change,
+ .dump = hhf_dump,
+ .dump_stats = hhf_dump_stats,
+ .owner = THIS_MODULE,
+};
+
+static int __init hhf_module_init(void)
+{
+ return register_qdisc(&hhf_qdisc_ops);
+}
+
+static void __exit hhf_module_exit(void)
+{
+ unregister_qdisc(&hhf_qdisc_ops);
+}
+
+module_init(hhf_module_init)
+module_exit(hhf_module_exit)
+MODULE_AUTHOR("Terry Lam");
+MODULE_AUTHOR("Nandita Dukkipati");
+MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 0e1e38b40025..0db5a6eae87f 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -712,7 +712,7 @@ static s64 htb_do_events(struct htb_sched *q, const int level,
/* too much load - let's continue after a break for scheduling */
if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
- pr_warning("htb: too many events!\n");
+ pr_warn("htb: too many events!\n");
q->warned |= HTB_WARN_TOOMANYEVENTS;
}
@@ -1276,9 +1276,10 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
struct Qdisc *new_q = NULL;
int last_child = 0;
- // TODO: why don't allow to delete subtree ? references ? does
- // tc subsys quarantee us that in htb_destroy it holds no class
- // refs so that we can remove children safely there ?
+ /* TODO: why don't we allow deleting a subtree? References? Does the
+ * tc subsys guarantee us that in htb_destroy it holds no class
+ * refs so that we can remove children safely there?
+ */
if (cl->children || cl->filter_cnt)
return -EBUSY;
@@ -1337,7 +1338,6 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)*arg, *parent;
struct nlattr *opt = tca[TCA_OPTIONS];
- struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
struct nlattr *tb[TCA_HTB_MAX + 1];
struct tc_htb_opt *hopt;
u64 rate64, ceil64;
@@ -1361,16 +1361,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
goto failure;
/* Keeping backward compatible with rate_table based iproute2 tc */
- if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) {
- rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
- if (rtab)
- qdisc_put_rtab(rtab);
- }
- if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) {
- ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
- if (ctab)
- qdisc_put_rtab(ctab);
- }
+ if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
+ qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]));
+
+ if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
+ qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]));
if (!cl) { /* new class */
struct Qdisc *new_q;
@@ -1477,21 +1472,30 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
sch_tree_lock(sch);
}
+ rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
+
+ ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
+
+ psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
+ psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
+
/* it used to be a nasty bug here, we have to check that node
* is really leaf before changing cl->un.leaf !
*/
if (!cl->level) {
- cl->quantum = hopt->rate.rate / q->rate2quantum;
+ u64 quantum = cl->rate.rate_bytes_ps;
+
+ do_div(quantum, q->rate2quantum);
+ cl->quantum = min_t(u64, quantum, INT_MAX);
+
if (!hopt->quantum && cl->quantum < 1000) {
- pr_warning(
- "HTB: quantum of class %X is small. Consider r2q change.\n",
- cl->common.classid);
+ pr_warn("HTB: quantum of class %X is small. Consider r2q change.\n",
+ cl->common.classid);
cl->quantum = 1000;
}
if (!hopt->quantum && cl->quantum > 200000) {
- pr_warning(
- "HTB: quantum of class %X is big. Consider r2q change.\n",
- cl->common.classid);
+ pr_warn("HTB: quantum of class %X is big. Consider r2q change.\n",
+ cl->common.classid);
cl->quantum = 200000;
}
if (hopt->quantum)
@@ -1500,13 +1504,6 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
cl->prio = TC_HTB_NUMPRIO - 1;
}
- rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
-
- ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
-
- psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
- psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
-
cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
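
The quantum hunk in htb_change_class() now derives the per-class quantum from the precomputed 64-bit byte rate (cl->rate.rate_bytes_ps) rather than the 32-bit hopt->rate.rate, which is why psched_ratecfg_precompute() had to move up before the computation; a class whose rate arrives via the new TCA_HTB_RATE64 attribute thus gets a sensible quantum too. A stand-alone model of the arithmetic, including the existing clamps applied when the user left quantum unset (r2q defaults to 10; values here are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

static int htb_quantum(uint64_t rate_bytes_ps, unsigned int r2q)
{
        uint64_t quantum = rate_bytes_ps / r2q; /* do_div() in-kernel */

        if (quantum > INT_MAX)
                quantum = INT_MAX;      /* min_t(u64, quantum, INT_MAX) */
        if (quantum < 1000)
                quantum = 1000;         /* "quantum ... is small" path */
        if (quantum > 200000)
                quantum = 200000;       /* "quantum ... is big" path */
        return (int)quantum;
}

int main(void)
{
        /* 40 Gbit/s = 5e9 bytes/s: more than a u32 rate can carry. */
        printf("%d\n", htb_quantum(5000000000ull, 10)); /* 200000 */
        return 0;
}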
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 2e56185736d6..a8b2864a696b 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -78,14 +78,19 @@ static void mq_attach(struct Qdisc *sch)
{
struct net_device *dev = qdisc_dev(sch);
struct mq_sched *priv = qdisc_priv(sch);
- struct Qdisc *qdisc;
+ struct Qdisc *qdisc, *old;
unsigned int ntx;
for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
qdisc = priv->qdiscs[ntx];
- qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
- if (qdisc)
- qdisc_destroy(qdisc);
+ old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
+ if (old)
+ qdisc_destroy(old);
+#ifdef CONFIG_NET_SCHED
+ if (ntx < dev->real_num_tx_queues)
+ qdisc_list_add(qdisc);
+#endif
+
}
kfree(priv->qdiscs);
priv->qdiscs = NULL;
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index d44c868cb537..6749e2f540d0 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -167,15 +167,17 @@ static void mqprio_attach(struct Qdisc *sch)
{
struct net_device *dev = qdisc_dev(sch);
struct mqprio_sched *priv = qdisc_priv(sch);
- struct Qdisc *qdisc;
+ struct Qdisc *qdisc, *old;
unsigned int ntx;
/* Attach underlying qdisc */
for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
qdisc = priv->qdiscs[ntx];
- qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
- if (qdisc)
- qdisc_destroy(qdisc);
+ old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
+ if (old)
+ qdisc_destroy(old);
+ if (ntx < dev->real_num_tx_queues)
+ qdisc_list_add(qdisc);
}
kfree(priv->qdiscs);
priv->qdiscs = NULL;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 2a2b096d9a66..afb050a735fa 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -11,8 +11,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*
* Author: Alexander Duyck <alexander.h.duyck@intel.com>
*/
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 75c94e59a3bd..090a4e3ecd0d 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -88,7 +88,7 @@ struct netem_sched_data {
u32 duplicate;
u32 reorder;
u32 corrupt;
- u32 rate;
+ u64 rate;
s32 packet_overhead;
u32 cell_size;
u32 cell_size_reciprocal;
@@ -215,10 +215,10 @@ static bool loss_4state(struct netem_sched_data *q)
if (rnd < clg->a4) {
clg->state = 4;
return true;
- } else if (clg->a4 < rnd && rnd < clg->a1) {
+ } else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
clg->state = 3;
return true;
- } else if (clg->a1 < rnd)
+ } else if (clg->a1 + clg->a4 < rnd)
clg->state = 1;
break;
@@ -268,10 +268,11 @@ static bool loss_gilb_ell(struct netem_sched_data *q)
clg->state = 2;
if (net_random() < clg->a4)
return true;
+ break;
case 2:
if (net_random() < clg->a2)
clg->state = 1;
- if (clg->a3 > net_random())
+ if (net_random() > clg->a3)
return true;
}
@@ -494,7 +495,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
now = netem_skb_cb(last)->time_to_send;
}
- delay += packet_len_2_sched_time(skb->len, q);
+ delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
}
cb->time_to_send = now + delay;
@@ -728,7 +729,7 @@ static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
nla_for_each_nested(la, attr, rem) {
u16 type = nla_type(la);
- switch(type) {
+ switch (type) {
case NETEM_LOSS_GI: {
const struct tc_netem_gimodel *gi = nla_data(la);
@@ -781,6 +782,7 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
[TCA_NETEM_RATE] = { .len = sizeof(struct tc_netem_rate) },
[TCA_NETEM_LOSS] = { .type = NLA_NESTED },
[TCA_NETEM_ECN] = { .type = NLA_U32 },
+ [TCA_NETEM_RATE64] = { .type = NLA_U64 },
};
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
@@ -851,6 +853,10 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
if (tb[TCA_NETEM_RATE])
get_rate(sch, tb[TCA_NETEM_RATE]);
+ if (tb[TCA_NETEM_RATE64])
+ q->rate = max_t(u64, q->rate,
+ nla_get_u64(tb[TCA_NETEM_RATE64]));
+
if (tb[TCA_NETEM_ECN])
q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
@@ -973,7 +979,13 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
goto nla_put_failure;
- rate.rate = q->rate;
+ if (q->rate >= (1ULL << 32)) {
+ if (nla_put_u64(skb, TCA_NETEM_RATE64, q->rate))
+ goto nla_put_failure;
+ rate.rate = ~0U;
+ } else {
+ rate.rate = q->rate;
+ }
rate.packet_overhead = q->packet_overhead;
rate.cell_size = q->cell_size;
rate.cell_overhead = q->cell_overhead;
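
The loss_4state() fix is about cumulative thresholds: from state 1, the transition probabilities a4 (to state 4) and a1 (to state 3) must occupy adjacent bands of [0,1), so the second comparison has to be against a1 + a4, not a1 alone. The loss_gilb_ell() hunk likewise adds a missing break so state 1 no longer falls through into state 2. A stand-alone model of the corrected state-1 branch (probabilities are illustrative; the kernel works in fixed point via net_random()):

#include <stdio.h>
#include <stdlib.h>

/* After the fix: P(->4) = a4, P(->3) = a1, P(stay) = 1 - a1 - a4. */
static int next_state(double rnd, double a1, double a4)
{
        if (rnd < a4)
                return 4;               /* independent loss */
        if (rnd < a1 + a4)              /* was: rnd < a1 -- wrong band */
                return 3;               /* burst-loss state */
        return 1;                       /* stay in the good state */
}

int main(void)
{
        double a1 = 0.10, a4 = 0.05;
        int hits[5] = {0}, i;

        srand(1);
        for (i = 0; i < 1000000; i++)
                hits[next_state((double)rand() / RAND_MAX, a1, a4)]++;
        printf("to3=%.3f (~0.100)  to4=%.3f (~0.050)\n",
               hits[3] / 1e6, hits[4] / 1e6);
        return 0;
}

Separately, the netem dump path keeps old userspace working: when the now-64-bit rate no longer fits, the legacy u32 field is pinned at ~0U and the true value travels in TCA_NETEM_RATE64.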
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
new file mode 100644
index 000000000000..fe65340c8eb4
--- /dev/null
+++ b/net/sched/sch_pie.c
@@ -0,0 +1,555 @@
+/* Copyright (C) 2013 Cisco Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Vijay Subramanian <vijaynsu@cisco.com>
+ * Author: Mythili Prabhu <mysuryan@cisco.com>
+ *
+ * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no>
+ * University of Oslo, Norway.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <net/pkt_sched.h>
+#include <net/inet_ecn.h>
+
+#define QUEUE_THRESHOLD 10000
+#define DQCOUNT_INVALID -1
+#define MAX_PROB 0xffffffff
+#define PIE_SCALE 8
+
+/* parameters used */
+struct pie_params {
+ psched_time_t target; /* user specified target delay in pschedtime */
+ u32 tupdate; /* timer frequency (in jiffies) */
+ u32 limit; /* number of packets that can be enqueued */
+ u32 alpha; /* alpha and beta are between -4 and 4 */
+ u32 beta; /* and are used for shift relative to 1 */
+ bool ecn; /* true if ecn is enabled */
+ bool bytemode; /* to scale drop early prob based on pkt size */
+};
+
+/* variables used */
+struct pie_vars {
+ u32 prob; /* probability but scaled by u32 limit. */
+ psched_time_t burst_time;
+ psched_time_t qdelay;
+ psched_time_t qdelay_old;
+ u64 dq_count; /* measured in bytes */
+ psched_time_t dq_tstamp; /* drain rate */
+ u32 avg_dq_rate; /* bytes per pschedtime tick, scaled */
+ u32 qlen_old; /* in bytes */
+};
+
+/* statistics gathering */
+struct pie_stats {
+ u32 packets_in; /* total number of packets enqueued */
+ u32 dropped; /* packets dropped due to pie_action */
+ u32 overlimit; /* dropped due to lack of space in queue */
+ u32 maxq; /* maximum queue size */
+ u32 ecn_mark; /* packets marked with ECN */
+};
+
+/* private data for the Qdisc */
+struct pie_sched_data {
+ struct pie_params params;
+ struct pie_vars vars;
+ struct pie_stats stats;
+ struct timer_list adapt_timer;
+};
+
+static void pie_params_init(struct pie_params *params)
+{
+ params->alpha = 2;
+ params->beta = 20;
+ params->tupdate = usecs_to_jiffies(30 * USEC_PER_MSEC); /* 30 ms */
+ params->limit = 1000; /* default of 1000 packets */
+ params->target = PSCHED_NS2TICKS(20 * NSEC_PER_MSEC); /* 20 ms */
+ params->ecn = false;
+ params->bytemode = false;
+}
+
+static void pie_vars_init(struct pie_vars *vars)
+{
+ vars->dq_count = DQCOUNT_INVALID;
+ vars->avg_dq_rate = 0;
+ /* default of 100 ms in pschedtime */
+ vars->burst_time = PSCHED_NS2TICKS(100 * NSEC_PER_MSEC);
+}
+
+static bool drop_early(struct Qdisc *sch, u32 packet_size)
+{
+ struct pie_sched_data *q = qdisc_priv(sch);
+ u32 rnd;
+ u32 local_prob = q->vars.prob;
+ u32 mtu = psched_mtu(qdisc_dev(sch));
+
+ /* If there is still burst allowance left, skip random early drop */
+ if (q->vars.burst_time > 0)
+ return false;
+
+ /* If current delay is less than half of target, and
+ * if drop prob is low already, disable early_drop
+ */
+ if ((q->vars.qdelay < q->params.target / 2)
+ && (q->vars.prob < MAX_PROB / 5))
+ return false;
+
+ /* If we have fewer than 2 mtu-sized packets, disable drop_early,
+ * similar to min_th in RED
+ */
+ if (sch->qstats.backlog < 2 * mtu)
+ return false;
+
+ /* If bytemode is turned on, use packet size to compute new
+ * probability. Smaller packets will have a lower drop prob in this case
+ */
+ if (q->params.bytemode && packet_size <= mtu)
+ local_prob = (local_prob / mtu) * packet_size;
+ else
+ local_prob = q->vars.prob;
+
+ rnd = net_random();
+ if (rnd < local_prob)
+ return true;
+
+ return false;
+}
+
+static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ struct pie_sched_data *q = qdisc_priv(sch);
+ bool enqueue = false;
+
+ if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
+ q->stats.overlimit++;
+ goto out;
+ }
+
+ if (!drop_early(sch, skb->len)) {
+ enqueue = true;
+ } else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) &&
+ INET_ECN_set_ce(skb)) {
+ /* If packet is ecn capable, mark it if drop probability
+ * is lower than 10%, else drop it.
+ */
+ q->stats.ecn_mark++;
+ enqueue = true;
+ }
+
+ /* we can enqueue the packet */
+ if (enqueue) {
+ q->stats.packets_in++;
+ if (qdisc_qlen(sch) > q->stats.maxq)
+ q->stats.maxq = qdisc_qlen(sch);
+
+ return qdisc_enqueue_tail(skb, sch);
+ }
+
+out:
+ q->stats.dropped++;
+ return qdisc_drop(skb, sch);
+}
+
+static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
+ [TCA_PIE_TARGET] = {.type = NLA_U32},
+ [TCA_PIE_LIMIT] = {.type = NLA_U32},
+ [TCA_PIE_TUPDATE] = {.type = NLA_U32},
+ [TCA_PIE_ALPHA] = {.type = NLA_U32},
+ [TCA_PIE_BETA] = {.type = NLA_U32},
+ [TCA_PIE_ECN] = {.type = NLA_U32},
+ [TCA_PIE_BYTEMODE] = {.type = NLA_U32},
+};
+
+static int pie_change(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct pie_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_PIE_MAX + 1];
+ unsigned int qlen;
+ int err;
+
+ if (!opt)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_PIE_MAX, opt, pie_policy);
+ if (err < 0)
+ return err;
+
+ sch_tree_lock(sch);
+
+ /* target is specified in us; convert it to pschedtime */
+ if (tb[TCA_PIE_TARGET]) {
+ u32 target = nla_get_u32(tb[TCA_PIE_TARGET]);
+
+ q->params.target = PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC);
+ }
+
+ /* tupdate is in jiffies */
+ if (tb[TCA_PIE_TUPDATE])
+ q->params.tupdate = usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE]));
+
+ if (tb[TCA_PIE_LIMIT]) {
+ u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);
+
+ q->params.limit = limit;
+ sch->limit = limit;
+ }
+
+ if (tb[TCA_PIE_ALPHA])
+ q->params.alpha = nla_get_u32(tb[TCA_PIE_ALPHA]);
+
+ if (tb[TCA_PIE_BETA])
+ q->params.beta = nla_get_u32(tb[TCA_PIE_BETA]);
+
+ if (tb[TCA_PIE_ECN])
+ q->params.ecn = nla_get_u32(tb[TCA_PIE_ECN]);
+
+ if (tb[TCA_PIE_BYTEMODE])
+ q->params.bytemode = nla_get_u32(tb[TCA_PIE_BYTEMODE]);
+
+ /* Drop excess packets if new limit is lower */
+ qlen = sch->q.qlen;
+ while (sch->q.qlen > sch->limit) {
+ struct sk_buff *skb = __skb_dequeue(&sch->q);
+
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_drop(skb, sch);
+ }
+ qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+
+ sch_tree_unlock(sch);
+ return 0;
+}
+
+static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct pie_sched_data *q = qdisc_priv(sch);
+ int qlen = sch->qstats.backlog; /* current queue size in bytes */
+
+ /* If the queue holds QUEUE_THRESHOLD bytes or more (roughly ten
+ * full-sized packets) and dq_count is unset, we have enough data
+ * to calculate the drain rate. Save the current time as dq_tstamp
+ * and start a measurement cycle.
+ */
+ if (qlen >= QUEUE_THRESHOLD && q->vars.dq_count == DQCOUNT_INVALID) {
+ q->vars.dq_tstamp = psched_get_time();
+ q->vars.dq_count = 0;
+ }
+
+ /* Calculate the average drain rate from this value. If the queue
+ * length has receded to a small value, viz. <= QUEUE_THRESHOLD bytes,
+ * reset dq_count to -1 as we don't have enough packets to calculate
+ * the drain rate anymore. The following if block is entered only when
+ * we have a substantial queue built up (QUEUE_THRESHOLD bytes or more)
+ * and we calculate the drain rate for the threshold here. dq_count is
+ * in bytes, the time difference in psched_time, hence the rate is in
+ * bytes/psched_time.
+ */
+ if (q->vars.dq_count != DQCOUNT_INVALID) {
+ q->vars.dq_count += skb->len;
+
+ if (q->vars.dq_count >= QUEUE_THRESHOLD) {
+ psched_time_t now = psched_get_time();
+ u32 dtime = now - q->vars.dq_tstamp;
+ u32 count = q->vars.dq_count << PIE_SCALE;
+
+ if (dtime == 0)
+ return;
+
+ count = count / dtime;
+
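+ /* Exponentially weighted moving average of the drain
+ * rate, giving the new sample a weight of 1/8:
+ * avg = 7/8 * avg + 1/8 * count
+ */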
+ if (q->vars.avg_dq_rate == 0)
+ q->vars.avg_dq_rate = count;
+ else
+ q->vars.avg_dq_rate =
+ (q->vars.avg_dq_rate -
+ (q->vars.avg_dq_rate >> 3)) + (count >> 3);
+
+ /* If the queue has receded below the threshold, we hold
+ * on to the last drain rate calculated, else we reset
+ * dq_count to 0 to re-enter the if block when the next
+ * packet is dequeued.
+ */
+ if (qlen < QUEUE_THRESHOLD)
+ q->vars.dq_count = DQCOUNT_INVALID;
+ else {
+ q->vars.dq_count = 0;
+ q->vars.dq_tstamp = psched_get_time();
+ }
+
+ if (q->vars.burst_time > 0) {
+ if (q->vars.burst_time > dtime)
+ q->vars.burst_time -= dtime;
+ else
+ q->vars.burst_time = 0;
+ }
+ }
+ }
+}
+
+static void calculate_probability(struct Qdisc *sch)
+{
+ struct pie_sched_data *q = qdisc_priv(sch);
+ u32 qlen = sch->qstats.backlog; /* queue size in bytes */
+ psched_time_t qdelay = 0; /* in pschedtime */
+ psched_time_t qdelay_old = q->vars.qdelay; /* in pschedtime */
+ s32 delta = 0; /* determines the change in probability */
+ u32 oldprob;
+ u32 alpha, beta;
+ bool update_prob = true;
+
+ q->vars.qdelay_old = q->vars.qdelay;
+
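+ /* Estimate the current queueing delay from the backlog and the
+ * measured drain rate (Little's law: delay = backlog / rate);
+ * avg_dq_rate is scaled by 2^PIE_SCALE, hence the shift.
+ */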
+ if (q->vars.avg_dq_rate > 0)
+ qdelay = (qlen << PIE_SCALE) / q->vars.avg_dq_rate;
+ else
+ qdelay = 0;
+
+ /* If qdelay is zero and qlen is not, it means qlen is very small,
+ * less than dequeue_rate, so we do not update the probability in
+ * this round.
+ */
+ if (qdelay == 0 && qlen != 0)
+ update_prob = false;
+
+ /* Scale alpha and beta with the current dropping mode: take gentle
+ * steps in light dropping mode, medium steps in medium dropping
+ * mode and big steps in high dropping mode.
+ */
+ if (q->vars.prob < MAX_PROB / 100) {
+ alpha =
+ (q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 7;
+ beta =
+ (q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 7;
+ } else if (q->vars.prob < MAX_PROB / 10) {
+ alpha =
+ (q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 5;
+ beta =
+ (q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 5;
+ } else {
+ alpha =
+ (q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
+ beta =
+ (q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
+ }
+
+ /* alpha and beta should be between 0 and 32, in multiples of 1/16 */
+ delta += alpha * (qdelay - q->params.target);
+ delta += beta * (qdelay - qdelay_old);
+
+ oldprob = q->vars.prob;
+
+ /* to ensure we increase probability in steps of no more than 2% */
+ if (delta > (s32) (MAX_PROB / (100 / 2)) &&
+ q->vars.prob >= MAX_PROB / 10)
+ delta = (MAX_PROB / 100) * 2;
+
+ /* Non-linear drop:
+ * Tune the drop probability to increase quickly for high
+ * delays (>= 250ms); 250ms is derived through experiments
+ * and provides error protection.
+ */
+
+ if (qdelay > (PSCHED_NS2TICKS(250 * NSEC_PER_MSEC)))
+ delta += MAX_PROB / (100 / 2);
+
+ q->vars.prob += delta;
+
+ if (delta > 0) {
+ /* prevent overflow */
+ if (q->vars.prob < oldprob) {
+ q->vars.prob = MAX_PROB;
+ /* Prevent normalization error. If probability is at
+ * maximum value already, we normalize it here, and
+ * skip the check to do a non-linear drop in the next
+ * section.
+ */
+ update_prob = false;
+ }
+ } else {
+ /* prevent underflow */
+ if (q->vars.prob > oldprob)
+ q->vars.prob = 0;
+ }
+
+ /* Non-linear drop in probability: Reduce drop probability quickly if
+ * delay is 0 for 2 consecutive Tupdate periods.
+ */
+
+ if ((qdelay == 0) && (qdelay_old == 0) && update_prob)
+ q->vars.prob = (q->vars.prob * 98) / 100;
+
+ q->vars.qdelay = qdelay;
+ q->vars.qlen_old = qlen;
+
+ /* We restart the measurement cycle if the following conditions are met
+ * 1. The delay has been low for 2 consecutive Tupdate periods
+ * 2. The calculated drop probability is zero
+ * 3. We have at least one estimate for avg_dq_rate, i.e. it is
+ * non-zero
+ */
+ if ((q->vars.qdelay < q->params.target / 2) &&
+ (q->vars.qdelay_old < q->params.target / 2) &&
+ (q->vars.prob == 0) &&
+ (q->vars.avg_dq_rate > 0))
+ pie_vars_init(&q->vars);
+}
+
+static void pie_timer(unsigned long arg)
+{
+ struct Qdisc *sch = (struct Qdisc *)arg;
+ struct pie_sched_data *q = qdisc_priv(sch);
+ spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+
+ spin_lock(root_lock);
+ calculate_probability(sch);
+
+ /* reset the timer to fire after 'tupdate'. tupdate is in jiffies. */
+ if (q->params.tupdate)
+ mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
+ spin_unlock(root_lock);
+}
+
+static int pie_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct pie_sched_data *q = qdisc_priv(sch);
+
+ pie_params_init(&q->params);
+ pie_vars_init(&q->vars);
+ sch->limit = q->params.limit;
+
+ setup_timer(&q->adapt_timer, pie_timer, (unsigned long)sch);
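+ /* fire the first adaptation roughly 500ms (HZ / 2 jiffies) from now */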
+ mod_timer(&q->adapt_timer, jiffies + HZ / 2);
+
+ if (opt) {
+ int err = pie_change(sch, opt);
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct pie_sched_data *q = qdisc_priv(sch);
+ struct nlattr *opts;
+
+ opts = nla_nest_start(skb, TCA_OPTIONS);
+ if (opts == NULL)
+ goto nla_put_failure;
+
+ /* convert target from pschedtime to us */
+ if (nla_put_u32(skb, TCA_PIE_TARGET,
+ ((u32) PSCHED_TICKS2NS(q->params.target)) /
+ NSEC_PER_USEC) ||
+ nla_put_u32(skb, TCA_PIE_LIMIT, sch->limit) ||
+ nla_put_u32(skb, TCA_PIE_TUPDATE, jiffies_to_usecs(q->params.tupdate)) ||
+ nla_put_u32(skb, TCA_PIE_ALPHA, q->params.alpha) ||
+ nla_put_u32(skb, TCA_PIE_BETA, q->params.beta) ||
+ nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) ||
+ nla_put_u32(skb, TCA_PIE_BYTEMODE, q->params.bytemode))
+ goto nla_put_failure;
+
+ return nla_nest_end(skb, opts);
+
+nla_put_failure:
+ nla_nest_cancel(skb, opts);
+ return -1;
+}
+
+static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+ struct pie_sched_data *q = qdisc_priv(sch);
+ struct tc_pie_xstats st = {
+ .prob = q->vars.prob,
+ .delay = ((u32) PSCHED_TICKS2NS(q->vars.qdelay)) /
+ NSEC_PER_USEC,
+ /* unscale and return dq_rate in bytes per sec */
+ .avg_dq_rate = q->vars.avg_dq_rate *
+ (PSCHED_TICKS_PER_SEC) >> PIE_SCALE,
+ .packets_in = q->stats.packets_in,
+ .overlimit = q->stats.overlimit,
+ .maxq = q->stats.maxq,
+ .dropped = q->stats.dropped,
+ .ecn_mark = q->stats.ecn_mark,
+ };
+
+ return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
+{
+ struct sk_buff *skb = __qdisc_dequeue_head(sch, &sch->q);
+
+ if (!skb)
+ return NULL;
+
+ pie_process_dequeue(sch, skb);
+ return skb;
+}
+
+static void pie_reset(struct Qdisc *sch)
+{
+ struct pie_sched_data *q = qdisc_priv(sch);
+
+ qdisc_reset_queue(sch);
+ pie_vars_init(&q->vars);
+}
+
+static void pie_destroy(struct Qdisc *sch)
+{
+ struct pie_sched_data *q = qdisc_priv(sch);
+
+ q->params.tupdate = 0;
+ del_timer_sync(&q->adapt_timer);
+}
+
+static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
+ .id = "pie",
+ .priv_size = sizeof(struct pie_sched_data),
+ .enqueue = pie_qdisc_enqueue,
+ .dequeue = pie_qdisc_dequeue,
+ .peek = qdisc_peek_dequeued,
+ .init = pie_init,
+ .destroy = pie_destroy,
+ .reset = pie_reset,
+ .change = pie_change,
+ .dump = pie_dump,
+ .dump_stats = pie_dump_stats,
+ .owner = THIS_MODULE,
+};
+
+static int __init pie_module_init(void)
+{
+ return register_qdisc(&pie_qdisc_ops);
+}
+
+static void __exit pie_module_exit(void)
+{
+ unregister_qdisc(&pie_qdisc_ops);
+}
+
+module_init(pie_module_init);
+module_exit(pie_module_exit);
+
+MODULE_DESCRIPTION("Proportional Integral controller Enhanced (PIE) scheduler");
+MODULE_AUTHOR("Vijay Subramanian");
+MODULE_AUTHOR("Mythili Prabhu");
+MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index d3a1bc26dbfc..76f01e0258df 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -237,10 +237,12 @@ static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
}
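+/* The macro below is wrapped in do { } while (0) so that its multiple
+ * statements expand to a single statement and remain safe inside
+ * un-braced if/else bodies.
+ */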
#define sfq_unlink(q, x, n, p) \
- n = q->slots[x].dep.next; \
- p = q->slots[x].dep.prev; \
- sfq_dep_head(q, p)->next = n; \
- sfq_dep_head(q, n)->prev = p
+ do { \
+ n = q->slots[x].dep.next; \
+ p = q->slots[x].dep.prev; \
+ sfq_dep_head(q, p)->next = n; \
+ sfq_dep_head(q, n)->prev = p; \
+ } while (0)
static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 68f98595819c..fbba5b0ec121 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -21,6 +21,7 @@
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
+#include <net/tcp.h>
/* Simple Token Bucket Filter.
@@ -117,6 +118,48 @@ struct tbf_sched_data {
};
+/* Time to Length: convert a time in ns to a length in bytes,
+ * to determine how many bytes can be sent in the given time.
+ */
+static u64 psched_ns_t2l(const struct psched_ratecfg *r,
+ u64 time_in_ns)
+{
+ /* The formula is:
+ * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
+ */
+ u64 len = time_in_ns * r->rate_bytes_ps;
+
+ do_div(len, NSEC_PER_SEC);
+
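+ /* An ATM cell carries only 48 bytes of payload in every 53
+ * bytes on the wire, so derate the length accordingly.
+ */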
+ if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
+ do_div(len, 53);
+ len = len * 48;
+ }
+
+ if (len > r->overhead)
+ len -= r->overhead;
+ else
+ len = 0;
+
+ return len;
+}
+
+/*
+ * Return length of individual segments of a gso packet,
+ * including all headers (MAC, IP, TCP/UDP)
+ */
+static unsigned int skb_gso_seglen(const struct sk_buff *skb)
+{
+ unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+ if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+ hdr_len += tcp_hdrlen(skb);
+ else
+ hdr_len += sizeof(struct udphdr);
+ return hdr_len + shinfo->gso_size;
+}
+
/* GSO packet is too big, segment it so that tbf can transmit
* each segment in time
*/
@@ -136,12 +179,8 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
while (segs) {
nskb = segs->next;
segs->next = NULL;
- if (likely(segs->len <= q->max_size)) {
- qdisc_skb_cb(segs)->pkt_len = segs->len;
- ret = qdisc_enqueue(segs, q->qdisc);
- } else {
- ret = qdisc_reshape_fail(skb, sch);
- }
+ qdisc_skb_cb(segs)->pkt_len = segs->len;
+ ret = qdisc_enqueue(segs, q->qdisc);
if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret))
sch->qstats.drops++;
@@ -163,7 +202,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
int ret;
if (qdisc_pkt_len(skb) > q->max_size) {
- if (skb_is_gso(skb))
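+ /* Segmenting only helps if each resulting segment fits within
+ * max_size; otherwise the packet has to be dropped anyway.
+ */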
+ if (skb_is_gso(skb) && skb_gso_seglen(skb) <= q->max_size)
return tbf_segment(skb, sch);
return qdisc_reshape_fail(skb, sch);
}
@@ -268,6 +307,8 @@ static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
[TCA_TBF_PTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
[TCA_TBF_RATE64] = { .type = NLA_U64 },
[TCA_TBF_PRATE64] = { .type = NLA_U64 },
+ [TCA_TBF_BURST] = { .type = NLA_U32 },
+ [TCA_TBF_PBURST] = { .type = NLA_U32 },
};
static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
@@ -276,10 +317,11 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
struct tbf_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_TBF_MAX + 1];
struct tc_tbf_qopt *qopt;
- struct qdisc_rate_table *rtab = NULL;
- struct qdisc_rate_table *ptab = NULL;
struct Qdisc *child = NULL;
- int max_size, n;
+ struct psched_ratecfg rate;
+ struct psched_ratecfg peak;
+ u64 max_size;
+ s64 buffer, mtu;
u64 rate64 = 0, prate64 = 0;
err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy);
@@ -291,33 +333,13 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
goto done;
qopt = nla_data(tb[TCA_TBF_PARMS]);
- rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]);
- if (rtab == NULL)
- goto done;
+ if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
+ qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
+ tb[TCA_TBF_RTAB]));
- if (qopt->peakrate.rate) {
- if (qopt->peakrate.rate > qopt->rate.rate)
- ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]);
- if (ptab == NULL)
- goto done;
- }
-
- for (n = 0; n < 256; n++)
- if (rtab->data[n] > qopt->buffer)
- break;
- max_size = (n << qopt->rate.cell_log) - 1;
- if (ptab) {
- int size;
-
- for (n = 0; n < 256; n++)
- if (ptab->data[n] > qopt->mtu)
- break;
- size = (n << qopt->peakrate.cell_log) - 1;
- if (size < max_size)
- max_size = size;
- }
- if (max_size < 0)
- goto done;
+ if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
+ qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
+ tb[TCA_TBF_PTAB]));
if (q->qdisc != &noop_qdisc) {
err = fifo_set_limit(q->qdisc, qopt->limit);
@@ -331,6 +353,50 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
}
}
+ buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
+ mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);
+
+ if (tb[TCA_TBF_RATE64])
+ rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
+ psched_ratecfg_precompute(&rate, &qopt->rate, rate64);
+
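+ /* The burst size is either given explicitly or derived from the
+ * time-based buffer via the configured rate.
+ */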
+ if (tb[TCA_TBF_BURST]) {
+ max_size = nla_get_u32(tb[TCA_TBF_BURST]);
+ buffer = psched_l2t_ns(&rate, max_size);
+ } else {
+ max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
+ }
+
+ if (qopt->peakrate.rate) {
+ if (tb[TCA_TBF_PRATE64])
+ prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
+ psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
+ if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
+ pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equals to rate %llu !\n",
+ peak.rate_bytes_ps, rate.rate_bytes_ps);
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (tb[TCA_TBF_PBURST]) {
+ u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);
+ max_size = min_t(u32, max_size, pburst);
+ mtu = psched_l2t_ns(&peak, pburst);
+ } else {
+ max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
+ }
+ }
+
+ if (max_size < psched_mtu(qdisc_dev(sch)))
+ pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
+ max_size, qdisc_dev(sch)->name,
+ psched_mtu(qdisc_dev(sch)));
+
+ if (!max_size) {
+ err = -EINVAL;
+ goto done;
+ }
+
sch_tree_lock(sch);
if (child) {
qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
@@ -338,19 +404,21 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
q->qdisc = child;
}
q->limit = qopt->limit;
- q->mtu = PSCHED_TICKS2NS(qopt->mtu);
+ if (tb[TCA_TBF_PBURST])
+ q->mtu = mtu;
+ else
+ q->mtu = PSCHED_TICKS2NS(qopt->mtu);
q->max_size = max_size;
- q->buffer = PSCHED_TICKS2NS(qopt->buffer);
+ if (tb[TCA_TBF_BURST])
+ q->buffer = buffer;
+ else
+ q->buffer = PSCHED_TICKS2NS(qopt->buffer);
q->tokens = q->buffer;
q->ptokens = q->mtu;
- if (tb[TCA_TBF_RATE64])
- rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
- psched_ratecfg_precompute(&q->rate, &rtab->rate, rate64);
- if (ptab) {
- if (tb[TCA_TBF_PRATE64])
- prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
- psched_ratecfg_precompute(&q->peak, &ptab->rate, prate64);
+ memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
+ if (qopt->peakrate.rate) {
+ memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));
q->peak_present = true;
} else {
q->peak_present = false;
@@ -359,10 +427,6 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
sch_tree_unlock(sch);
err = 0;
done:
- if (rtab)
- qdisc_put_rtab(rtab);
- if (ptab)
- qdisc_put_rtab(ptab);
return err;
}
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 68a27f9796d2..5ae609200674 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -22,9 +22,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
@@ -90,14 +89,12 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
/* Initialize the object handling fields. */
atomic_set(&asoc->base.refcnt, 1);
- asoc->base.dead = false;
/* Initialize the bind addr area. */
sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
asoc->state = SCTP_STATE_CLOSED;
asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
- asoc->frag_point = 0;
asoc->user_frag = sp->user_frag;
/* Set the association max_retrans and RTO values from the
@@ -110,8 +107,6 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);
- asoc->overall_error_count = 0;
-
/* Initialize the association's heartbeat interval based on the
* sock configured value.
*/
@@ -132,18 +127,15 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
*/
asoc->param_flags = sp->param_flags;
- /* Initialize the maximum mumber of new data packets that can be sent
+ /* Initialize the maximum number of new data packets that can be sent
* in a burst.
*/
asoc->max_burst = sp->max_burst;
/* initialize association timers */
- asoc->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0;
asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;
- asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0;
- asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = 0;
/* sctpimpguide Section 2.12.2
* If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
@@ -152,10 +144,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
= 5 * asoc->rto_max;
- asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
- asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
- min_t(unsigned long, sp->autoclose, net->sctp.max_autoclose) * HZ;
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
/* Initializes the timers */
for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
@@ -173,11 +163,6 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
asoc->max_init_timeo =
msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);
- /* Allocate storage for the ssnmap after the inbound and outbound
- * streams have been negotiated during Init.
- */
- asoc->ssnmap = NULL;
-
/* Set the local window size for receive.
* This is also the rcvbuf space per association.
* RFC 6 - A SCTP receiver MUST be able to receive a minimum of
@@ -190,25 +175,15 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
asoc->a_rwnd = asoc->rwnd;
- asoc->rwnd_over = 0;
- asoc->rwnd_press = 0;
-
/* Use my own max window until I learn something better. */
asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;
- /* Set the sndbuf size for transmit. */
- asoc->sndbuf_used = 0;
-
/* Initialize the receive memory counter */
atomic_set(&asoc->rmem_alloc, 0);
init_waitqueue_head(&asoc->wait);
asoc->c.my_vtag = sctp_generate_tag(ep);
- asoc->peer.i.init_tag = 0; /* INIT needs a vtag of 0. */
- asoc->c.peer_vtag = 0;
- asoc->c.my_ttag = 0;
- asoc->c.peer_ttag = 0;
asoc->c.my_port = ep->base.bind_addr.port;
asoc->c.initial_tsn = sctp_generate_tsn(ep);
@@ -219,7 +194,6 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
asoc->highest_sacked = asoc->ctsn_ack_point;
asoc->last_cwr_tsn = asoc->ctsn_ack_point;
- asoc->unack_data = 0;
/* ADDIP Section 4.1 Asconf Chunk Procedures
*
@@ -238,7 +212,6 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
/* Make an empty list of remote transport addresses. */
INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
- asoc->peer.transport_count = 0;
/* RFC 2960 5.1 Normal Establishment of an Association
*
@@ -252,20 +225,15 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
* already received one packet.]
*/
asoc->peer.sack_needed = 1;
- asoc->peer.sack_cnt = 0;
asoc->peer.sack_generation = 1;
/* Assume that the peer will tell us if he recognizes ASCONF
* as part of INIT exchange.
- * The sctp_addip_noauth option is there for backward compatibilty
+ * The sctp_addip_noauth option is there for backward compatibility
* and will revert old behavior.
*/
- asoc->peer.asconf_capable = 0;
if (net->sctp.addip_noauth)
asoc->peer.asconf_capable = 1;
- asoc->asconf_addr_del_pending = NULL;
- asoc->src_out_of_asoc_ok = 0;
- asoc->new_transport = NULL;
/* Create an input queue. */
sctp_inq_init(&asoc->base.inqueue);
@@ -277,12 +245,6 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
if (!sctp_ulpq_init(&asoc->ulpq, asoc))
goto fail_init;
- memset(&asoc->peer.tsn_map, 0, sizeof(struct sctp_tsnmap));
-
- asoc->need_ecne = 0;
-
- asoc->assoc_id = 0;
-
/* Assume that peer would support both address types unless we are
* told otherwise.
*/
@@ -291,8 +253,6 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
asoc->peer.ipv6_address = 1;
INIT_LIST_HEAD(&asoc->asocs);
- asoc->autoclose = sp->autoclose;
-
asoc->default_stream = sp->default_stream;
asoc->default_ppid = sp->default_ppid;
asoc->default_flags = sp->default_flags;
@@ -300,9 +260,6 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
asoc->default_timetolive = sp->default_timetolive;
asoc->default_rcv_context = sp->default_rcv_context;
- /* SCTP_GET_ASSOC_STATS COUNTERS */
- memset(&asoc->stats, 0, sizeof(struct sctp_priv_assoc_stats));
-
/* AUTH related initializations */
INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
@@ -310,9 +267,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
goto fail_init;
asoc->active_key_id = ep->active_key_id;
- asoc->asoc_shared_key = NULL;
- asoc->default_hmac_id = 0;
/* Save the hmacs and chunks list into this association */
if (ep->auth_hmacs_list)
memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
@@ -997,17 +952,13 @@ int sctp_cmp_addr_exact(const union sctp_addr *ss1,
*/
struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
{
- struct sctp_chunk *chunk;
+ if (!asoc->need_ecne)
+ return NULL;
/* Send ECNE if needed.
* Not being able to allocate a chunk here is not deadly.
*/
- if (asoc->need_ecne)
- chunk = sctp_make_ecne(asoc, asoc->last_ecne_tsn);
- else
- chunk = NULL;
-
- return chunk;
+ return sctp_make_ecne(asoc, asoc->last_ecne_tsn);
}
/*
@@ -1268,7 +1219,7 @@ void sctp_assoc_update(struct sctp_association *asoc,
}
}
- /* SCTP-AUTH: Save the peer parameters from the new assocaitions
+ /* SCTP-AUTH: Save the peer parameters from the new associations
* and also move the association shared keys over
*/
kfree(asoc->peer.peer_random);
@@ -1396,7 +1347,7 @@ void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
}
/* Should we send a SACK to update our peer? */
-static inline int sctp_peer_needs_update(struct sctp_association *asoc)
+static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
{
struct net *net = sock_net(asoc->base.sk);
switch (asoc->state) {
@@ -1408,12 +1359,12 @@ static inline int sctp_peer_needs_update(struct sctp_association *asoc)
((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
(asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
asoc->pathmtu)))
- return 1;
+ return true;
break;
default:
break;
}
- return 0;
+ return false;
}
/* Increase asoc's rwnd by len and send any window update SACK if needed. */
@@ -1493,7 +1444,7 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
/* If we've reached or overflowed our receive buffer, announce
* a 0 rwnd if rwnd would still be positive. Store the
- * the pottential pressure overflow so that the window can be restored
+ * the potential pressure overflow so that the window can be restored
* back to original value.
*/
if (rx_count >= asoc->base.sk->sk_rcvbuf)
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 46b5977978a1..683c7d1b1306 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -16,9 +16,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
@@ -42,7 +41,7 @@ static struct sctp_hmac sctp_hmac_list[SCTP_AUTH_NUM_HMACS] = {
},
{
.hmac_id = SCTP_AUTH_HMAC_ID_SHA1,
- .hmac_name="hmac(sha1)",
+ .hmac_name = "hmac(sha1)",
.hmac_len = SCTP_SHA1_SIG_SIZE,
},
{
@@ -52,7 +51,7 @@ static struct sctp_hmac sctp_hmac_list[SCTP_AUTH_NUM_HMACS] = {
#if defined (CONFIG_CRYPTO_SHA256) || defined (CONFIG_CRYPTO_SHA256_MODULE)
{
.hmac_id = SCTP_AUTH_HMAC_ID_SHA256,
- .hmac_name="hmac(sha256)",
+ .hmac_name = "hmac(sha256)",
.hmac_len = SCTP_SHA256_SIG_SIZE,
}
#endif
@@ -164,7 +163,7 @@ static int sctp_auth_compare_vectors(struct sctp_auth_bytes *vector1,
* lead-zero padded. If it is not, it
* is automatically larger numerically.
*/
- for (i = 0; i < abs(diff); i++ ) {
+ for (i = 0; i < abs(diff); i++) {
if (longer[i] != 0)
return diff;
}
@@ -227,9 +226,9 @@ static struct sctp_auth_bytes *sctp_auth_make_local_vector(
gfp_t gfp)
{
return sctp_auth_make_key_vector(
- (sctp_random_param_t*)asoc->c.auth_random,
- (sctp_chunks_param_t*)asoc->c.auth_chunks,
- (sctp_hmac_algo_param_t*)asoc->c.auth_hmacs,
+ (sctp_random_param_t *)asoc->c.auth_random,
+ (sctp_chunks_param_t *)asoc->c.auth_chunks,
+ (sctp_hmac_algo_param_t *)asoc->c.auth_hmacs,
gfp);
}
@@ -500,8 +499,7 @@ void sctp_auth_destroy_hmacs(struct crypto_hash *auth_hmacs[])
if (!auth_hmacs)
return;
- for (i = 0; i < SCTP_AUTH_NUM_HMACS; i++)
- {
+ for (i = 0; i < SCTP_AUTH_NUM_HMACS; i++) {
if (auth_hmacs[i])
crypto_free_hash(auth_hmacs[i]);
}
@@ -648,15 +646,15 @@ static int __sctp_auth_cid(sctp_cid_t chunk, struct sctp_chunks_param *param)
*/
for (i = 0; !found && i < len; i++) {
switch (param->chunks[i]) {
- case SCTP_CID_INIT:
- case SCTP_CID_INIT_ACK:
- case SCTP_CID_SHUTDOWN_COMPLETE:
- case SCTP_CID_AUTH:
+ case SCTP_CID_INIT:
+ case SCTP_CID_INIT_ACK:
+ case SCTP_CID_SHUTDOWN_COMPLETE:
+ case SCTP_CID_AUTH:
break;
- default:
+ default:
if (param->chunks[i] == chunk)
- found = 1;
+ found = 1;
break;
}
}
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 077bb070052b..871cdf9567e6 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -21,9 +21,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index f2044fcb9dd1..158701da2d31 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -18,9 +18,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
@@ -255,7 +254,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
SCTP_INC_STATS_USER(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);
/* Create chunks for all the full sized DATA chunks. */
- for (i=0, len=first_len; i < whole; i++) {
+ for (i = 0, len = first_len; i < whole; i++) {
frag = SCTP_DATA_MIDDLE_FRAG;
if (0 == i)
@@ -318,7 +317,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
goto errout;
}
- err = sctp_user_addto_chunk(chunk, offset, over,msgh->msg_iov);
+ err = sctp_user_addto_chunk(chunk, offset, over, msgh->msg_iov);
/* Put the chunk->skb back into the form expected by send. */
__skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
diff --git a/net/sctp/command.c b/net/sctp/command.c
index 3d9a9ff69c03..dd7375851618 100644
--- a/net/sctp/command.c
+++ b/net/sctp/command.c
@@ -19,9 +19,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
diff --git a/net/sctp/debug.c b/net/sctp/debug.c
index e89015d8935a..95d7b15dad21 100644
--- a/net/sctp/debug.c
+++ b/net/sctp/debug.c
@@ -22,9 +22,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 09b8daac87c8..6ffb6c1b13b7 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -23,9 +23,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 98b69bbecdd9..1f4eeb43fbd6 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -23,9 +23,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
@@ -120,7 +119,7 @@ int sctp_rcv(struct sk_buff *skb)
struct sctp_af *af;
struct net *net = dev_net(skb->dev);
- if (skb->pkt_type!=PACKET_HOST)
+ if (skb->pkt_type != PACKET_HOST)
goto discard_it;
SCTP_INC_STATS_BH(net, SCTP_MIB_INSCTPPACKS);
@@ -181,8 +180,7 @@ int sctp_rcv(struct sk_buff *skb)
* If a frame arrives on an interface and the receiving socket is
* bound to another interface, via SO_BINDTODEVICE, treat it as OOTB
*/
- if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb)))
- {
+ if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) {
if (asoc) {
sctp_association_put(asoc);
asoc = NULL;
@@ -537,8 +535,7 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
return sk;
out:
- if (asoc)
- sctp_association_put(asoc);
+ sctp_association_put(asoc);
return NULL;
}
@@ -546,8 +543,7 @@ out:
void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
{
sctp_bh_unlock_sock(sk);
- if (asoc)
- sctp_association_put(asoc);
+ sctp_association_put(asoc);
}
/*
@@ -613,8 +609,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
if (ICMP_FRAG_NEEDED == code) {
sctp_icmp_frag_needed(sk, asoc, transport, info);
goto out_unlock;
- }
- else {
+ } else {
if (ICMP_PROT_UNREACH == code) {
sctp_icmp_proto_unreachable(sk, asoc,
transport);
@@ -1058,31 +1053,31 @@ static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,
if (ch_end > skb_tail_pointer(skb))
break;
- switch(ch->type) {
- case SCTP_CID_AUTH:
- have_auth = chunk_num;
- break;
-
- case SCTP_CID_COOKIE_ECHO:
- /* If a packet arrives containing an AUTH chunk as
- * a first chunk, a COOKIE-ECHO chunk as the second
- * chunk, and possibly more chunks after them, and
- * the receiver does not have an STCB for that
- * packet, then authentication is based on
- * the contents of the COOKIE- ECHO chunk.
- */
- if (have_auth == 1 && chunk_num == 2)
- return NULL;
- break;
-
- case SCTP_CID_ASCONF:
- if (have_auth || net->sctp.addip_noauth)
- asoc = __sctp_rcv_asconf_lookup(
- net, ch, laddr,
- sctp_hdr(skb)->source,
- transportp);
- default:
- break;
+ switch (ch->type) {
+ case SCTP_CID_AUTH:
+ have_auth = chunk_num;
+ break;
+
+ case SCTP_CID_COOKIE_ECHO:
+ /* If a packet arrives containing an AUTH chunk as
+ * a first chunk, a COOKIE-ECHO chunk as the second
+ * chunk, and possibly more chunks after them, and
+ * the receiver does not have an STCB for that
+ * packet, then authentication is based on
+ * the contents of the COOKIE- ECHO chunk.
+ */
+ if (have_auth == 1 && chunk_num == 2)
+ return NULL;
+ break;
+
+ case SCTP_CID_ASCONF:
+ if (have_auth || net->sctp.addip_noauth)
+ asoc = __sctp_rcv_asconf_lookup(
+ net, ch, laddr,
+ sctp_hdr(skb)->source,
+ transportp);
+ default:
+ break;
}
if (asoc)
@@ -1119,19 +1114,10 @@ static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
return NULL;
/* If this is INIT/INIT-ACK look inside the chunk too. */
- switch (ch->type) {
- case SCTP_CID_INIT:
- case SCTP_CID_INIT_ACK:
+ if (ch->type == SCTP_CID_INIT || ch->type == SCTP_CID_INIT_ACK)
return __sctp_rcv_init_lookup(net, skb, laddr, transportp);
- break;
- default:
- return __sctp_rcv_walk_lookup(net, skb, laddr, transportp);
- break;
- }
-
-
- return NULL;
+ return __sctp_rcv_walk_lookup(net, skb, laddr, transportp);
}
/* Lookup an association for an inbound skb. */
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 5856932fdc38..4de12afa13d4 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -24,9 +24,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 7567e6f1a920..0f6259a6a932 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -21,9 +21,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
@@ -173,7 +172,8 @@ static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
switch (type) {
case ICMPV6_PKT_TOOBIG:
- sctp_icmp_frag_needed(sk, asoc, transport, ntohl(info));
+ if (ip6_sk_accept_pmtu(sk))
+ sctp_icmp_frag_needed(sk, asoc, transport, ntohl(info));
goto out_unlock;
case ICMPV6_PARAMPROB:
if (ICMPV6_UNK_NEXTHDR == code) {
@@ -263,7 +263,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
}
final_p = fl6_update_dst(fl6, np->opt, &final);
- dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
+ dst = ip6_dst_lookup_flow(sk, fl6, final_p);
if (!asoc || saddr)
goto out;
@@ -322,7 +322,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
fl6->saddr = baddr->v6.sin6_addr;
fl6->fl6_sport = baddr->v6.sin6_port;
final_p = fl6_update_dst(fl6, np->opt, &final);
- dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
+ dst = ip6_dst_lookup_flow(sk, fl6, final_p);
}
out:
@@ -402,7 +402,7 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
}
/* Initialize a sockaddr_storage from in incoming skb. */
-static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb,
+static void sctp_v6_from_skb(union sctp_addr *addr, struct sk_buff *skb,
int is_saddr)
{
__be16 *port;
diff --git a/net/sctp/objcnt.c b/net/sctp/objcnt.c
index 647396baa56f..40e7fac96c41 100644
--- a/net/sctp/objcnt.c
+++ b/net/sctp/objcnt.c
@@ -20,9 +20,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
@@ -98,7 +97,7 @@ static void sctp_objcnt_seq_stop(struct seq_file *seq, void *v)
{
}
-static void * sctp_objcnt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+static void *sctp_objcnt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (*pos >= ARRAY_SIZE(sctp_dbg_objcnt)) ? NULL : (void *)pos;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index e650978daf27..0f4d15fc2627 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -20,9 +20,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
@@ -281,7 +280,7 @@ static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
/* We believe that this chunk is OK to add to the packet */
switch (chunk->chunk_hdr->type) {
- case SCTP_CID_DATA:
+ case SCTP_CID_DATA:
/* Account for the data being in the packet */
sctp_packet_append_data(packet, chunk);
/* Disallow SACK bundling after DATA. */
@@ -293,17 +292,17 @@ static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
/* timestamp the chunk for rtx purposes */
chunk->sent_at = jiffies;
break;
- case SCTP_CID_COOKIE_ECHO:
+ case SCTP_CID_COOKIE_ECHO:
packet->has_cookie_echo = 1;
break;
- case SCTP_CID_SACK:
+ case SCTP_CID_SACK:
packet->has_sack = 1;
if (chunk->asoc)
chunk->asoc->stats.osacks++;
break;
- case SCTP_CID_AUTH:
+ case SCTP_CID_AUTH:
packet->has_auth = 1;
packet->auth = chunk;
break;
@@ -388,7 +387,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
int err = 0;
int padding; /* How much padding do we need? */
__u8 has_data = 0;
- struct dst_entry *dst = tp->dst;
+ struct dst_entry *dst;
unsigned char *auth = NULL; /* pointer to auth in skb data */
pr_debug("%s: packet:%p\n", __func__, packet);
@@ -421,9 +420,9 @@ int sctp_packet_transmit(struct sctp_packet *packet)
}
}
dst = dst_clone(tp->dst);
- skb_dst_set(nskb, dst);
if (!dst)
goto no_route;
+ skb_dst_set(nskb, dst);
/* Build the SCTP header. */
sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr));
@@ -474,10 +473,11 @@ int sctp_packet_transmit(struct sctp_packet *packet)
* for a given destination transport address.
*/
- if (!tp->rto_pending) {
+ if (!chunk->resent && !tp->rto_pending) {
chunk->rtt_in_progress = 1;
tp->rto_pending = 1;
}
+
has_data = 1;
}
@@ -540,8 +540,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
} else {
/* no need to seed pseudo checksum for SCTP */
nskb->ip_summed = CHECKSUM_PARTIAL;
- nskb->csum_start = (skb_transport_header(nskb) -
- nskb->head);
+ nskb->csum_start = skb_transport_header(nskb) - nskb->head;
nskb->csum_offset = offsetof(struct sctphdr, checksum);
}
}
@@ -558,7 +557,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
* Note: The works for IPv6 layer checks this bit too later
* in transmission. See IP6_ECN_flow_xmit().
*/
- (*tp->af_specific->ecn_capable)(nskb->sk);
+ tp->af_specific->ecn_capable(nskb->sk);
/* Set up the IP options. */
/* BUG: not implemented
@@ -580,7 +579,8 @@ int sctp_packet_transmit(struct sctp_packet *packet)
unsigned long timeout;
/* Restart the AUTOCLOSE timer when sending data. */
- if (sctp_state(asoc, ESTABLISHED) && asoc->autoclose) {
+ if (sctp_state(asoc, ESTABLISHED) &&
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
timeout = asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
@@ -592,7 +592,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
pr_debug("***sctp_transmit_packet*** skb->len:%d\n", nskb->len);
nskb->local_df = packet->ipfragok;
- (*tp->af_specific->sctp_xmit)(nskb, tp);
+ tp->af_specific->sctp_xmit(nskb, tp);
out:
sctp_packet_reset(packet);
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 94df75877869..9c77947c0597 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -22,9 +22,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
@@ -111,7 +110,7 @@ static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
struct sctp_transport *transport,
int count_of_newacks)
{
- if (count_of_newacks >=2 && transport != primary)
+ if (count_of_newacks >= 2 && transport != primary)
return 1;
return 0;
}
@@ -208,8 +207,6 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
INIT_LIST_HEAD(&q->retransmit);
INIT_LIST_HEAD(&q->sacked);
INIT_LIST_HEAD(&q->abandoned);
-
- q->empty = 1;
}
/* Free the outqueue structure and any related pending chunks.
@@ -332,7 +329,6 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
else
SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
- q->empty = 0;
break;
}
} else {
@@ -446,6 +442,8 @@ void sctp_retransmit_mark(struct sctp_outq *q,
transport->rto_pending = 0;
}
+ chunk->resent = 1;
+
/* Move the chunk to the retransmit queue. The chunks
* on the retransmit queue are always kept in order.
*/
@@ -469,7 +467,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
struct net *net = sock_net(q->asoc->base.sk);
int error = 0;
- switch(reason) {
+ switch (reason) {
case SCTP_RTXR_T3_RTX:
SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
@@ -652,7 +650,6 @@ redo:
if (chunk->fast_retransmit == SCTP_NEED_FRTX)
chunk->fast_retransmit = SCTP_DONT_FRTX;
- q->empty = 0;
q->asoc->stats.rtxchunks++;
break;
}
@@ -1063,8 +1060,6 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
sctp_transport_reset_timers(transport);
- q->empty = 0;
-
/* Only let one DATA chunk get bundled with a
* COOKIE-ECHO chunk.
*/
@@ -1087,7 +1082,7 @@ sctp_flush_out:
*
* --xguo
*/
- while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL ) {
+ while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) {
struct sctp_transport *t = list_entry(ltransport,
struct sctp_transport,
send_ready);
@@ -1216,7 +1211,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
* destinations for which cacc_saw_newack is set.
*/
if (transport->cacc.cacc_saw_newack)
- count_of_newacks ++;
+ count_of_newacks++;
}
/* Move the Cumulative TSN Ack Point if appropriate. */
@@ -1273,29 +1268,17 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
"advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
asoc->adv_peer_ack_point);
- /* See if all chunks are acked.
- * Make sure the empty queue handler will get run later.
- */
- q->empty = (list_empty(&q->out_chunk_list) &&
- list_empty(&q->retransmit));
- if (!q->empty)
- goto finish;
-
- list_for_each_entry(transport, transport_list, transports) {
- q->empty = q->empty && list_empty(&transport->transmitted);
- if (!q->empty)
- goto finish;
- }
-
- pr_debug("%s: sack queue is empty\n", __func__);
-finish:
- return q->empty;
+ return sctp_outq_is_empty(q);
}
-/* Is the outqueue empty? */
+/* Is the outqueue empty?
+ * The queue is empty when there is no pending data, no in-flight data
+ * and nothing pending retransmission.
+ */
int sctp_outq_is_empty(const struct sctp_outq *q)
{
- return q->empty;
+ return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
+ list_empty(&q->retransmit);
}
/********************************************************************
@@ -1375,6 +1358,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
* instance).
*/
if (!tchunk->tsn_gap_acked &&
+ !tchunk->resent &&
tchunk->rtt_in_progress) {
tchunk->rtt_in_progress = 0;
rtt = jiffies - tchunk->sent_at;
@@ -1391,7 +1375,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
*/
if (!tchunk->tsn_gap_acked) {
tchunk->tsn_gap_acked = 1;
- *highest_new_tsn_in_sack = tsn;
+ if (TSN_lt(*highest_new_tsn_in_sack, tsn))
+ *highest_new_tsn_in_sack = tsn;
bytes_acked += sctp_data_size(tchunk);
if (!tchunk->transport)
migrate_bytes += sctp_data_size(tchunk);
diff --git a/net/sctp/primitive.c b/net/sctp/primitive.c
index ce1ffd811775..ab8d9f96a177 100644
--- a/net/sctp/primitive.c
+++ b/net/sctp/primitive.c
@@ -23,9 +23,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
index 53c452efb40b..5e68b94ee640 100644
--- a/net/sctp/probe.c
+++ b/net/sctp/probe.c
@@ -38,6 +38,7 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
+MODULE_SOFTDEP("pre: sctp");
MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>");
MODULE_DESCRIPTION("SCTP snooper");
MODULE_LICENSE("GPL");
@@ -182,6 +183,20 @@ static struct jprobe sctp_recv_probe = {
.entry = jsctp_sf_eat_sack,
};
+static __init int sctp_setup_jprobe(void)
+{
+ int ret = register_jprobe(&sctp_recv_probe);
+
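+ /* Registration fails when the sctp module is not loaded yet;
+ * try to pull it in and register the probe once more.
+ */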
+ if (ret) {
+ if (request_module("sctp"))
+ goto out;
+ ret = register_jprobe(&sctp_recv_probe);
+ }
+
+out:
+ return ret;
+}
+
static __init int sctpprobe_init(void)
{
int ret = -ENOMEM;
@@ -202,7 +217,7 @@ static __init int sctpprobe_init(void)
&sctpprobe_fops))
goto free_kfifo;
- ret = register_jprobe(&sctp_recv_probe);
+ ret = sctp_setup_jprobe();
if (ret)
goto remove_proc;
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 0c0642156842..63ba0bdc867a 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -16,9 +16,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
@@ -178,7 +177,7 @@ static void sctp_seq_dump_remote_addrs(struct seq_file *seq, struct sctp_associa
rcu_read_unlock();
}
-static void * sctp_eps_seq_start(struct seq_file *seq, loff_t *pos)
+static void *sctp_eps_seq_start(struct seq_file *seq, loff_t *pos)
{
if (*pos >= sctp_ep_hashsize)
return NULL;
@@ -197,7 +196,7 @@ static void sctp_eps_seq_stop(struct seq_file *seq, void *v)
}
-static void * sctp_eps_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+static void *sctp_eps_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
if (++*pos >= sctp_ep_hashsize)
return NULL;
@@ -283,7 +282,7 @@ void sctp_eps_proc_exit(struct net *net)
}
-static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos)
+static void *sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos)
{
if (*pos >= sctp_assoc_hashsize)
return NULL;
@@ -306,7 +305,7 @@ static void sctp_assocs_seq_stop(struct seq_file *seq, void *v)
}
-static void * sctp_assocs_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+static void *sctp_assocs_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
if (++*pos >= sctp_assoc_hashsize)
return NULL;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 5e17092f4ada..34b7726bcd7f 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -23,9 +23,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
@@ -1066,8 +1065,8 @@ static struct sctp_af sctp_af_inet = {
#endif
};
-struct sctp_pf *sctp_get_pf_specific(sa_family_t family) {
-
+struct sctp_pf *sctp_get_pf_specific(sa_family_t family)
+{
switch (family) {
case PF_INET:
return sctp_pf_inet_specific;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index fe690320b1e4..e5f7cdb42a85 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -23,9 +23,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
@@ -1968,13 +1967,13 @@ static int sctp_verify_ext_param(struct net *net, union sctp_params param)
for (i = 0; i < num_ext; i++) {
switch (param.ext->chunks[i]) {
- case SCTP_CID_AUTH:
- have_auth = 1;
- break;
- case SCTP_CID_ASCONF:
- case SCTP_CID_ASCONF_ACK:
- have_asconf = 1;
- break;
+ case SCTP_CID_AUTH:
+ have_auth = 1;
+ break;
+ case SCTP_CID_ASCONF:
+ case SCTP_CID_ASCONF_ACK:
+ have_asconf = 1;
+ break;
}
}
@@ -2001,25 +2000,24 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
for (i = 0; i < num_ext; i++) {
switch (param.ext->chunks[i]) {
- case SCTP_CID_FWD_TSN:
- if (net->sctp.prsctp_enable &&
- !asoc->peer.prsctp_capable)
+ case SCTP_CID_FWD_TSN:
+ if (net->sctp.prsctp_enable && !asoc->peer.prsctp_capable)
asoc->peer.prsctp_capable = 1;
- break;
- case SCTP_CID_AUTH:
- /* if the peer reports AUTH, assume that he
- * supports AUTH.
- */
- if (net->sctp.auth_enable)
- asoc->peer.auth_capable = 1;
- break;
- case SCTP_CID_ASCONF:
- case SCTP_CID_ASCONF_ACK:
- if (net->sctp.addip_enable)
- asoc->peer.asconf_capable = 1;
- break;
- default:
- break;
+ break;
+ case SCTP_CID_AUTH:
+ /* if the peer reports AUTH, assume that he
+ * supports AUTH.
+ */
+ if (net->sctp.auth_enable)
+ asoc->peer.auth_capable = 1;
+ break;
+ case SCTP_CID_ASCONF:
+ case SCTP_CID_ASCONF_ACK:
+ if (net->sctp.addip_enable)
+ asoc->peer.asconf_capable = 1;
+ break;
+ default:
+ break;
}
}
}
@@ -2252,7 +2250,7 @@ int sctp_verify_init(struct net *net, const struct sctp_association *asoc,
* VIOLATION error. We build the ERROR chunk here and let the normal
* error handling code build and send the packet.
*/
- if (param.v != (void*)chunk->chunk_end)
+ if (param.v != (void *)chunk->chunk_end)
return sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
/* The only missing mandatory param possible today is
@@ -2267,14 +2265,14 @@ int sctp_verify_init(struct net *net, const struct sctp_association *asoc,
result = sctp_verify_param(net, asoc, param, cid, chunk, errp);
switch (result) {
- case SCTP_IERROR_ABORT:
- case SCTP_IERROR_NOMEM:
- return 0;
- case SCTP_IERROR_ERROR:
- return 1;
- case SCTP_IERROR_NO_ERROR:
- default:
- break;
+ case SCTP_IERROR_ABORT:
+ case SCTP_IERROR_NOMEM:
+ return 0;
+ case SCTP_IERROR_ERROR:
+ return 1;
+ case SCTP_IERROR_NO_ERROR:
+ default:
+ break;
}
} /* for (loop through all parameters) */
@@ -2309,7 +2307,7 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
* added as the primary transport. The source address seems to
* be a a better choice than any of the embedded addresses.
*/
- if(!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE))
+ if (!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE))
goto nomem;
if (sctp_cmp_addr_exact(sctp_source(chunk), peer_addr))
@@ -3335,7 +3333,7 @@ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack,
while (asconf_ack_len > 0) {
if (asconf_ack_param->crr_id == asconf_param->crr_id) {
- switch(asconf_ack_param->param_hdr.type) {
+ switch (asconf_ack_param->param_hdr.type) {
case SCTP_PARAM_SUCCESS_REPORT:
return SCTP_ERROR_NO_ERROR;
case SCTP_PARAM_ERR_CAUSE:
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 1a6eef39ab2f..ded6db66fb24 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -22,9 +22,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
@@ -405,7 +404,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
struct sctp_transport *transport = (struct sctp_transport *) data;
struct sctp_association *asoc = transport->asoc;
struct net *net = sock_net(asoc->base.sk);
-
+
sctp_bh_lock_sock(asoc->base.sk);
if (sock_owned_by_user(asoc->base.sk)) {
pr_debug("%s: sock is busy\n", __func__);
@@ -544,7 +543,7 @@ static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
{
struct sctp_ulpevent *event;
- event = sctp_ulpevent_make_assoc_change(asoc,0, SCTP_CANT_STR_ASSOC,
+ event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
(__u16)error, 0, 0, NULL,
GFP_ATOMIC);
@@ -1116,7 +1115,7 @@ int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype,
sctp_init_cmd_seq(&commands);
debug_pre_sfn();
- status = (*state_fn->fn)(net, ep, asoc, subtype, event_arg, &commands);
+ status = state_fn->fn(net, ep, asoc, subtype, event_arg, &commands);
debug_post_sfn();
error = sctp_side_effects(event_type, subtype, state,
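A note on the call-style cleanup above: in C, calling through a function pointer needs no explicit dereference, so (*state_fn->fn)(...) and state_fn->fn(...) generate the identical call; the shorter form simply reads better. A fragment-level illustration, with hypothetical names:

	static int some_fn(int x) { return x; }	/* hypothetical */

	int (*fp)(int) = some_fn;

	fp(1);		/* identical to (*fp)(1) */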
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index dfe3f36ff2aa..483dcd71b3c5 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -22,9 +22,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
@@ -820,7 +819,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
SCTP_INC_STATS(net, SCTP_MIB_PASSIVEESTABS);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
- if (new_asoc->autoclose)
+ if (new_asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
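This hunk (and its twins further down) replaces the dedicated autoclose flag with a direct check of the association's timeout table. The substitution is sound because, stated here as an assumption about the association setup code of this era, the autoclose slot is filled in from the socket value at init time, so a non-zero entry carries the same information:

	/* Assumed init-time relationship; it makes the timeouts[] test
	 * equivalent to the removed asoc->autoclose flag:
	 */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
			(unsigned long)sp->autoclose * HZ;

Dropping the duplicated field leaves a single source of truth for whether autoclose is armed.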
@@ -908,7 +907,7 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(struct net *net,
SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
SCTP_INC_STATS(net, SCTP_MIB_ACTIVEESTABS);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
- if (asoc->autoclose)
+ if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
@@ -2946,7 +2945,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(struct net *net,
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
- error = sctp_eat_data(asoc, chunk, commands );
+ error = sctp_eat_data(asoc, chunk, commands);
switch (error) {
case SCTP_IERROR_NO_ERROR:
break;
@@ -2970,7 +2969,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(struct net *net,
if (chunk->chunk_hdr->flags & SCTP_DATA_SACK_IMM)
force = SCTP_FORCE();
- if (asoc->autoclose) {
+ if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
}
@@ -3067,7 +3066,7 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(struct net *net,
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
- error = sctp_eat_data(asoc, chunk, commands );
+ error = sctp_eat_data(asoc, chunk, commands);
switch (error) {
case SCTP_IERROR_NO_ERROR:
case SCTP_IERROR_HIGH_TSN:
@@ -3682,8 +3681,7 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net,
asconf_ack->dest = chunk->source;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack));
if (asoc->new_transport) {
- sctp_sf_heartbeat(ep, asoc, type, asoc->new_transport,
- commands);
+ sctp_sf_heartbeat(ep, asoc, type, asoc->new_transport, commands);
((struct sctp_association *)asoc)->new_transport = NULL;
}
@@ -3766,7 +3764,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net,
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
- sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
@@ -3800,7 +3798,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net,
/* We are going to ABORT, so we might as well stop
* processing the rest of the chunks in the packet.
*/
- sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
@@ -3878,7 +3876,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net,
SCTP_CHUNK(chunk));
/* Count this as receiving DATA. */
- if (asoc->autoclose) {
+ if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
}
@@ -4452,7 +4450,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
void *arg,
sctp_cmd_seq_t *commands)
{
- static const char err_str[]="The following chunk had invalid length:";
+ static const char err_str[] = "The following chunk had invalid length:";
return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
sizeof(err_str));
@@ -4515,7 +4513,7 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
void *arg,
sctp_cmd_seq_t *commands)
{
- static const char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:";
+ static const char err_str[] = "The cumulative tsn ack beyond the max tsn currently sent:";
return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
sizeof(err_str));
@@ -4535,7 +4533,7 @@ static sctp_disposition_t sctp_sf_violation_chunk(
void *arg,
sctp_cmd_seq_t *commands)
{
- static const char err_str[]="The following chunk violates protocol:";
+ static const char err_str[] = "The following chunk violates protocol:";
if (!asoc)
return sctp_sf_violation(net, ep, asoc, type, arg, commands);
@@ -4611,7 +4609,7 @@ sctp_disposition_t sctp_sf_do_prm_asoc(struct net *net,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *repl;
- struct sctp_association* my_asoc;
+ struct sctp_association *my_asoc;
/* The comment below says that we enter COOKIE-WAIT AFTER
* sending the INIT, but that doesn't actually work in our
@@ -5267,7 +5265,7 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
- if (asoc->autoclose)
+ if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
@@ -5346,7 +5344,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown_ack(
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
- if (asoc->autoclose)
+ if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
@@ -6001,7 +5999,7 @@ static struct sctp_packet *sctp_ootb_pkt_new(struct net *net,
/* Special case the INIT-ACK as there is no peer's vtag
* yet.
*/
- switch(chunk->chunk_hdr->type) {
+ switch (chunk->chunk_hdr->type) {
case SCTP_CID_INIT_ACK:
{
sctp_initack_chunk_t *initack;
@@ -6018,7 +6016,7 @@ static struct sctp_packet *sctp_ootb_pkt_new(struct net *net,
/* Special case the INIT and stale COOKIE_ECHO as there is no
* vtag yet.
*/
- switch(chunk->chunk_hdr->type) {
+ switch (chunk->chunk_hdr->type) {
case SCTP_CID_INIT:
{
sctp_init_chunk_t *init;
@@ -6208,7 +6206,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
*/
if (*sk->sk_prot_creator->memory_pressure) {
if (sctp_tsnmap_has_gap(map) &&
- (sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
+ (sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
pr_debug("%s: under pressure, reneging for tsn:%u\n",
__func__, tsn);
deliver = SCTP_CMD_RENEGE;
@@ -6232,7 +6230,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
/* We are going to ABORT, so we might as well stop
* processing the rest of the chunks in the packet.
*/
- sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index c5999b2dde7d..a987d54b379c 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -22,9 +22,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
@@ -70,7 +69,7 @@ static const sctp_sm_table_entry_t bug = {
if ((event_subtype._type > (_max))) { \
pr_warn("table %p possible attack: event %d exceeds max %d\n", \
_table, event_subtype._type, _max); \
- rtn = &bug; \
+ rtn = &bug; \
} else \
rtn = &_table[event_subtype._type][(int)state]; \
\
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 72046b9729a8..d32dae78a486 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -28,9 +28,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
@@ -83,7 +82,7 @@ static int sctp_writeable(struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
size_t msg_len);
-static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p);
+static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
static void sctp_wait_for_close(struct sock *sk, long timeo);
@@ -953,7 +952,7 @@ int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
*
* Returns 0 if ok, <0 errno code on error.
*/
-static int sctp_setsockopt_bindx(struct sock* sk,
+static int sctp_setsockopt_bindx(struct sock *sk,
struct sockaddr __user *addrs,
int addrs_size, int op)
{
@@ -1040,7 +1039,7 @@ out:
* Common routine for handling connect() and sctp_connectx().
* Connect will come in with just a single address.
*/
-static int __sctp_connect(struct sock* sk,
+static int __sctp_connect(struct sock *sk,
struct sockaddr *kaddrs,
int addrs_size,
sctp_assoc_t *assoc_id)
@@ -1300,7 +1299,7 @@ out_free:
*
* Returns >=0 if ok, <0 errno code on error.
*/
-static int __sctp_setsockopt_connectx(struct sock* sk,
+static int __sctp_setsockopt_connectx(struct sock *sk,
struct sockaddr __user *addrs,
int addrs_size,
sctp_assoc_t *assoc_id)
@@ -1338,7 +1337,7 @@ static int __sctp_setsockopt_connectx(struct sock* sk,
* This is an older interface. It's kept for backward compatibility
* to the option that doesn't provide association id.
*/
-static int sctp_setsockopt_connectx_old(struct sock* sk,
+static int sctp_setsockopt_connectx_old(struct sock *sk,
struct sockaddr __user *addrs,
int addrs_size)
{
@@ -1351,7 +1350,7 @@ static int sctp_setsockopt_connectx_old(struct sock* sk,
* indication to the call. Error is always negative and association id is
* always positive.
*/
-static int sctp_setsockopt_connectx(struct sock* sk,
+static int sctp_setsockopt_connectx(struct sock *sk,
struct sockaddr __user *addrs,
int addrs_size)
{
@@ -1374,7 +1373,7 @@ static int sctp_setsockopt_connectx(struct sock* sk,
* addrs_num structure member. That way we can re-use the existing
* code.
*/
-static int sctp_getsockopt_connectx3(struct sock* sk, int len,
+static int sctp_getsockopt_connectx3(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
@@ -1569,7 +1568,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
struct net *net = sock_net(sk);
struct sctp_sock *sp;
struct sctp_endpoint *ep;
- struct sctp_association *new_asoc=NULL, *asoc=NULL;
+ struct sctp_association *new_asoc = NULL, *asoc = NULL;
struct sctp_transport *transport, *chunk_tp;
struct sctp_chunk *chunk;
union sctp_addr to;
@@ -2196,6 +2195,7 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
unsigned int optlen)
{
struct sctp_sock *sp = sctp_sk(sk);
+ struct net *net = sock_net(sk);
/* Applicable to UDP-style socket only */
if (sctp_style(sk, TCP))
@@ -2205,6 +2205,9 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
if (copy_from_user(&sp->autoclose, optval, optlen))
return -EFAULT;
+ if (sp->autoclose > net->sctp.max_autoclose)
+ sp->autoclose = net->sctp.max_autoclose;
+
return 0;
}
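For context, the clamp added above bounds what userspace can request through the SCTP_AUTOCLOSE socket option; values above net.sctp.max_autoclose are now silently capped rather than stored verbatim. A minimal sketch of the option in use, assuming the lksctp-tools userspace headers:

	#include <netinet/in.h>
	#include <netinet/sctp.h>
	#include <sys/socket.h>

	/* Ask for idle associations to be auto-closed after 30 seconds
	 * (applicable to UDP-style sockets only).
	 */
	static int set_autoclose(int sd)
	{
		int secs = 30;

		return setsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE,
				  &secs, sizeof(secs));
	}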
@@ -2459,7 +2462,7 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
int hb_change, pmtud_change, sackdelay_change;
if (optlen != sizeof(struct sctp_paddrparams))
- return - EINVAL;
+ return -EINVAL;
if (copy_from_user(&params, optval, optlen))
return -EFAULT;
@@ -2480,7 +2483,7 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
/* If an address other than INADDR_ANY is specified, and
* no transport is found, then the request is invalid.
*/
- if (!sctp_is_any(sk, ( union sctp_addr *)&params.spp_address)) {
+ if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
trans = sctp_addr_id2transport(sk, &params.spp_address,
params.spp_assoc_id);
if (!trans)
@@ -2575,8 +2578,11 @@ static int sctp_setsockopt_delayed_ack(struct sock *sk,
if (params.sack_delay == 0 && params.sack_freq == 0)
return 0;
} else if (optlen == sizeof(struct sctp_assoc_value)) {
- pr_warn("Use of struct sctp_assoc_value in delayed_ack socket option deprecated\n");
- pr_warn("Use struct sctp_sack_info instead\n");
+ pr_warn_ratelimited(DEPRECATED
+ "%s (pid %d) "
+ "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
+ "Use struct sctp_sack_info instead\n",
+ current->comm, task_pid_nr(current));
if (copy_from_user(&params, optval, optlen))
return -EFAULT;
@@ -2585,7 +2591,7 @@ static int sctp_setsockopt_delayed_ack(struct sock *sk,
else
params.sack_freq = 0;
} else
- return - EINVAL;
+ return -EINVAL;
/* Validate value parameter. */
if (params.sack_delay > 500)
@@ -2811,6 +2817,8 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigne
{
struct sctp_rtoinfo rtoinfo;
struct sctp_association *asoc;
+ unsigned long rto_min, rto_max;
+ struct sctp_sock *sp = sctp_sk(sk);
if (optlen != sizeof (struct sctp_rtoinfo))
return -EINVAL;
@@ -2824,26 +2832,36 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigne
if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
+ rto_max = rtoinfo.srto_max;
+ rto_min = rtoinfo.srto_min;
+
+ if (rto_max)
+ rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max;
+ else
+ rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max;
+
+ if (rto_min)
+ rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min;
+ else
+ rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min;
+
+ if (rto_min > rto_max)
+ return -EINVAL;
+
if (asoc) {
if (rtoinfo.srto_initial != 0)
asoc->rto_initial =
msecs_to_jiffies(rtoinfo.srto_initial);
- if (rtoinfo.srto_max != 0)
- asoc->rto_max = msecs_to_jiffies(rtoinfo.srto_max);
- if (rtoinfo.srto_min != 0)
- asoc->rto_min = msecs_to_jiffies(rtoinfo.srto_min);
+ asoc->rto_max = rto_max;
+ asoc->rto_min = rto_min;
} else {
/* If there is no association or the association-id = 0
* set the values to the endpoint.
*/
- struct sctp_sock *sp = sctp_sk(sk);
-
if (rtoinfo.srto_initial != 0)
sp->rtoinfo.srto_initial = rtoinfo.srto_initial;
- if (rtoinfo.srto_max != 0)
- sp->rtoinfo.srto_max = rtoinfo.srto_max;
- if (rtoinfo.srto_min != 0)
- sp->rtoinfo.srto_min = rtoinfo.srto_min;
+ sp->rtoinfo.srto_max = rto_max;
+ sp->rtoinfo.srto_min = rto_min;
}
return 0;
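The rewrite above tightens SCTP_RTOINFO handling: a zero srto_min or srto_max means "keep the current value", the effective pair is cross-checked so the minimum can never exceed the maximum, and association values are converted to jiffies while endpoint defaults stay in milliseconds. A hedged userspace sketch:

	#include <netinet/in.h>
	#include <netinet/sctp.h>
	#include <sys/socket.h>

	/* Tighten the RTO bounds on one association; srto_initial of 0
	 * keeps the current initial RTO.  The call now fails with EINVAL
	 * if the effective minimum would exceed the effective maximum.
	 */
	static int tighten_rto(int sd, sctp_assoc_t assoc_id)
	{
		struct sctp_rtoinfo rto = {
			.srto_assoc_id = assoc_id,
			.srto_initial  = 0,
			.srto_max      = 500,	/* ms */
			.srto_min      = 100,	/* ms */
		};

		return setsockopt(sd, IPPROTO_SCTP, SCTP_RTOINFO,
				  &rto, sizeof(rto));
	}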
@@ -2979,8 +2997,11 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
int val;
if (optlen == sizeof(int)) {
- pr_warn("Use of int in maxseg socket option deprecated\n");
- pr_warn("Use struct sctp_assoc_value instead\n");
+ pr_warn_ratelimited(DEPRECATED
+ "%s (pid %d) "
+ "Use of int in maxseg socket option.\n"
+ "Use struct sctp_assoc_value instead\n",
+ current->comm, task_pid_nr(current));
if (copy_from_user(&val, optval, optlen))
return -EFAULT;
params.assoc_id = 0;
@@ -3237,8 +3258,11 @@ static int sctp_setsockopt_maxburst(struct sock *sk,
int assoc_id = 0;
if (optlen == sizeof(int)) {
- pr_warn("Use of int in max_burst socket option deprecated\n");
- pr_warn("Use struct sctp_assoc_value instead\n");
+ pr_warn_ratelimited(DEPRECATED
+ "%s (pid %d) "
+ "Use of int in max_burst socket option deprecated.\n"
+ "Use struct sctp_assoc_value instead\n",
+ current->comm, task_pid_nr(current));
if (copy_from_user(&val, optval, optlen))
return -EFAULT;
} else if (optlen == sizeof(struct sctp_assoc_value)) {
@@ -3317,7 +3341,7 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
if (optlen < sizeof(struct sctp_hmacalgo))
return -EINVAL;
- hmacs= memdup_user(optval, optlen);
+ hmacs = memdup_user(optval, optlen);
if (IS_ERR(hmacs))
return PTR_ERR(hmacs);
@@ -3355,7 +3379,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
if (optlen <= sizeof(struct sctp_authkey))
return -EINVAL;
- authkey= memdup_user(optval, optlen);
+ authkey = memdup_user(optval, optlen);
if (IS_ERR(authkey))
return PTR_ERR(authkey);
@@ -3909,7 +3933,7 @@ static int sctp_init_sock(struct sock *sk)
*/
sp->hbinterval = net->sctp.hb_interval;
sp->pathmaxrxt = net->sctp.max_retrans_path;
- sp->pathmtu = 0; // allow default discovery
+ sp->pathmtu = 0; /* allow default discovery */
sp->sackdelay = net->sctp.sack_timeout;
sp->sackfreq = 2;
sp->param_flags = SPP_HB_ENABLE |
@@ -4452,7 +4476,7 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
/* If an address other than INADDR_ANY is specified, and
* no transport is found, then the request is invalid.
*/
- if (!sctp_is_any(sk, ( union sctp_addr *)&params.spp_address)) {
+ if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
trans = sctp_addr_id2transport(sk, &params.spp_address,
params.spp_assoc_id);
if (!trans) {
@@ -4558,12 +4582,15 @@ static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
if (copy_from_user(&params, optval, len))
return -EFAULT;
} else if (len == sizeof(struct sctp_assoc_value)) {
- pr_warn("Use of struct sctp_assoc_value in delayed_ack socket option deprecated\n");
- pr_warn("Use struct sctp_sack_info instead\n");
+ pr_warn_ratelimited(DEPRECATED
+ "%s (pid %d) "
+ "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
+ "Use struct sctp_sack_info instead\n",
+ current->comm, task_pid_nr(current));
if (copy_from_user(&params, optval, len))
return -EFAULT;
} else
- return - EINVAL;
+ return -EINVAL;
/* Get association, if sack_assoc_id != 0 and the socket is a one
* to many style socket, and an association was not found, then
@@ -4653,8 +4680,8 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
if (!asoc)
return -EINVAL;
- to = optval + offsetof(struct sctp_getaddrs,addrs);
- space_left = len - offsetof(struct sctp_getaddrs,addrs);
+ to = optval + offsetof(struct sctp_getaddrs, addrs);
+ space_left = len - offsetof(struct sctp_getaddrs, addrs);
list_for_each_entry(from, &asoc->peer.transport_addr_list,
transports) {
@@ -4714,7 +4741,7 @@ static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
memcpy(to, &temp, addrlen);
to += addrlen;
- cnt ++;
+ cnt++;
space_left -= addrlen;
*bytes_copied += addrlen;
}
@@ -4763,8 +4790,8 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
bp = &asoc->base.bind_addr;
}
- to = optval + offsetof(struct sctp_getaddrs,addrs);
- space_left = len - offsetof(struct sctp_getaddrs,addrs);
+ to = optval + offsetof(struct sctp_getaddrs, addrs);
+ space_left = len - offsetof(struct sctp_getaddrs, addrs);
addrs = kmalloc(space_left, GFP_KERNEL);
if (!addrs)
@@ -4803,7 +4830,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
memcpy(buf, &temp, addrlen);
buf += addrlen;
bytes_copied += addrlen;
- cnt ++;
+ cnt++;
space_left -= addrlen;
}
@@ -5075,7 +5102,7 @@ static int sctp_getsockopt_associnfo(struct sock *sk, int len,
assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life);
list_for_each(pos, &asoc->peer.transport_addr_list) {
- cnt ++;
+ cnt++;
}
assocparams.sasoc_number_peer_destinations = cnt;
@@ -5203,8 +5230,11 @@ static int sctp_getsockopt_maxseg(struct sock *sk, int len,
struct sctp_association *asoc;
if (len == sizeof(int)) {
- pr_warn("Use of int in maxseg socket option deprecated\n");
- pr_warn("Use struct sctp_assoc_value instead\n");
+ pr_warn_ratelimited(DEPRECATED
+ "%s (pid %d) "
+ "Use of int in maxseg socket option.\n"
+ "Use struct sctp_assoc_value instead\n",
+ current->comm, task_pid_nr(current));
params.assoc_id = 0;
} else if (len >= sizeof(struct sctp_assoc_value)) {
len = sizeof(struct sctp_assoc_value);
@@ -5295,8 +5325,11 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
struct sctp_association *asoc;
if (len == sizeof(int)) {
- pr_warn("Use of int in max_burst socket option deprecated\n");
- pr_warn("Use struct sctp_assoc_value instead\n");
+ pr_warn_ratelimited(DEPRECATED
+ "%s (pid %d) "
+ "Use of int in max_burst socket option.\n"
+ "Use struct sctp_assoc_value instead\n",
+ current->comm, task_pid_nr(current));
params.assoc_id = 0;
} else if (len >= sizeof(struct sctp_assoc_value)) {
len = sizeof(struct sctp_assoc_value);
@@ -5428,7 +5461,8 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
return -EFAULT;
num:
len = sizeof(struct sctp_authchunks) + num_chunks;
- if (put_user(len, optlen)) return -EFAULT;
+ if (put_user(len, optlen))
+ return -EFAULT;
if (put_user(num_chunks, &p->gauth_number_of_chunks))
return -EFAULT;
return 0;
@@ -5460,7 +5494,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
return -EINVAL;
if (asoc)
- ch = (struct sctp_chunks_param*)asoc->c.auth_chunks;
+ ch = (struct sctp_chunks_param *)asoc->c.auth_chunks;
else
ch = sctp_sk(sk)->ep->auth_chunk_list;
@@ -6402,7 +6436,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
* Note: This function is the same function as in core/datagram.c
* with a few modifications to make lksctp work.
*/
-static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)
+static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p)
{
int error;
DEFINE_WAIT(wait);
diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c
index 6007124aefa0..b9c8521c1a98 100644
--- a/net/sctp/ssnmap.c
+++ b/net/sctp/ssnmap.c
@@ -18,9 +18,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 6b36561a1b3b..7135e617ab0f 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -19,9 +19,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
@@ -56,11 +55,16 @@ extern long sysctl_sctp_mem[3];
extern int sysctl_sctp_rmem[3];
extern int sysctl_sctp_wmem[3];
-static int proc_sctp_do_hmac_alg(struct ctl_table *ctl,
- int write,
+static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp,
-
loff_t *ppos);
+
static struct ctl_table sctp_table[] = {
{
.procname = "sctp_mem",
@@ -102,17 +106,17 @@ static struct ctl_table sctp_net_table[] = {
.data = &init_net.sctp.rto_min,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
+ .proc_handler = proc_sctp_do_rto_min,
.extra1 = &one,
- .extra2 = &timer_max
+ .extra2 = &init_net.sctp.rto_max
},
{
.procname = "rto_max",
.data = &init_net.sctp.rto_max,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &one,
+ .proc_handler = proc_sctp_do_rto_max,
+ .extra1 = &init_net.sctp.rto_min,
.extra2 = &timer_max
},
{
@@ -294,8 +298,7 @@ static struct ctl_table sctp_net_table[] = {
{ /* sentinel */ }
};
-static int proc_sctp_do_hmac_alg(struct ctl_table *ctl,
- int write,
+static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
@@ -342,6 +345,60 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl,
return ret;
}
+static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ struct net *net = current->nsproxy->net_ns;
+ int new_value;
+ struct ctl_table tbl;
+ unsigned int min = *(unsigned int *) ctl->extra1;
+ unsigned int max = *(unsigned int *) ctl->extra2;
+ int ret;
+
+ memset(&tbl, 0, sizeof(struct ctl_table));
+ tbl.maxlen = sizeof(unsigned int);
+
+ if (write)
+ tbl.data = &new_value;
+ else
+ tbl.data = &net->sctp.rto_min;
+ ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
+ if (write) {
+ if (ret || new_value > max || new_value < min)
+ return -EINVAL;
+ net->sctp.rto_min = new_value;
+ }
+ return ret;
+}
+
+static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ struct net *net = current->nsproxy->net_ns;
+ int new_value;
+ struct ctl_table tbl;
+ unsigned int min = *(unsigned int *) ctl->extra1;
+ unsigned int max = *(unsigned int *) ctl->extra2;
+ int ret;
+
+ memset(&tbl, 0, sizeof(struct ctl_table));
+ tbl.maxlen = sizeof(unsigned int);
+
+ if (write)
+ tbl.data = &new_value;
+ else
+ tbl.data = &net->sctp.rto_max;
+ ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
+ if (write) {
+ if (ret || new_value > max || new_value < min)
+ return -EINVAL;
+ net->sctp.rto_max = new_value;
+ }
+ return ret;
+}
+
int sctp_sysctl_net_register(struct net *net)
{
struct ctl_table *table;
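Together with the extra1/extra2 rewiring in the table hunk earlier, the two handlers above make rto_min and rto_max mutually bounding: every write is validated against the live value of the other knob, so the pair can never cross. A small sketch of exercising them from C, assuming the standard /proc/sys/net/sctp paths:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	/* Writes now fail (EINVAL) if the new rto_min would exceed the
	 * current rto_max of this net namespace, and vice versa.
	 */
	static int write_sysctl(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = write(fd, val, strlen(val));
		close(fd);
		return n < 0 ? -1 : 0;
	}

	/* e.g. write_sysctl("/proc/sys/net/sctp/rto_min", "200"); */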
@@ -367,7 +424,7 @@ void sctp_sysctl_net_unregister(struct net *net)
kfree(table);
}
-static struct ctl_table_header * sctp_sysctl_header;
+static struct ctl_table_header *sctp_sysctl_header;
/* Sysctl registration. */
void sctp_sysctl_register(void)
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index e332efb124cc..d0810dc5f079 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -24,9 +24,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
@@ -573,7 +572,7 @@ void sctp_transport_burst_limited(struct sctp_transport *t)
u32 old_cwnd = t->cwnd;
u32 max_burst_bytes;
- if (t->burst_limited)
+ if (t->burst_limited || asoc->max_burst == 0)
return;
max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index fbda20028285..7635f9f2311d 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -21,9 +21,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 81089ed65456..85c64658bd0b 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -22,9 +22,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 1c1484ed605d..5dc94117e9d4 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -21,9 +21,8 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
@@ -44,9 +43,9 @@
#include <net/sctp/sm.h>
/* Forward declarations for internal helpers. */
-static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
+static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *);
-static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
+static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
@@ -108,7 +107,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
event = sctp_ulpq_reasm(ulpq, event);
/* Do ordering if needed. */
- if ((event) && (event->msg_flags & MSG_EOR)){
+ if ((event) && (event->msg_flags & MSG_EOR)) {
/* Create a temporary list to collect chunks on. */
skb_queue_head_init(&temp);
__skb_queue_tail(&temp, sctp_event2skb(event));
@@ -337,7 +336,8 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
pos = f_frag->next;
/* Get the last skb in the f_frag's frag_list if present. */
- for (last = list; list; last = list, list = list->next);
+ for (last = list; list; last = list, list = list->next)
+ ;
/* Add the list of remaining fragments to the first fragments
* frag_list.
@@ -727,7 +727,7 @@ static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
/* Do ordering if needed. */
- if ((event) && (event->msg_flags & MSG_EOR)){
+ if ((event) && (event->msg_flags & MSG_EOR)) {
skb_queue_head_init(&temp);
__skb_queue_tail(&temp, sctp_event2skb(event));
diff --git a/net/socket.c b/net/socket.c
index 0b18693f2be6..879933aaed4c 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1445,48 +1445,61 @@ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
err = fd1;
goto out_release_both;
}
+
fd2 = get_unused_fd_flags(flags);
if (unlikely(fd2 < 0)) {
err = fd2;
- put_unused_fd(fd1);
- goto out_release_both;
+ goto out_put_unused_1;
}
newfile1 = sock_alloc_file(sock1, flags, NULL);
if (unlikely(IS_ERR(newfile1))) {
err = PTR_ERR(newfile1);
- put_unused_fd(fd1);
- put_unused_fd(fd2);
- goto out_release_both;
+ goto out_put_unused_both;
}
newfile2 = sock_alloc_file(sock2, flags, NULL);
if (IS_ERR(newfile2)) {
err = PTR_ERR(newfile2);
- fput(newfile1);
- put_unused_fd(fd1);
- put_unused_fd(fd2);
- sock_release(sock2);
- goto out;
+ goto out_fput_1;
}
+ err = put_user(fd1, &usockvec[0]);
+ if (err)
+ goto out_fput_both;
+
+ err = put_user(fd2, &usockvec[1]);
+ if (err)
+ goto out_fput_both;
+
audit_fd_pair(fd1, fd2);
+
fd_install(fd1, newfile1);
fd_install(fd2, newfile2);
 /* fd1 and fd2 may already refer to other descriptors.
 * Not a kernel problem.
*/
- err = put_user(fd1, &usockvec[0]);
- if (!err)
- err = put_user(fd2, &usockvec[1]);
- if (!err)
- return 0;
+ return 0;
- sys_close(fd2);
- sys_close(fd1);
- return err;
+out_fput_both:
+ fput(newfile2);
+ fput(newfile1);
+ put_unused_fd(fd2);
+ put_unused_fd(fd1);
+ goto out;
+out_fput_1:
+ fput(newfile1);
+ put_unused_fd(fd2);
+ put_unused_fd(fd1);
+ sock_release(sock2);
+ goto out;
+
+out_put_unused_both:
+ put_unused_fd(fd2);
+out_put_unused_1:
+ put_unused_fd(fd1);
out_release_both:
sock_release(sock2);
out_release_1:
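The reworked socketpair() unwind follows the usual kernel ordering rule: every fallible step, now including the put_user() copies into usockvec, happens before fd_install(), because an installed fd is instantly visible to other threads and cannot be revoked safely (the old sys_close() fallback could even close an fd the application had already reused). A condensed, single-fd sketch of the pattern, not the literal syscall:

	fd = get_unused_fd_flags(flags);	/* reserve; not yet visible */
	if (fd < 0)
		return fd;
	file = sock_alloc_file(sock, flags, NULL);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}
	if (put_user(fd, user_ptr)) {		/* last fallible step */
		fput(file);
		put_unused_fd(fd);
		return -EFAULT;
	}
	fd_install(fd, file);			/* point of no return */
	return 0;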
@@ -1973,7 +1986,7 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
return -EFAULT;
if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
- return -EINVAL;
+ kmsg->msg_namelen = sizeof(struct sockaddr_storage);
return 0;
}
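Rather than rejecting the whole call, an oversized msg_namelen is now truncated to sizeof(struct sockaddr_storage), the largest address the core can represent anyway; callers that over-report their name buffer keep working and still get the real length written back. A sketch of a recvmsg() caller that previously drew -EINVAL:

	struct sockaddr_storage ss;
	char buf[64];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = {
		.msg_name    = &ss,
		.msg_namelen = 4096,	/* over-reported; now capped to sizeof(ss) */
		.msg_iov     = &iov,
		.msg_iovlen  = 1,
	};

	recvmsg(sd, &msg, 0);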
@@ -2968,11 +2981,8 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
struct compat_ifreq __user *ifr32)
{
struct ifreq kifr;
- struct ifreq __user *uifr;
mm_segment_t old_fs;
int err;
- u32 data;
- void __user *datap;
switch (cmd) {
case SIOCBONDENSLAVE:
@@ -2989,26 +2999,13 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
set_fs(old_fs);
return err;
- case SIOCBONDSLAVEINFOQUERY:
- case SIOCBONDINFOQUERY:
- uifr = compat_alloc_user_space(sizeof(*uifr));
- if (copy_in_user(&uifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
- return -EFAULT;
-
- if (get_user(data, &ifr32->ifr_ifru.ifru_data))
- return -EFAULT;
-
- datap = compat_ptr(data);
- if (put_user(datap, &uifr->ifr_ifru.ifru_data))
- return -EFAULT;
-
- return dev_ioctl(net, cmd, uifr);
default:
return -ENOIOCTLCMD;
}
}
-static int siocdevprivate_ioctl(struct net *net, unsigned int cmd,
+/* Handle ioctls that use ifreq::ifr_data and just need struct ifreq converted */
+static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd,
struct compat_ifreq __user *u_ifreq32)
{
struct ifreq __user *u_ifreq64;
@@ -3019,19 +3016,16 @@ static int siocdevprivate_ioctl(struct net *net, unsigned int cmd,
if (copy_from_user(&tmp_buf[0], &(u_ifreq32->ifr_ifrn.ifrn_name[0]),
IFNAMSIZ))
return -EFAULT;
- if (__get_user(data32, &u_ifreq32->ifr_ifru.ifru_data))
+ if (get_user(data32, &u_ifreq32->ifr_ifru.ifru_data))
return -EFAULT;
data64 = compat_ptr(data32);
u_ifreq64 = compat_alloc_user_space(sizeof(*u_ifreq64));
- /* Don't check these user accesses, just let that get trapped
- * in the ioctl handler instead.
- */
if (copy_to_user(&u_ifreq64->ifr_ifrn.ifrn_name[0], &tmp_buf[0],
IFNAMSIZ))
return -EFAULT;
- if (__put_user(data64, &u_ifreq64->ifr_ifru.ifru_data))
+ if (put_user(data64, &u_ifreq64->ifr_ifru.ifru_data))
return -EFAULT;
return dev_ioctl(net, cmd, u_ifreq64);
@@ -3111,27 +3105,6 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
return err;
}
-static int compat_siocshwtstamp(struct net *net, struct compat_ifreq __user *uifr32)
-{
- void __user *uptr;
- compat_uptr_t uptr32;
- struct ifreq __user *uifr;
-
- uifr = compat_alloc_user_space(sizeof(*uifr));
- if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq)))
- return -EFAULT;
-
- if (get_user(uptr32, &uifr32->ifr_data))
- return -EFAULT;
-
- uptr = compat_ptr(uptr32);
-
- if (put_user(uptr, &uifr->ifr_data))
- return -EFAULT;
-
- return dev_ioctl(net, SIOCSHWTSTAMP, uifr);
-}
-
struct rtentry32 {
u32 rt_pad1;
struct sockaddr rt_dst; /* target address */
@@ -3243,7 +3216,7 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
struct net *net = sock_net(sk);
if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15))
- return siocdevprivate_ioctl(net, cmd, argp);
+ return compat_ifr_data_ioctl(net, cmd, argp);
switch (cmd) {
case SIOCSIFBR:
@@ -3263,8 +3236,6 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
case SIOCBONDENSLAVE:
case SIOCBONDRELEASE:
case SIOCBONDSETHWADDR:
- case SIOCBONDSLAVEINFOQUERY:
- case SIOCBONDINFOQUERY:
case SIOCBONDCHANGEACTIVE:
return bond_ioctl(net, cmd, argp);
case SIOCADDRT:
@@ -3274,8 +3245,11 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
return do_siocgstamp(net, sock, cmd, argp);
case SIOCGSTAMPNS:
return do_siocgstampns(net, sock, cmd, argp);
+ case SIOCBONDSLAVEINFOQUERY:
+ case SIOCBONDINFOQUERY:
case SIOCSHWTSTAMP:
- return compat_siocshwtstamp(net, argp);
+ case SIOCGHWTSTAMP:
+ return compat_ifr_data_ioctl(net, cmd, argp);
case FIOSETOWN:
case SIOCSPGRP:
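With the table change above, the bonding info queries and the hardware timestamping ioctls, including the new SIOCGHWTSTAMP getter, all share compat_ifr_data_ioctl(), since each of them only needs ifr_name copied and the 32-bit ifr_data pointer widened. A hedged userspace sketch of the ioctl shape being translated:

	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>

	/* Read back an interface's current hardware timestamping config;
	 * only ifr_name and the ifr_data pointer matter here.
	 */
	static int get_hwtstamp(int sd, const char *ifname,
				struct hwtstamp_config *cfg)
	{
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)cfg;
		return ioctl(sd, SIOCGHWTSTAMP, &ifr);
	}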
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 97912b40c254..42fdfc634e56 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1517,7 +1517,7 @@ out:
static int
gss_refresh_null(struct rpc_task *task)
{
- return -EACCES;
+ return 0;
}
static __be32 *
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index d0d14a04dce1..bf04b30a788a 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -471,15 +471,6 @@ struct rpc_filelist {
umode_t mode;
};
-static int rpc_delete_dentry(const struct dentry *dentry)
-{
- return 1;
-}
-
-static const struct dentry_operations rpc_dentry_operations = {
- .d_delete = rpc_delete_dentry,
-};
-
static struct inode *
rpc_get_inode(struct super_block *sb, umode_t mode)
{
@@ -1266,7 +1257,7 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = RPCAUTH_GSSMAGIC;
sb->s_op = &s_ops;
- sb->s_d_op = &rpc_dentry_operations;
+ sb->s_d_op = &simple_dentry_operations;
sb->s_time_gran = 1;
inode = rpc_get_inode(sb, S_IFDIR | S_IRUGO | S_IXUGO);
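The deleted rpc_delete_dentry() unconditionally returned 1, meaning "do not cache this dentry, free it on the last dput", which is exactly what the VFS-provided simple_dentry_operations does, so the private copy is redundant. For reference, the shared helper looks roughly like this in fs/libfs.c of this era (quoted from memory, so treat it as an assumption):

	int always_delete_dentry(const struct dentry *dentry)
	{
		return 1;
	}

	const struct dentry_operations simple_dentry_operations = {
		.d_delete = always_delete_dentry,
	};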
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 0d4402587fdf..bf860d9e75af 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -621,12 +621,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
if (!p)
break; /* No more bearers to try */
- if (tipc_bearer_blocked(p)) {
- if (!s || tipc_bearer_blocked(s))
- continue; /* Can't use either bearer */
- b = s;
- }
-
tipc_nmap_diff(&bcbearer->remains, &b->nodes,
&bcbearer->remains_new);
if (bcbearer->remains_new.count == bcbearer->remains.count)
@@ -800,7 +794,7 @@ void tipc_bclink_init(void)
void tipc_bclink_stop(void)
{
spin_lock_bh(&bc_lock);
- tipc_link_stop(bcl);
+ tipc_link_purge_queues(bcl);
spin_unlock_bh(&bc_lock);
memset(bclink, 0, sizeof(*bclink));
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 3f9707a16d06..2d456ab0e843 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -1,8 +1,8 @@
/*
* net/tipc/bearer.c: TIPC bearer code
*
- * Copyright (c) 1996-2006, Ericsson AB
- * Copyright (c) 2004-2006, 2010-2011, Wind River Systems
+ * Copyright (c) 1996-2006, 2013, Ericsson AB
+ * Copyright (c) 2004-2006, 2010-2013, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -41,8 +41,13 @@
#define MAX_ADDR_STR 60
-static struct tipc_media *media_list[MAX_MEDIA];
-static u32 media_count;
+static struct tipc_media * const media_info_array[] = {
+ &eth_media_info,
+#ifdef CONFIG_TIPC_MEDIA_IB
+ &ib_media_info,
+#endif
+ NULL
+};
struct tipc_bearer tipc_bearers[MAX_BEARERS];
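Media types are now fixed at build time: the NULL-terminated media_info_array replaces the old media_list[]/media_count pair, and the runtime tipc_register_media() API is deleted in the hunks below. Adding a media type becomes a compile-time edit; a sketch with a hypothetical CONFIG_TIPC_MEDIA_FOO entry:

	static struct tipc_media * const media_info_array[] = {
		&eth_media_info,
	#ifdef CONFIG_TIPC_MEDIA_IB
		&ib_media_info,
	#endif
	#ifdef CONFIG_TIPC_MEDIA_FOO		/* hypothetical new media */
		&foo_media_info,
	#endif
		NULL
	};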
@@ -55,11 +60,11 @@ struct tipc_media *tipc_media_find(const char *name)
{
u32 i;
- for (i = 0; i < media_count; i++) {
- if (!strcmp(media_list[i]->name, name))
- return media_list[i];
+ for (i = 0; media_info_array[i] != NULL; i++) {
+ if (!strcmp(media_info_array[i]->name, name))
+ break;
}
- return NULL;
+ return media_info_array[i];
}
/**
@@ -69,44 +74,11 @@ static struct tipc_media *media_find_id(u8 type)
{
u32 i;
- for (i = 0; i < media_count; i++) {
- if (media_list[i]->type_id == type)
- return media_list[i];
+ for (i = 0; media_info_array[i] != NULL; i++) {
+ if (media_info_array[i]->type_id == type)
+ break;
}
- return NULL;
-}
-
-/**
- * tipc_register_media - register a media type
- *
- * Bearers for this media type must be activated separately at a later stage.
- */
-int tipc_register_media(struct tipc_media *m_ptr)
-{
- int res = -EINVAL;
-
- write_lock_bh(&tipc_net_lock);
-
- if ((strlen(m_ptr->name) + 1) > TIPC_MAX_MEDIA_NAME)
- goto exit;
- if (m_ptr->priority > TIPC_MAX_LINK_PRI)
- goto exit;
- if ((m_ptr->tolerance < TIPC_MIN_LINK_TOL) ||
- (m_ptr->tolerance > TIPC_MAX_LINK_TOL))
- goto exit;
- if (media_count >= MAX_MEDIA)
- goto exit;
- if (tipc_media_find(m_ptr->name) || media_find_id(m_ptr->type_id))
- goto exit;
-
- media_list[media_count] = m_ptr;
- media_count++;
- res = 0;
-exit:
- write_unlock_bh(&tipc_net_lock);
- if (res)
- pr_warn("Media <%s> registration error\n", m_ptr->name);
- return res;
+ return media_info_array[i];
}
/**
@@ -144,13 +116,11 @@ struct sk_buff *tipc_media_get_names(void)
if (!buf)
return NULL;
- read_lock_bh(&tipc_net_lock);
- for (i = 0; i < media_count; i++) {
+ for (i = 0; media_info_array[i] != NULL; i++) {
tipc_cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME,
- media_list[i]->name,
- strlen(media_list[i]->name) + 1);
+ media_info_array[i]->name,
+ strlen(media_info_array[i]->name) + 1);
}
- read_unlock_bh(&tipc_net_lock);
return buf;
}
@@ -215,31 +185,12 @@ struct tipc_bearer *tipc_bearer_find(const char *name)
}
/**
- * tipc_bearer_find_interface - locates bearer object with matching interface name
- */
-struct tipc_bearer *tipc_bearer_find_interface(const char *if_name)
-{
- struct tipc_bearer *b_ptr;
- char *b_if_name;
- u32 i;
-
- for (i = 0, b_ptr = tipc_bearers; i < MAX_BEARERS; i++, b_ptr++) {
- if (!b_ptr->active)
- continue;
- b_if_name = strchr(b_ptr->name, ':') + 1;
- if (!strcmp(b_if_name, if_name))
- return b_ptr;
- }
- return NULL;
-}
-
-/**
* tipc_bearer_get_names - record names of bearers in buffer
*/
struct sk_buff *tipc_bearer_get_names(void)
{
struct sk_buff *buf;
- struct tipc_bearer *b_ptr;
+ struct tipc_bearer *b;
int i, j;
buf = tipc_cfg_reply_alloc(MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME));
@@ -247,13 +198,13 @@ struct sk_buff *tipc_bearer_get_names(void)
return NULL;
read_lock_bh(&tipc_net_lock);
- for (i = 0; i < media_count; i++) {
+ for (i = 0; media_info_array[i] != NULL; i++) {
for (j = 0; j < MAX_BEARERS; j++) {
- b_ptr = &tipc_bearers[j];
- if (b_ptr->active && (b_ptr->media == media_list[i])) {
+ b = &tipc_bearers[j];
+ if (b->active && (b->media == media_info_array[i])) {
tipc_cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME,
- b_ptr->name,
- strlen(b_ptr->name) + 1);
+ b->name,
+ strlen(b->name) + 1);
}
}
}
@@ -275,31 +226,6 @@ void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest)
tipc_disc_remove_dest(b_ptr->link_req);
}
-/*
- * Interrupt enabling new requests after bearer blocking:
- * See bearer_send().
- */
-void tipc_continue(struct tipc_bearer *b)
-{
- spin_lock_bh(&b->lock);
- b->blocked = 0;
- spin_unlock_bh(&b->lock);
-}
-
-/*
- * tipc_bearer_blocked - determines if bearer is currently blocked
- */
-int tipc_bearer_blocked(struct tipc_bearer *b)
-{
- int res;
-
- spin_lock_bh(&b->lock);
- res = b->blocked;
- spin_unlock_bh(&b->lock);
-
- return res;
-}
-
/**
* tipc_enable_bearer - enable bearer with the given name
*/
@@ -387,6 +313,7 @@ restart:
b_ptr = &tipc_bearers[bearer_id];
strcpy(b_ptr->name, name);
+ b_ptr->media = m_ptr;
res = m_ptr->enable_media(b_ptr);
if (res) {
pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
@@ -395,7 +322,6 @@ restart:
}
b_ptr->identity = bearer_id;
- b_ptr->media = m_ptr;
b_ptr->tolerance = m_ptr->tolerance;
b_ptr->window = m_ptr->window;
b_ptr->net_plane = bearer_id + 'A';
@@ -420,17 +346,16 @@ exit:
}
/**
- * tipc_block_bearer - Block the bearer, and reset all its links
+ * tipc_reset_bearer - Reset all links established over this bearer
*/
-int tipc_block_bearer(struct tipc_bearer *b_ptr)
+static int tipc_reset_bearer(struct tipc_bearer *b_ptr)
{
struct tipc_link *l_ptr;
struct tipc_link *temp_l_ptr;
read_lock_bh(&tipc_net_lock);
- pr_info("Blocking bearer <%s>\n", b_ptr->name);
+ pr_info("Resetting bearer <%s>\n", b_ptr->name);
spin_lock_bh(&b_ptr->lock);
- b_ptr->blocked = 1;
list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
struct tipc_node *n_ptr = l_ptr->owner;
@@ -456,7 +381,6 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
pr_info("Disabling bearer <%s>\n", b_ptr->name);
spin_lock_bh(&b_ptr->lock);
- b_ptr->blocked = 1;
b_ptr->media->disable_media(b_ptr);
list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
tipc_link_delete(l_ptr);
@@ -490,6 +414,211 @@ int tipc_disable_bearer(const char *name)
}
+/* tipc_l2_media_addr_set - initialize Ethernet media address structure
+ *
+ * Copies the hardware address into the first hwaddr_len bytes of the
+ * media-dependent "value" field and zeroes out the remaining bytes.
+ */
+void tipc_l2_media_addr_set(const struct tipc_bearer *b,
+ struct tipc_media_addr *a, char *mac)
+{
+ int len = b->media->hwaddr_len;
+
+ if (unlikely(sizeof(a->value) < len)) {
+ WARN_ONCE(1, "Media length invalid\n");
+ return;
+ }
+
+ memcpy(a->value, mac, len);
+ memset(a->value + len, 0, sizeof(a->value) - len);
+ a->media_id = b->media->type_id;
+ a->broadcast = !memcmp(mac, b->bcast_addr.value, len);
+}
+
+int tipc_enable_l2_media(struct tipc_bearer *b)
+{
+ struct net_device *dev;
+ char *driver_name = strchr((const char *)b->name, ':') + 1;
+
+ /* Find device with specified name */
+ dev = dev_get_by_name(&init_net, driver_name);
+ if (!dev)
+ return -ENODEV;
+
+ /* Associate TIPC bearer with Ethernet bearer */
+ b->media_ptr = dev;
+ memset(b->bcast_addr.value, 0, sizeof(b->bcast_addr.value));
+ memcpy(b->bcast_addr.value, dev->broadcast, b->media->hwaddr_len);
+ b->bcast_addr.media_id = b->media->type_id;
+ b->bcast_addr.broadcast = 1;
+ b->mtu = dev->mtu;
+ tipc_l2_media_addr_set(b, &b->addr, (char *)dev->dev_addr);
+ rcu_assign_pointer(dev->tipc_ptr, b);
+ return 0;
+}
+
+/* tipc_disable_l2_media - detach TIPC bearer from an Ethernet interface
+ *
+ * Mark the Ethernet bearer as inactive so that incoming buffers are thrown
+ * away, then get the worker thread to complete bearer cleanup. (Cleanup
+ * can't be done here because it needs to sleep while the caller holds
+ * spinlocks.)
+ */
+void tipc_disable_l2_media(struct tipc_bearer *b)
+{
+ struct net_device *dev = (struct net_device *)b->media_ptr;
+ RCU_INIT_POINTER(dev->tipc_ptr, NULL);
+ dev_put(dev);
+}
+
+/**
+ * tipc_l2_send_msg - send a TIPC packet out over an Ethernet interface
+ * @buf: the packet to be sent
+ * @b: the bearer through which the packet is to be sent
+ * @dest: peer destination address
+ */
+int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
+ struct tipc_media_addr *dest)
+{
+ struct sk_buff *clone;
+ int delta;
+ struct net_device *dev = (struct net_device *)b->media_ptr;
+
+ clone = skb_clone(buf, GFP_ATOMIC);
+ if (!clone)
+ return 0;
+
+ delta = dev->hard_header_len - skb_headroom(buf);
+ if ((delta > 0) &&
+ pskb_expand_head(clone, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
+ kfree_skb(clone);
+ return 0;
+ }
+
+ skb_reset_network_header(clone);
+ clone->dev = dev;
+ clone->protocol = htons(ETH_P_TIPC);
+ dev_hard_header(clone, dev, ETH_P_TIPC, dest->value,
+ dev->dev_addr, clone->len);
+ dev_queue_xmit(clone);
+ return 0;
+}
+
+/* tipc_bearer_send - sends buffer to destination over bearer
+ *
+ * IMPORTANT:
+ * The media send routine must not alter the buffer being passed in
+ * as it may be needed for later retransmission!
+ */
+void tipc_bearer_send(struct tipc_bearer *b, struct sk_buff *buf,
+ struct tipc_media_addr *dest)
+{
+ b->media->send_msg(buf, b, dest);
+}
+
+/**
+ * tipc_l2_rcv_msg - handle incoming TIPC message from an interface
+ * @buf: the received packet
+ * @dev: the net device that the packet was received on
+ * @pt: the packet_type structure which was used to register this handler
+ * @orig_dev: the original receive net device in case the device is a bond
+ *
+ * Accept only packets explicitly sent to this node, or broadcast packets;
+ * ignore packets sent using interface multicast, and traffic sent to other
+ * nodes (which can happen if the interface is running in promiscuous mode).
+ */
+static int tipc_l2_rcv_msg(struct sk_buff *buf, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ struct tipc_bearer *b_ptr;
+
+ if (!net_eq(dev_net(dev), &init_net)) {
+ kfree_skb(buf);
+ return NET_RX_DROP;
+ }
+
+ rcu_read_lock();
+ b_ptr = rcu_dereference(dev->tipc_ptr);
+ if (likely(b_ptr)) {
+ if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
+ buf->next = NULL;
+ tipc_rcv(buf, b_ptr);
+ rcu_read_unlock();
+ return NET_RX_SUCCESS;
+ }
+ }
+ rcu_read_unlock();
+
+ kfree_skb(buf);
+ return NET_RX_DROP;
+}
+
+/**
+ * tipc_l2_device_event - handle device events from network device
+ * @nb: the context of the notification
+ * @evt: the type of event
+ * @ptr: the net device that the event was on
+ *
+ * This function is called by the Ethernet driver in case of a link
+ * change event.
+ */
+static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
+ void *ptr)
+{
+ struct tipc_bearer *b_ptr;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ if (!net_eq(dev_net(dev), &init_net))
+ return NOTIFY_DONE;
+
+ rcu_read_lock();
+ b_ptr = rcu_dereference(dev->tipc_ptr);
+ if (!b_ptr) {
+ rcu_read_unlock();
+ return NOTIFY_DONE;
+ }
+
+ b_ptr->mtu = dev->mtu;
+
+ switch (evt) {
+ case NETDEV_CHANGE:
+ if (netif_carrier_ok(dev))
+ break;
+ case NETDEV_DOWN:
+ case NETDEV_CHANGEMTU:
+ case NETDEV_CHANGEADDR:
+ tipc_reset_bearer(b_ptr);
+ break;
+ case NETDEV_UNREGISTER:
+ case NETDEV_CHANGENAME:
+ tipc_disable_bearer(b_ptr->name);
+ break;
+ }
+ rcu_read_unlock();
+
+ return NOTIFY_OK;
+}
+
+static struct packet_type tipc_packet_type __read_mostly = {
+ .type = __constant_htons(ETH_P_TIPC),
+ .func = tipc_l2_rcv_msg,
+};
+
+static struct notifier_block notifier = {
+ .notifier_call = tipc_l2_device_event,
+ .priority = 0,
+};
+
+int tipc_bearer_setup(void)
+{
+ dev_add_pack(&tipc_packet_type);
+ return register_netdevice_notifier(&notifier);
+}
+
+void tipc_bearer_cleanup(void)
+{
+ unregister_netdevice_notifier(&notifier);
+ dev_remove_pack(&tipc_packet_type);
+}
void tipc_bearer_stop(void)
{
@@ -499,5 +628,4 @@ void tipc_bearer_stop(void)
if (tipc_bearers[i].active)
bearer_disable(&tipc_bearers[i]);
}
- media_count = 0;
}
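All of the bearer layer's contact with the net device core now funnels through tipc_bearer_setup()/tipc_bearer_cleanup(): a single ETH_P_TIPC packet_type hands received buffers to tipc_l2_rcv_msg() via the RCU-protected dev->tipc_ptr back-pointer, and a single netdevice notifier maps carrier/MTU/address changes to a bearer reset and unregister/rename to a bearer disable. A generic sketch of the same register-in-pairs pattern, with hypothetical my_* names:

	#include <linux/if_ether.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
			  struct packet_type *pt, struct net_device *orig_dev)
	{
		kfree_skb(skb);			/* placeholder: consume buffer */
		return NET_RX_SUCCESS;
	}

	static struct packet_type my_pt __read_mostly = {
		.type = __constant_htons(ETH_P_802_2),	/* any ethertype */
		.func = my_rcv,
	};

	static int my_event(struct notifier_block *nb, unsigned long evt,
			    void *ptr)
	{
		return NOTIFY_DONE;
	}

	static struct notifier_block my_nb = { .notifier_call = my_event };

	int my_setup(void)
	{
		dev_add_pack(&my_pt);
		return register_netdevice_notifier(&my_nb);
	}

	void my_cleanup(void)
	{
		unregister_netdevice_notifier(&my_nb);
		dev_remove_pack(&my_pt);
	}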
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index e5e04be6fffa..4f5db9ad5bf6 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -1,7 +1,7 @@
/*
* net/tipc/bearer.h: Include file for TIPC bearer code
*
- * Copyright (c) 1996-2006, Ericsson AB
+ * Copyright (c) 1996-2006, 2013, Ericsson AB
* Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
@@ -73,18 +73,18 @@ struct tipc_media_addr {
struct tipc_bearer;
/**
- * struct tipc_media - TIPC media information available to internal users
+ * struct tipc_media - Media specific info exposed to generic bearer layer
* @send_msg: routine which handles buffer transmission
* @enable_media: routine which enables a media
* @disable_media: routine which disables a media
* @addr2str: routine which converts media address to string
* @addr2msg: routine which converts media address to protocol message area
* @msg2addr: routine which converts media address from protocol message area
- * @bcast_addr: media address used in broadcasting
* @priority: default link (and bearer) priority
* @tolerance: default time (in ms) before declaring link failure
* @window: default window (in packets) before declaring link congestion
* @type_id: TIPC media identifier
+ * @hwaddr_len: TIPC media hardware address length
* @name: media name
*/
struct tipc_media {
@@ -101,18 +101,20 @@ struct tipc_media {
u32 tolerance;
u32 window;
u32 type_id;
+ u32 hwaddr_len;
char name[TIPC_MAX_MEDIA_NAME];
};
/**
- * struct tipc_bearer - TIPC bearer structure
+ * struct tipc_bearer - Generic TIPC bearer structure
+ * @dev: ptr to associated network device
* @usr_handle: pointer to additional media-specific information about bearer
* @mtu: max packet size bearer can support
- * @blocked: non-zero if bearer is blocked
* @lock: spinlock for controlling access to bearer
* @addr: media-specific address associated with bearer
* @name: bearer name (format = media:interface)
* @media: ptr to media structure associated with bearer
+ * @bcast_addr: media address used in broadcasting
* @priority: default link priority for bearer
* @window: default window size for bearer
* @tolerance: default link tolerance for bearer
@@ -128,9 +130,8 @@ struct tipc_media {
* care of initializing all other fields.
*/
struct tipc_bearer {
- void *usr_handle; /* initalized by media */
+ void *media_ptr; /* initialized by media */
u32 mtu; /* initialized by media */
- int blocked; /* initalized by media */
struct tipc_media_addr addr; /* initialized by media */
char name[TIPC_MAX_BEARER_NAME];
spinlock_t lock;
@@ -159,55 +160,40 @@ extern struct tipc_bearer tipc_bearers[];
/*
* TIPC routines available to supported media types
*/
-int tipc_register_media(struct tipc_media *m_ptr);
-
-void tipc_recv_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
-
-int tipc_block_bearer(struct tipc_bearer *b_ptr);
-void tipc_continue(struct tipc_bearer *tb_ptr);
+void tipc_rcv(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
int tipc_enable_bearer(const char *bearer_name, u32 disc_domain, u32 priority);
int tipc_disable_bearer(const char *name);
/*
* Routines made available to TIPC by supported media types
*/
-int tipc_eth_media_start(void);
-void tipc_eth_media_stop(void);
+extern struct tipc_media eth_media_info;
#ifdef CONFIG_TIPC_MEDIA_IB
-int tipc_ib_media_start(void);
-void tipc_ib_media_stop(void);
-#else
-static inline int tipc_ib_media_start(void) { return 0; }
-static inline void tipc_ib_media_stop(void) { return; }
+extern struct tipc_media ib_media_info;
#endif
int tipc_media_set_priority(const char *name, u32 new_value);
int tipc_media_set_window(const char *name, u32 new_value);
void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
struct sk_buff *tipc_media_get_names(void);
+void tipc_l2_media_addr_set(const struct tipc_bearer *b,
+ struct tipc_media_addr *a, char *mac);
+int tipc_enable_l2_media(struct tipc_bearer *b);
+void tipc_disable_l2_media(struct tipc_bearer *b);
+int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
+ struct tipc_media_addr *dest);
struct sk_buff *tipc_bearer_get_names(void);
void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest);
void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest);
struct tipc_bearer *tipc_bearer_find(const char *name);
-struct tipc_bearer *tipc_bearer_find_interface(const char *if_name);
struct tipc_media *tipc_media_find(const char *name);
-int tipc_bearer_blocked(struct tipc_bearer *b_ptr);
+int tipc_bearer_setup(void);
+void tipc_bearer_cleanup(void);
void tipc_bearer_stop(void);
-
-/**
- * tipc_bearer_send- sends buffer to destination over bearer
- *
- * IMPORTANT:
- * The media send routine must not alter the buffer being passed in
- * as it may be needed for later retransmission!
- */
-static inline void tipc_bearer_send(struct tipc_bearer *b, struct sk_buff *buf,
- struct tipc_media_addr *dest)
-{
- b->media->send_msg(buf, b, dest);
-}
+void tipc_bearer_send(struct tipc_bearer *b, struct sk_buff *buf,
+ struct tipc_media_addr *dest);
#endif /* _TIPC_BEARER_H */
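Editorial note: tipc_bearer_send() moves out of line above, but the contract in the deleted kerneldoc still stands: the media send routine must not alter the buffer passed in, since the link layer may need it for retransmission. A minimal sketch of a conforming send routine, assuming the associated net_device is stored in b->media_ptr (the function name is illustrative; the body mirrors the eth/ib send_msg() code removed further down):

	/* Sketch only: transmit without consuming 'buf', so the caller can
	 * retransmit the original later. Mirrors the removed send_msg().
	 */
	static int example_l2_send(struct sk_buff *buf, struct tipc_bearer *b,
				   struct tipc_media_addr *dest)
	{
		struct net_device *dev = b->media_ptr;	/* assumed: set at enable */
		struct sk_buff *clone;
		int delta;

		clone = skb_clone(buf, GFP_ATOMIC);
		if (!clone)
			return 0;

		delta = dev->hard_header_len - skb_headroom(buf);
		if ((delta > 0) &&
		    pskb_expand_head(clone, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
			kfree_skb(clone);	/* drop the copy, never 'buf' */
			return 0;
		}

		skb_reset_network_header(clone);
		clone->dev = dev;
		clone->protocol = htons(ETH_P_TIPC);
		dev_hard_header(clone, dev, ETH_P_TIPC, dest->value,
				dev->dev_addr, clone->len);
		dev_queue_xmit(clone);
		return 0;
	}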
diff --git a/net/tipc/core.c b/net/tipc/core.c
index fd4eeeaa972a..f9e88d8b04ca 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -82,8 +82,7 @@ struct sk_buff *tipc_buf_acquire(u32 size)
static void tipc_core_stop_net(void)
{
tipc_net_stop();
- tipc_eth_media_stop();
- tipc_ib_media_stop();
+ tipc_bearer_cleanup();
}
/**
@@ -94,10 +93,7 @@ int tipc_core_start_net(unsigned long addr)
int res;
tipc_net_start(addr);
- res = tipc_eth_media_start();
- if (res < 0)
- goto err;
- res = tipc_ib_media_start();
+ res = tipc_bearer_setup();
if (res < 0)
goto err;
return res;
@@ -113,7 +109,6 @@ err:
static void tipc_core_stop(void)
{
tipc_netlink_stop();
- tipc_handler_stop();
tipc_cfg_stop();
tipc_subscr_stop();
tipc_nametbl_stop();
@@ -146,9 +141,10 @@ static int tipc_core_start(void)
res = tipc_subscr_start();
if (!res)
res = tipc_cfg_init();
- if (res)
+ if (res) {
+ tipc_handler_stop();
tipc_core_stop();
-
+ }
return res;
}
@@ -178,6 +174,7 @@ static int __init tipc_init(void)
static void __exit tipc_exit(void)
{
+ tipc_handler_stop();
tipc_core_stop_net();
tipc_core_stop();
pr_info("Deactivated\n");
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 94895d4e86ab..1ff477b0450d 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -47,7 +47,7 @@
#include <linux/mm.h>
#include <linux/timer.h>
#include <linux/string.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <asm/hardirq.h>
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index ecc758c6eacf..412ff41b8611 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -50,6 +50,7 @@
* @dest: destination address for request messages
* @domain: network domain to which links can be established
* @num_nodes: number of nodes currently discovered (i.e. with an active link)
+ * @lock: spinlock for controlling access to requests
* @buf: request message to be (repeatedly) sent
* @timer: timer governing period between requests
* @timer_intv: current interval between requests (in ms)
@@ -59,6 +60,7 @@ struct tipc_link_req {
struct tipc_media_addr dest;
u32 domain;
int num_nodes;
+ spinlock_t lock;
struct sk_buff *buf;
struct timer_list timer;
unsigned int timer_intv;
@@ -239,7 +241,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
/* Accept discovery message & send response, if necessary */
link_fully_up = link_working_working(link);
- if ((type == DSC_REQ_MSG) && !link_fully_up && !b_ptr->blocked) {
+ if ((type == DSC_REQ_MSG) && !link_fully_up) {
rbuf = tipc_disc_init_msg(DSC_RESP_MSG, orig, b_ptr);
if (rbuf) {
tipc_bearer_send(b_ptr, rbuf, &media_addr);
@@ -274,7 +276,9 @@ static void disc_update(struct tipc_link_req *req)
*/
void tipc_disc_add_dest(struct tipc_link_req *req)
{
+ spin_lock_bh(&req->lock);
req->num_nodes++;
+ spin_unlock_bh(&req->lock);
}
/**
@@ -283,18 +287,10 @@ void tipc_disc_add_dest(struct tipc_link_req *req)
*/
void tipc_disc_remove_dest(struct tipc_link_req *req)
{
+ spin_lock_bh(&req->lock);
req->num_nodes--;
disc_update(req);
-}
-
-/**
- * disc_send_msg - send link setup request message
- * @req: ptr to link request structure
- */
-static void disc_send_msg(struct tipc_link_req *req)
-{
- if (!req->bearer->blocked)
- tipc_bearer_send(req->bearer, req->buf, &req->dest);
+ spin_unlock_bh(&req->lock);
}
/**
@@ -307,7 +303,7 @@ static void disc_timeout(struct tipc_link_req *req)
{
int max_delay;
- spin_lock_bh(&req->bearer->lock);
+ spin_lock_bh(&req->lock);
/* Stop searching if only desired node has been found */
if (tipc_node(req->domain) && req->num_nodes) {
@@ -322,7 +318,8 @@ static void disc_timeout(struct tipc_link_req *req)
* hold at fast polling rate if we don't have any associated nodes,
* otherwise hold at slow polling rate
*/
- disc_send_msg(req);
+ tipc_bearer_send(req->bearer, req->buf, &req->dest);
+
req->timer_intv *= 2;
if (req->num_nodes)
@@ -334,7 +331,7 @@ static void disc_timeout(struct tipc_link_req *req)
k_start_timer(&req->timer, req->timer_intv);
exit:
- spin_unlock_bh(&req->bearer->lock);
+ spin_unlock_bh(&req->lock);
}
/**
@@ -365,10 +362,11 @@ int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest,
req->domain = dest_domain;
req->num_nodes = 0;
req->timer_intv = TIPC_LINK_REQ_INIT;
+ spin_lock_init(&req->lock);
k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req);
k_start_timer(&req->timer, req->timer_intv);
b_ptr->link_req = req;
- disc_send_msg(req);
+ tipc_bearer_send(req->bearer, req->buf, &req->dest);
return 0;
}
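Editorial note: the net effect in discover.c is that request state no longer piggybacks on the bearer lock; each tipc_link_req serializes its own counter and timer on req->lock. The pattern, in brief:

	/* Sketch: per-request locking pattern introduced above. */
	spin_lock_init(&req->lock);		/* once, in tipc_disc_create() */

	spin_lock_bh(&req->lock);		/* every num_nodes/timer access */
	req->num_nodes++;
	spin_unlock_bh(&req->lock);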
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index f80d59f5a161..67cf3f935dba 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -1,7 +1,7 @@
/*
* net/tipc/eth_media.c: Ethernet bearer support for TIPC
*
- * Copyright (c) 2001-2007, Ericsson AB
+ * Copyright (c) 2001-2007, 2013, Ericsson AB
* Copyright (c) 2005-2008, 2011-2013, Wind River Systems
* All rights reserved.
*
@@ -37,259 +37,11 @@
#include "core.h"
#include "bearer.h"
-#define MAX_ETH_MEDIA MAX_BEARERS
-
#define ETH_ADDR_OFFSET 4 /* message header offset of MAC address */
-/**
- * struct eth_media - Ethernet bearer data structure
- * @bearer: ptr to associated "generic" bearer structure
- * @dev: ptr to associated Ethernet network device
- * @tipc_packet_type: used in binding TIPC to Ethernet driver
- * @setup: work item used when enabling bearer
- * @cleanup: work item used when disabling bearer
- */
-struct eth_media {
- struct tipc_bearer *bearer;
- struct net_device *dev;
- struct packet_type tipc_packet_type;
- struct work_struct setup;
- struct work_struct cleanup;
-};
-
-static struct tipc_media eth_media_info;
-static struct eth_media eth_media_array[MAX_ETH_MEDIA];
-static int eth_started;
-
-static int recv_notification(struct notifier_block *nb, unsigned long evt,
- void *dv);
-/*
- * Network device notifier info
- */
-static struct notifier_block notifier = {
- .notifier_call = recv_notification,
- .priority = 0
-};
-
-/**
- * eth_media_addr_set - initialize Ethernet media address structure
- *
- * Media-dependent "value" field stores MAC address in first 6 bytes
- * and zeroes out the remaining bytes.
- */
-static void eth_media_addr_set(const struct tipc_bearer *tb_ptr,
- struct tipc_media_addr *a, char *mac)
-{
- memcpy(a->value, mac, ETH_ALEN);
- memset(a->value + ETH_ALEN, 0, sizeof(a->value) - ETH_ALEN);
- a->media_id = TIPC_MEDIA_TYPE_ETH;
- a->broadcast = !memcmp(mac, tb_ptr->bcast_addr.value, ETH_ALEN);
-}
-
-/**
- * send_msg - send a TIPC message out over an Ethernet interface
- */
-static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
- struct tipc_media_addr *dest)
-{
- struct sk_buff *clone;
- struct net_device *dev;
- int delta;
-
- clone = skb_clone(buf, GFP_ATOMIC);
- if (!clone)
- return 0;
-
- dev = ((struct eth_media *)(tb_ptr->usr_handle))->dev;
- delta = dev->hard_header_len - skb_headroom(buf);
-
- if ((delta > 0) &&
- pskb_expand_head(clone, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
- kfree_skb(clone);
- return 0;
- }
-
- skb_reset_network_header(clone);
- clone->dev = dev;
- clone->protocol = htons(ETH_P_TIPC);
- dev_hard_header(clone, dev, ETH_P_TIPC, dest->value,
- dev->dev_addr, clone->len);
- dev_queue_xmit(clone);
- return 0;
-}
-
-/**
- * recv_msg - handle incoming TIPC message from an Ethernet interface
- *
- * Accept only packets explicitly sent to this node, or broadcast packets;
- * ignores packets sent using Ethernet multicast, and traffic sent to other
- * nodes (which can happen if interface is running in promiscuous mode).
- */
-static int recv_msg(struct sk_buff *buf, struct net_device *dev,
- struct packet_type *pt, struct net_device *orig_dev)
-{
- struct eth_media *eb_ptr = (struct eth_media *)pt->af_packet_priv;
-
- if (!net_eq(dev_net(dev), &init_net)) {
- kfree_skb(buf);
- return NET_RX_DROP;
- }
-
- if (likely(eb_ptr->bearer)) {
- if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
- buf->next = NULL;
- tipc_recv_msg(buf, eb_ptr->bearer);
- return NET_RX_SUCCESS;
- }
- }
- kfree_skb(buf);
- return NET_RX_DROP;
-}
-
-/**
- * setup_media - setup association between Ethernet bearer and interface
- */
-static void setup_media(struct work_struct *work)
-{
- struct eth_media *eb_ptr =
- container_of(work, struct eth_media, setup);
-
- dev_add_pack(&eb_ptr->tipc_packet_type);
-}
-
-/**
- * enable_media - attach TIPC bearer to an Ethernet interface
- */
-static int enable_media(struct tipc_bearer *tb_ptr)
-{
- struct net_device *dev;
- struct eth_media *eb_ptr = &eth_media_array[0];
- struct eth_media *stop = &eth_media_array[MAX_ETH_MEDIA];
- char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1;
- int pending_dev = 0;
-
- /* Find unused Ethernet bearer structure */
- while (eb_ptr->dev) {
- if (!eb_ptr->bearer)
- pending_dev++;
- if (++eb_ptr == stop)
- return pending_dev ? -EAGAIN : -EDQUOT;
- }
-
- /* Find device with specified name */
- dev = dev_get_by_name(&init_net, driver_name);
- if (!dev)
- return -ENODEV;
-
- /* Create Ethernet bearer for device */
- eb_ptr->dev = dev;
- eb_ptr->tipc_packet_type.type = htons(ETH_P_TIPC);
- eb_ptr->tipc_packet_type.dev = dev;
- eb_ptr->tipc_packet_type.func = recv_msg;
- eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
- INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
- INIT_WORK(&eb_ptr->setup, setup_media);
- schedule_work(&eb_ptr->setup);
-
- /* Associate TIPC bearer with Ethernet bearer */
- eb_ptr->bearer = tb_ptr;
- tb_ptr->usr_handle = (void *)eb_ptr;
- memset(tb_ptr->bcast_addr.value, 0, sizeof(tb_ptr->bcast_addr.value));
- memcpy(tb_ptr->bcast_addr.value, dev->broadcast, ETH_ALEN);
- tb_ptr->bcast_addr.media_id = TIPC_MEDIA_TYPE_ETH;
- tb_ptr->bcast_addr.broadcast = 1;
- tb_ptr->mtu = dev->mtu;
- tb_ptr->blocked = 0;
- eth_media_addr_set(tb_ptr, &tb_ptr->addr, (char *)dev->dev_addr);
- return 0;
-}
-
-/**
- * cleanup_media - break association between Ethernet bearer and interface
- *
- * This routine must be invoked from a work queue because it can sleep.
- */
-static void cleanup_media(struct work_struct *work)
-{
- struct eth_media *eb_ptr =
- container_of(work, struct eth_media, cleanup);
-
- dev_remove_pack(&eb_ptr->tipc_packet_type);
- dev_put(eb_ptr->dev);
- eb_ptr->dev = NULL;
-}
-
-/**
- * disable_media - detach TIPC bearer from an Ethernet interface
- *
- * Mark Ethernet bearer as inactive so that incoming buffers are thrown away,
- * then get worker thread to complete bearer cleanup. (Can't do cleanup
- * here because cleanup code needs to sleep and caller holds spinlocks.)
- */
-static void disable_media(struct tipc_bearer *tb_ptr)
-{
- struct eth_media *eb_ptr = (struct eth_media *)tb_ptr->usr_handle;
-
- eb_ptr->bearer = NULL;
- INIT_WORK(&eb_ptr->cleanup, cleanup_media);
- schedule_work(&eb_ptr->cleanup);
-}
-
-/**
- * recv_notification - handle device updates from OS
- *
- * Change the state of the Ethernet bearer (if any) associated with the
- * specified device.
- */
-static int recv_notification(struct notifier_block *nb, unsigned long evt,
- void *ptr)
-{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct eth_media *eb_ptr = &eth_media_array[0];
- struct eth_media *stop = &eth_media_array[MAX_ETH_MEDIA];
-
- if (!net_eq(dev_net(dev), &init_net))
- return NOTIFY_DONE;
-
- while ((eb_ptr->dev != dev)) {
- if (++eb_ptr == stop)
- return NOTIFY_DONE; /* couldn't find device */
- }
- if (!eb_ptr->bearer)
- return NOTIFY_DONE; /* bearer had been disabled */
-
- eb_ptr->bearer->mtu = dev->mtu;
-
- switch (evt) {
- case NETDEV_CHANGE:
- if (netif_carrier_ok(dev))
- tipc_continue(eb_ptr->bearer);
- else
- tipc_block_bearer(eb_ptr->bearer);
- break;
- case NETDEV_UP:
- tipc_continue(eb_ptr->bearer);
- break;
- case NETDEV_DOWN:
- tipc_block_bearer(eb_ptr->bearer);
- break;
- case NETDEV_CHANGEMTU:
- case NETDEV_CHANGEADDR:
- tipc_block_bearer(eb_ptr->bearer);
- tipc_continue(eb_ptr->bearer);
- break;
- case NETDEV_UNREGISTER:
- case NETDEV_CHANGENAME:
- tipc_disable_bearer(eb_ptr->bearer->name);
- break;
- }
- return NOTIFY_OK;
-}
-
-/**
- * eth_addr2str - convert Ethernet address to string
- */
-static int eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
+/* convert Ethernet address to string */
+static int tipc_eth_addr2str(struct tipc_media_addr *a, char *str_buf,
+ int str_size)
{
if (str_size < 18) /* 18 = sizeof("aa:bb:cc:dd:ee:ff") */
return 1;
@@ -298,10 +50,8 @@ static int eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
return 0;
}
-/**
- * eth_str2addr - convert Ethernet address format to message header format
- */
-static int eth_addr2msg(struct tipc_media_addr *a, char *msg_area)
+/* convert Ethernet address format to message header format */
+static int tipc_eth_addr2msg(struct tipc_media_addr *a, char *msg_area)
{
memset(msg_area, 0, TIPC_MEDIA_ADDR_SIZE);
msg_area[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH;
@@ -309,68 +59,30 @@ static int eth_addr2msg(struct tipc_media_addr *a, char *msg_area)
return 0;
}
-/**
- * eth_str2addr - convert message header address format to Ethernet format
- */
-static int eth_msg2addr(const struct tipc_bearer *tb_ptr,
- struct tipc_media_addr *a, char *msg_area)
+/* convert message header address format to Ethernet format */
+static int tipc_eth_msg2addr(const struct tipc_bearer *tb_ptr,
+ struct tipc_media_addr *a, char *msg_area)
{
if (msg_area[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_ETH)
return 1;
- eth_media_addr_set(tb_ptr, a, msg_area + ETH_ADDR_OFFSET);
+ tipc_l2_media_addr_set(tb_ptr, a, msg_area + ETH_ADDR_OFFSET);
return 0;
}
-/*
- * Ethernet media registration info
- */
-static struct tipc_media eth_media_info = {
- .send_msg = send_msg,
- .enable_media = enable_media,
- .disable_media = disable_media,
- .addr2str = eth_addr2str,
- .addr2msg = eth_addr2msg,
- .msg2addr = eth_msg2addr,
+/* Ethernet media registration info */
+struct tipc_media eth_media_info = {
+ .send_msg = tipc_l2_send_msg,
+ .enable_media = tipc_enable_l2_media,
+ .disable_media = tipc_disable_l2_media,
+ .addr2str = tipc_eth_addr2str,
+ .addr2msg = tipc_eth_addr2msg,
+ .msg2addr = tipc_eth_msg2addr,
.priority = TIPC_DEF_LINK_PRI,
.tolerance = TIPC_DEF_LINK_TOL,
.window = TIPC_DEF_LINK_WIN,
.type_id = TIPC_MEDIA_TYPE_ETH,
+ .hwaddr_len = ETH_ALEN,
.name = "eth"
};
-/**
- * tipc_eth_media_start - activate Ethernet bearer support
- *
- * Register Ethernet media type with TIPC bearer code. Also register
- * with OS for notifications about device state changes.
- */
-int tipc_eth_media_start(void)
-{
- int res;
-
- if (eth_started)
- return -EINVAL;
-
- res = tipc_register_media(&eth_media_info);
- if (res)
- return res;
-
- res = register_netdevice_notifier(&notifier);
- if (!res)
- eth_started = 1;
- return res;
-}
-
-/**
- * tipc_eth_media_stop - deactivate Ethernet bearer support
- */
-void tipc_eth_media_stop(void)
-{
- if (!eth_started)
- return;
-
- flush_scheduled_work();
- unregister_netdevice_notifier(&notifier);
- eth_started = 0;
-}
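Editorial note: with eth_media.c reduced to its address-format helpers, a media type is now little more than a tipc_media descriptor wired to the shared L2 routines. A sketch of what a hypothetical new L2 media registration would look like (every "foo" name below is illustrative, not an existing symbol):

	struct tipc_media foo_media_info = {
		.send_msg	= tipc_l2_send_msg,	/* shared L2 helpers */
		.enable_media	= tipc_enable_l2_media,
		.disable_media	= tipc_disable_l2_media,
		.addr2str	= tipc_foo_addr2str,	/* media-specific only */
		.addr2msg	= tipc_foo_addr2msg,
		.msg2addr	= tipc_foo_msg2addr,
		.priority	= TIPC_DEF_LINK_PRI,
		.tolerance	= TIPC_DEF_LINK_TOL,
		.window		= TIPC_DEF_LINK_WIN,
		.type_id	= TIPC_MEDIA_TYPE_FOO,	/* hypothetical id */
		.hwaddr_len	= FOO_ALEN,		/* hypothetical length */
		.name		= "foo"
	};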
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index b36f0fcd9bdf..e4bc8a296744 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -56,12 +56,13 @@ unsigned int tipc_k_signal(Handler routine, unsigned long argument)
{
struct queue_item *item;
+ spin_lock_bh(&qitem_lock);
if (!handler_enabled) {
pr_err("Signal request ignored by handler\n");
+ spin_unlock_bh(&qitem_lock);
return -ENOPROTOOPT;
}
- spin_lock_bh(&qitem_lock);
item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC);
if (!item) {
pr_err("Signal queue out of memory\n");
@@ -112,10 +113,14 @@ void tipc_handler_stop(void)
struct list_head *l, *n;
struct queue_item *item;
- if (!handler_enabled)
+ spin_lock_bh(&qitem_lock);
+ if (!handler_enabled) {
+ spin_unlock_bh(&qitem_lock);
return;
-
+ }
handler_enabled = 0;
+ spin_unlock_bh(&qitem_lock);
+
tasklet_kill(&tipc_tasklet);
spin_lock_bh(&qitem_lock);
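Editorial note: the reshuffle closes a race. Previously tipc_k_signal() could test handler_enabled, lose the CPU, and then enqueue onto a queue that tipc_handler_stop() had already drained. Producer and stop path now agree on the flag under qitem_lock; the producer side, as a sketch:

	/* Sketch: flag and queue are checked/updated under one lock. */
	spin_lock_bh(&qitem_lock);
	if (!handler_enabled) {			/* stop already ran */
		spin_unlock_bh(&qitem_lock);
		return -ENOPROTOOPT;
	}
	/* ... allocate item and add it to the signal queue ... */
	spin_unlock_bh(&qitem_lock);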
diff --git a/net/tipc/ib_media.c b/net/tipc/ib_media.c
index c13989297464..844a77e25828 100644
--- a/net/tipc/ib_media.c
+++ b/net/tipc/ib_media.c
@@ -42,252 +42,9 @@
#include "core.h"
#include "bearer.h"
-#define MAX_IB_MEDIA MAX_BEARERS
-
-/**
- * struct ib_media - Infiniband media data structure
- * @bearer: ptr to associated "generic" bearer structure
- * @dev: ptr to associated Infiniband network device
- * @tipc_packet_type: used in binding TIPC to Infiniband driver
- * @cleanup: work item used when disabling bearer
- */
-
-struct ib_media {
- struct tipc_bearer *bearer;
- struct net_device *dev;
- struct packet_type tipc_packet_type;
- struct work_struct setup;
- struct work_struct cleanup;
-};
-
-static struct tipc_media ib_media_info;
-static struct ib_media ib_media_array[MAX_IB_MEDIA];
-static int ib_started;
-
-/**
- * ib_media_addr_set - initialize Infiniband media address structure
- *
- * Media-dependent "value" field stores MAC address in first 6 bytes
- * and zeroes out the remaining bytes.
- */
-static void ib_media_addr_set(const struct tipc_bearer *tb_ptr,
- struct tipc_media_addr *a, char *mac)
-{
- BUILD_BUG_ON(sizeof(a->value) < INFINIBAND_ALEN);
- memcpy(a->value, mac, INFINIBAND_ALEN);
- a->media_id = TIPC_MEDIA_TYPE_IB;
- a->broadcast = !memcmp(mac, tb_ptr->bcast_addr.value, INFINIBAND_ALEN);
-}
-
-/**
- * send_msg - send a TIPC message out over an InfiniBand interface
- */
-static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
- struct tipc_media_addr *dest)
-{
- struct sk_buff *clone;
- struct net_device *dev;
- int delta;
-
- clone = skb_clone(buf, GFP_ATOMIC);
- if (!clone)
- return 0;
-
- dev = ((struct ib_media *)(tb_ptr->usr_handle))->dev;
- delta = dev->hard_header_len - skb_headroom(buf);
-
- if ((delta > 0) &&
- pskb_expand_head(clone, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
- kfree_skb(clone);
- return 0;
- }
-
- skb_reset_network_header(clone);
- clone->dev = dev;
- clone->protocol = htons(ETH_P_TIPC);
- dev_hard_header(clone, dev, ETH_P_TIPC, dest->value,
- dev->dev_addr, clone->len);
- dev_queue_xmit(clone);
- return 0;
-}
-
-/**
- * recv_msg - handle incoming TIPC message from an InfiniBand interface
- *
- * Accept only packets explicitly sent to this node, or broadcast packets;
- * ignores packets sent using InfiniBand multicast, and traffic sent to other
- * nodes (which can happen if interface is running in promiscuous mode).
- */
-static int recv_msg(struct sk_buff *buf, struct net_device *dev,
- struct packet_type *pt, struct net_device *orig_dev)
-{
- struct ib_media *ib_ptr = (struct ib_media *)pt->af_packet_priv;
-
- if (!net_eq(dev_net(dev), &init_net)) {
- kfree_skb(buf);
- return NET_RX_DROP;
- }
-
- if (likely(ib_ptr->bearer)) {
- if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
- buf->next = NULL;
- tipc_recv_msg(buf, ib_ptr->bearer);
- return NET_RX_SUCCESS;
- }
- }
- kfree_skb(buf);
- return NET_RX_DROP;
-}
-
-/**
- * setup_bearer - setup association between InfiniBand bearer and interface
- */
-static void setup_media(struct work_struct *work)
-{
- struct ib_media *ib_ptr =
- container_of(work, struct ib_media, setup);
-
- dev_add_pack(&ib_ptr->tipc_packet_type);
-}
-
-/**
- * enable_media - attach TIPC bearer to an InfiniBand interface
- */
-static int enable_media(struct tipc_bearer *tb_ptr)
-{
- struct net_device *dev;
- struct ib_media *ib_ptr = &ib_media_array[0];
- struct ib_media *stop = &ib_media_array[MAX_IB_MEDIA];
- char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1;
- int pending_dev = 0;
-
- /* Find unused InfiniBand bearer structure */
- while (ib_ptr->dev) {
- if (!ib_ptr->bearer)
- pending_dev++;
- if (++ib_ptr == stop)
- return pending_dev ? -EAGAIN : -EDQUOT;
- }
-
- /* Find device with specified name */
- dev = dev_get_by_name(&init_net, driver_name);
- if (!dev)
- return -ENODEV;
-
- /* Create InfiniBand bearer for device */
- ib_ptr->dev = dev;
- ib_ptr->tipc_packet_type.type = htons(ETH_P_TIPC);
- ib_ptr->tipc_packet_type.dev = dev;
- ib_ptr->tipc_packet_type.func = recv_msg;
- ib_ptr->tipc_packet_type.af_packet_priv = ib_ptr;
- INIT_LIST_HEAD(&(ib_ptr->tipc_packet_type.list));
- INIT_WORK(&ib_ptr->setup, setup_media);
- schedule_work(&ib_ptr->setup);
-
- /* Associate TIPC bearer with InfiniBand bearer */
- ib_ptr->bearer = tb_ptr;
- tb_ptr->usr_handle = (void *)ib_ptr;
- memset(tb_ptr->bcast_addr.value, 0, sizeof(tb_ptr->bcast_addr.value));
- memcpy(tb_ptr->bcast_addr.value, dev->broadcast, INFINIBAND_ALEN);
- tb_ptr->bcast_addr.media_id = TIPC_MEDIA_TYPE_IB;
- tb_ptr->bcast_addr.broadcast = 1;
- tb_ptr->mtu = dev->mtu;
- tb_ptr->blocked = 0;
- ib_media_addr_set(tb_ptr, &tb_ptr->addr, (char *)dev->dev_addr);
- return 0;
-}
-
-/**
- * cleanup_bearer - break association between InfiniBand bearer and interface
- *
- * This routine must be invoked from a work queue because it can sleep.
- */
-static void cleanup_bearer(struct work_struct *work)
-{
- struct ib_media *ib_ptr =
- container_of(work, struct ib_media, cleanup);
-
- dev_remove_pack(&ib_ptr->tipc_packet_type);
- dev_put(ib_ptr->dev);
- ib_ptr->dev = NULL;
-}
-
-/**
- * disable_media - detach TIPC bearer from an InfiniBand interface
- *
- * Mark InfiniBand bearer as inactive so that incoming buffers are thrown away,
- * then get worker thread to complete bearer cleanup. (Can't do cleanup
- * here because cleanup code needs to sleep and caller holds spinlocks.)
- */
-static void disable_media(struct tipc_bearer *tb_ptr)
-{
- struct ib_media *ib_ptr = (struct ib_media *)tb_ptr->usr_handle;
-
- ib_ptr->bearer = NULL;
- INIT_WORK(&ib_ptr->cleanup, cleanup_bearer);
- schedule_work(&ib_ptr->cleanup);
-}
-
-/**
- * recv_notification - handle device updates from OS
- *
- * Change the state of the InfiniBand bearer (if any) associated with the
- * specified device.
- */
-static int recv_notification(struct notifier_block *nb, unsigned long evt,
- void *ptr)
-{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct ib_media *ib_ptr = &ib_media_array[0];
- struct ib_media *stop = &ib_media_array[MAX_IB_MEDIA];
-
- if (!net_eq(dev_net(dev), &init_net))
- return NOTIFY_DONE;
-
- while ((ib_ptr->dev != dev)) {
- if (++ib_ptr == stop)
- return NOTIFY_DONE; /* couldn't find device */
- }
- if (!ib_ptr->bearer)
- return NOTIFY_DONE; /* bearer had been disabled */
-
- ib_ptr->bearer->mtu = dev->mtu;
-
- switch (evt) {
- case NETDEV_CHANGE:
- if (netif_carrier_ok(dev))
- tipc_continue(ib_ptr->bearer);
- else
- tipc_block_bearer(ib_ptr->bearer);
- break;
- case NETDEV_UP:
- tipc_continue(ib_ptr->bearer);
- break;
- case NETDEV_DOWN:
- tipc_block_bearer(ib_ptr->bearer);
- break;
- case NETDEV_CHANGEMTU:
- case NETDEV_CHANGEADDR:
- tipc_block_bearer(ib_ptr->bearer);
- tipc_continue(ib_ptr->bearer);
- break;
- case NETDEV_UNREGISTER:
- case NETDEV_CHANGENAME:
- tipc_disable_bearer(ib_ptr->bearer->name);
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block notifier = {
- .notifier_call = recv_notification,
- .priority = 0,
-};
-
-/**
- * ib_addr2str - convert InfiniBand address to string
- */
-static int ib_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
+/* convert InfiniBand address to string */
+static int tipc_ib_addr2str(struct tipc_media_addr *a, char *str_buf,
+ int str_size)
{
if (str_size < 60) /* 60 = 19 * strlen("xx:") + sizeof("xx") */
return 1;
@@ -297,10 +54,8 @@ static int ib_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
return 0;
}
-/**
- * ib_addr2msg - convert InfiniBand address format to message header format
- */
-static int ib_addr2msg(struct tipc_media_addr *a, char *msg_area)
+/* convert InfiniBand address format to message header format */
+static int tipc_ib_addr2msg(struct tipc_media_addr *a, char *msg_area)
{
memset(msg_area, 0, TIPC_MEDIA_ADDR_SIZE);
msg_area[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_IB;
@@ -308,65 +63,27 @@ static int ib_addr2msg(struct tipc_media_addr *a, char *msg_area)
return 0;
}
-/**
- * ib_msg2addr - convert message header address format to InfiniBand format
- */
-static int ib_msg2addr(const struct tipc_bearer *tb_ptr,
- struct tipc_media_addr *a, char *msg_area)
+/* convert message header address format to InfiniBand format */
+static int tipc_ib_msg2addr(const struct tipc_bearer *tb_ptr,
+ struct tipc_media_addr *a, char *msg_area)
{
- ib_media_addr_set(tb_ptr, a, msg_area);
+ tipc_l2_media_addr_set(tb_ptr, a, msg_area);
return 0;
}
-/*
- * InfiniBand media registration info
- */
-static struct tipc_media ib_media_info = {
- .send_msg = send_msg,
- .enable_media = enable_media,
- .disable_media = disable_media,
- .addr2str = ib_addr2str,
- .addr2msg = ib_addr2msg,
- .msg2addr = ib_msg2addr,
+/* InfiniBand media registration info */
+struct tipc_media ib_media_info = {
+ .send_msg = tipc_l2_send_msg,
+ .enable_media = tipc_enable_l2_media,
+ .disable_media = tipc_disable_l2_media,
+ .addr2str = tipc_ib_addr2str,
+ .addr2msg = tipc_ib_addr2msg,
+ .msg2addr = tipc_ib_msg2addr,
.priority = TIPC_DEF_LINK_PRI,
.tolerance = TIPC_DEF_LINK_TOL,
.window = TIPC_DEF_LINK_WIN,
.type_id = TIPC_MEDIA_TYPE_IB,
+ .hwaddr_len = INFINIBAND_ALEN,
.name = "ib"
};
-/**
- * tipc_ib_media_start - activate InfiniBand bearer support
- *
- * Register InfiniBand media type with TIPC bearer code. Also register
- * with OS for notifications about device state changes.
- */
-int tipc_ib_media_start(void)
-{
- int res;
-
- if (ib_started)
- return -EINVAL;
-
- res = tipc_register_media(&ib_media_info);
- if (res)
- return res;
-
- res = register_netdevice_notifier(&notifier);
- if (!res)
- ib_started = 1;
- return res;
-}
-
-/**
- * tipc_ib_media_stop - deactivate InfiniBand bearer support
- */
-void tipc_ib_media_stop(void)
-{
- if (!ib_started)
- return;
-
- flush_scheduled_work();
- unregister_netdevice_notifier(&notifier);
- ib_started = 0;
-}
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 69cd9bf3f561..471973ff134f 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1,7 +1,7 @@
/*
* net/tipc/link.c: TIPC link code
*
- * Copyright (c) 1996-2007, 2012, Ericsson AB
+ * Copyright (c) 1996-2007, 2012-2014, Ericsson AB
* Copyright (c) 2004-2007, 2010-2013, Wind River Systems
* All rights reserved.
*
@@ -78,8 +78,8 @@ static const char *link_unk_evt = "Unknown link event ";
static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
struct sk_buff *buf);
static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf);
-static int link_recv_changeover_msg(struct tipc_link **l_ptr,
- struct sk_buff **buf);
+static int tipc_link_tunnel_rcv(struct tipc_link **l_ptr,
+ struct sk_buff **buf);
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
static int link_send_sections_long(struct tipc_port *sender,
struct iovec const *msg_sect,
@@ -87,7 +87,6 @@ static int link_send_sections_long(struct tipc_port *sender,
static void link_state_event(struct tipc_link *l_ptr, u32 event);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
-static void link_start(struct tipc_link *l_ptr);
static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
static void tipc_link_send_sync(struct tipc_link *l);
static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf);
@@ -278,9 +277,11 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
tipc_node_attach_link(n_ptr, l_ptr);
- k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
+ k_init_timer(&l_ptr->timer, (Handler)link_timeout,
+ (unsigned long)l_ptr);
list_add_tail(&l_ptr->link_list, &b_ptr->links);
- tipc_k_signal((Handler)link_start, (unsigned long)l_ptr);
+
+ link_state_event(l_ptr, STARTING_EVT);
return l_ptr;
}
@@ -305,19 +306,13 @@ void tipc_link_delete(struct tipc_link *l_ptr)
tipc_node_lock(l_ptr->owner);
tipc_link_reset(l_ptr);
tipc_node_detach_link(l_ptr->owner, l_ptr);
- tipc_link_stop(l_ptr);
+ tipc_link_purge_queues(l_ptr);
list_del_init(&l_ptr->link_list);
tipc_node_unlock(l_ptr->owner);
k_term_timer(&l_ptr->timer);
kfree(l_ptr);
}
-static void link_start(struct tipc_link *l_ptr)
-{
- tipc_node_lock(l_ptr->owner);
- link_state_event(l_ptr, STARTING_EVT);
- tipc_node_unlock(l_ptr->owner);
-}
/**
* link_schedule_port - schedule port for deferred sending
@@ -386,14 +381,7 @@ exit:
*/
static void link_release_outqueue(struct tipc_link *l_ptr)
{
- struct sk_buff *buf = l_ptr->first_out;
- struct sk_buff *next;
-
- while (buf) {
- next = buf->next;
- kfree_skb(buf);
- buf = next;
- }
+ kfree_skb_list(l_ptr->first_out);
l_ptr->first_out = NULL;
l_ptr->out_queue_size = 0;
}
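Editorial note: kfree_skb_list() frees a whole ->next chain in one call; each of the open-coded loops this patch removes (here and in tipc_link_reset(), link_send_long_buf(), node_lost_contact(), and the send-path error unwinds) was an instance of:

	/* Reference: kfree_skb_list(head) is equivalent to this loop. */
	while (head) {
		struct sk_buff *next = head->next;

		kfree_skb(head);
		head = next;
	}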
@@ -410,37 +398,20 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr)
}
/**
- * tipc_link_stop - purge all inbound and outbound messages associated with link
+ * tipc_link_purge_queues - purge all pkt queues associated with link
* @l_ptr: pointer to link
*/
-void tipc_link_stop(struct tipc_link *l_ptr)
+void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
- struct sk_buff *buf;
- struct sk_buff *next;
-
- buf = l_ptr->oldest_deferred_in;
- while (buf) {
- next = buf->next;
- kfree_skb(buf);
- buf = next;
- }
-
- buf = l_ptr->first_out;
- while (buf) {
- next = buf->next;
- kfree_skb(buf);
- buf = next;
- }
-
+ kfree_skb_list(l_ptr->oldest_deferred_in);
+ kfree_skb_list(l_ptr->first_out);
tipc_link_reset_fragments(l_ptr);
-
kfree_skb(l_ptr->proto_msg_queue);
l_ptr->proto_msg_queue = NULL;
}
void tipc_link_reset(struct tipc_link *l_ptr)
{
- struct sk_buff *buf;
u32 prev_state = l_ptr->state;
u32 checkpoint = l_ptr->next_in_no;
int was_active_link = tipc_link_is_active(l_ptr);
@@ -461,8 +432,7 @@ void tipc_link_reset(struct tipc_link *l_ptr)
tipc_node_link_down(l_ptr->owner, l_ptr);
tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
- if (was_active_link && tipc_node_active_links(l_ptr->owner) &&
- l_ptr->owner->permit_changeover) {
+ if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
l_ptr->reset_checkpoint = checkpoint;
l_ptr->exp_msg_count = START_CHANGEOVER;
}
@@ -471,12 +441,7 @@ void tipc_link_reset(struct tipc_link *l_ptr)
link_release_outqueue(l_ptr);
kfree_skb(l_ptr->proto_msg_queue);
l_ptr->proto_msg_queue = NULL;
- buf = l_ptr->oldest_deferred_in;
- while (buf) {
- struct sk_buff *next = buf->next;
- kfree_skb(buf);
- buf = next;
- }
+ kfree_skb_list(l_ptr->oldest_deferred_in);
if (!list_empty(&l_ptr->waiting_ports))
tipc_link_wakeup_ports(l_ptr, 1);
@@ -517,10 +482,11 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
if (!l_ptr->started && (event != STARTING_EVT))
return; /* Not yet. */
- if (link_blocked(l_ptr)) {
+ /* Check whether changeover is going on */
+ if (l_ptr->exp_msg_count) {
if (event == TIMEOUT_EVT)
link_set_timer(l_ptr, cont_intv);
- return; /* Changeover going on */
+ return;
}
switch (l_ptr->state) {
@@ -790,8 +756,7 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
return link_send_long_buf(l_ptr, buf);
/* Packet can be queued or sent. */
- if (likely(!tipc_bearer_blocked(l_ptr->b_ptr) &&
- !link_congested(l_ptr))) {
+ if (likely(!link_congested(l_ptr))) {
link_add_to_outqueue(l_ptr, buf, msg);
tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
@@ -957,14 +922,13 @@ static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
if (likely(!link_congested(l_ptr))) {
if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
- if (likely(!tipc_bearer_blocked(l_ptr->b_ptr))) {
- link_add_to_outqueue(l_ptr, buf, msg);
- tipc_bearer_send(l_ptr->b_ptr, buf,
- &l_ptr->media_addr);
- l_ptr->unacked_window = 0;
- return res;
- }
- } else
+ link_add_to_outqueue(l_ptr, buf, msg);
+ tipc_bearer_send(l_ptr->b_ptr, buf,
+ &l_ptr->media_addr);
+ l_ptr->unacked_window = 0;
+ return res;
+ }
+ else
*used_max_pkt = l_ptr->max_pkt;
}
return tipc_link_send_buf(l_ptr, buf); /* All other cases */
@@ -1013,8 +977,7 @@ exit:
}
/* Exit if link (or bearer) is congested */
- if (link_congested(l_ptr) ||
- tipc_bearer_blocked(l_ptr->b_ptr)) {
+ if (link_congested(l_ptr)) {
res = link_schedule_port(l_ptr,
sender->ref, res);
goto exit;
@@ -1127,10 +1090,7 @@ again:
if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
res = -EFAULT;
error:
- for (; buf_chain; buf_chain = buf) {
- buf = buf_chain->next;
- kfree_skb(buf_chain);
- }
+ kfree_skb_list(buf_chain);
return res;
}
sect_crs += sz;
@@ -1180,18 +1140,12 @@ error:
if (l_ptr->max_pkt < max_pkt) {
sender->max_pkt = l_ptr->max_pkt;
tipc_node_unlock(node);
- for (; buf_chain; buf_chain = buf) {
- buf = buf_chain->next;
- kfree_skb(buf_chain);
- }
+ kfree_skb_list(buf_chain);
goto again;
}
} else {
reject:
- for (; buf_chain; buf_chain = buf) {
- buf = buf_chain->next;
- kfree_skb(buf_chain);
- }
+ kfree_skb_list(buf_chain);
return tipc_port_reject_sections(sender, hdr, msg_sect,
len, TIPC_ERR_NO_NODE);
}
@@ -1209,7 +1163,7 @@ reject:
/*
* tipc_link_push_packet: Push one unsent packet to the media
*/
-u32 tipc_link_push_packet(struct tipc_link *l_ptr)
+static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
{
struct sk_buff *buf = l_ptr->first_out;
u32 r_q_size = l_ptr->retransm_queue_size;
@@ -1281,9 +1235,6 @@ void tipc_link_push_queue(struct tipc_link *l_ptr)
{
u32 res;
- if (tipc_bearer_blocked(l_ptr->b_ptr))
- return;
-
do {
res = tipc_link_push_packet(l_ptr);
} while (!res);
@@ -1370,26 +1321,15 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
msg = buf_msg(buf);
- if (tipc_bearer_blocked(l_ptr->b_ptr)) {
- if (l_ptr->retransm_queue_size == 0) {
- l_ptr->retransm_queue_head = msg_seqno(msg);
- l_ptr->retransm_queue_size = retransmits;
- } else {
- pr_err("Unexpected retransmit on link %s (qsize=%d)\n",
- l_ptr->name, l_ptr->retransm_queue_size);
+ /* Detect repeated retransmit failures */
+ if (l_ptr->last_retransmitted == msg_seqno(msg)) {
+ if (++l_ptr->stale_count > 100) {
+ link_retransmit_failure(l_ptr, buf);
+ return;
}
- return;
} else {
- /* Detect repeated retransmit failures on unblocked bearer */
- if (l_ptr->last_retransmitted == msg_seqno(msg)) {
- if (++l_ptr->stale_count > 100) {
- link_retransmit_failure(l_ptr, buf);
- return;
- }
- } else {
- l_ptr->last_retransmitted = msg_seqno(msg);
- l_ptr->stale_count = 1;
- }
+ l_ptr->last_retransmitted = msg_seqno(msg);
+ l_ptr->stale_count = 1;
}
while (retransmits && (buf != l_ptr->next_out) && buf) {
@@ -1476,14 +1416,14 @@ static int link_recv_buf_validate(struct sk_buff *buf)
}
/**
- * tipc_recv_msg - process TIPC messages arriving from off-node
+ * tipc_rcv - process TIPC packets/messages arriving from off-node
* @head: pointer to message buffer chain
* @tb_ptr: pointer to bearer message arrived on
*
* Invoked with no locks held. Bearer pointer must point to a valid bearer
* structure (i.e. cannot be NULL), but bearer can be inactive.
*/
-void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
+void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
{
read_lock_bh(&tipc_net_lock);
while (head) {
@@ -1657,7 +1597,7 @@ deliver:
continue;
case CHANGEOVER_PROTOCOL:
type = msg_type(msg);
- if (link_recv_changeover_msg(&l_ptr, &buf)) {
+ if (tipc_link_tunnel_rcv(&l_ptr, &buf)) {
msg = buf_msg(buf);
seq_no = msg_seqno(msg);
if (type == ORIGINAL_MSG)
@@ -1786,7 +1726,8 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
l_ptr->proto_msg_queue = NULL;
}
- if (link_blocked(l_ptr))
+ /* Don't send protocol message during link changeover */
+ if (l_ptr->exp_msg_count)
return;
/* Abort non-RESET send if communication with node is prohibited */
@@ -1861,12 +1802,6 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
buf->priority = TC_PRIO_CONTROL;
- /* Defer message if bearer is already blocked */
- if (tipc_bearer_blocked(l_ptr->b_ptr)) {
- l_ptr->proto_msg_queue = buf;
- return;
- }
-
tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
l_ptr->unacked_window = 0;
kfree_skb(buf);
@@ -1885,7 +1820,8 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
u32 msg_tol;
struct tipc_msg *msg = buf_msg(buf);
- if (link_blocked(l_ptr))
+ /* Discard protocol message during link changeover */
+ if (l_ptr->exp_msg_count)
goto exit;
/* record unnumbered packet arrival (force mismatch on next timeout) */
@@ -1895,8 +1831,6 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
if (tipc_own_addr > msg_prevnode(msg))
l_ptr->b_ptr->net_plane = msg_net_plane(msg);
- l_ptr->owner->permit_changeover = msg_redundant_link(msg);
-
switch (msg_type(msg)) {
case RESET_MSG:
@@ -2012,13 +1946,13 @@ exit:
}
-/*
- * tipc_link_tunnel(): Send one message via a link belonging to
- * another bearer. Owner node is locked.
+/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
+ * a different bearer. Owner node is locked.
*/
-static void tipc_link_tunnel(struct tipc_link *l_ptr,
- struct tipc_msg *tunnel_hdr, struct tipc_msg *msg,
- u32 selector)
+static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
+ struct tipc_msg *tunnel_hdr,
+ struct tipc_msg *msg,
+ u32 selector)
{
struct tipc_link *tunnel;
struct sk_buff *buf;
@@ -2041,12 +1975,13 @@ static void tipc_link_tunnel(struct tipc_link *l_ptr,
}
-
-/*
- * changeover(): Send whole message queue via the remaining link
- * Owner node is locked.
+/* tipc_link_failover_send_queue(): A link has gone down, but a second
+ * link is still active. We can do failover. Tunnel the failing link's
+ * whole send queue via the remaining link. This way, we don't lose
+ * any packets, and sequence order is preserved for subsequent traffic
+ * sent over the remaining link. Owner node is locked.
*/
-void tipc_link_changeover(struct tipc_link *l_ptr)
+void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
{
u32 msgcount = l_ptr->out_queue_size;
struct sk_buff *crs = l_ptr->first_out;
@@ -2057,11 +1992,6 @@ void tipc_link_changeover(struct tipc_link *l_ptr)
if (!tunnel)
return;
- if (!l_ptr->owner->permit_changeover) {
- pr_warn("%speer did not permit changeover\n", link_co_err);
- return;
- }
-
tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
@@ -2095,20 +2025,30 @@ void tipc_link_changeover(struct tipc_link *l_ptr)
msgcount = msg_msgcnt(msg);
while (msgcount--) {
msg_set_seqno(m, msg_seqno(msg));
- tipc_link_tunnel(l_ptr, &tunnel_hdr, m,
- msg_link_selector(m));
+ tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
+ msg_link_selector(m));
pos += align(msg_size(m));
m = (struct tipc_msg *)pos;
}
} else {
- tipc_link_tunnel(l_ptr, &tunnel_hdr, msg,
- msg_link_selector(msg));
+ tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
+ msg_link_selector(msg));
}
crs = crs->next;
}
}
-void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel)
+/* tipc_link_dup_send_queue(): A second link has become active. Tunnel a
+ * duplicate of the first link's send queue via the new link. This way, we
+ * are guaranteed that currently queued packets from a socket are delivered
+ * before future traffic from the same socket, even if this is using the
+ * new link. The last arriving copy of each duplicate packet is dropped at
+ * the receiving end by the regular protocol check, so packet cardinality
+ * and sequence order is preserved per sender/receiver socket pair.
+ * Owner node is locked.
+ */
+void tipc_link_dup_send_queue(struct tipc_link *l_ptr,
+ struct tipc_link *tunnel)
{
struct sk_buff *iter;
struct tipc_msg tunnel_hdr;
@@ -2164,12 +2104,14 @@ static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
return eb;
}
-/*
- * link_recv_changeover_msg(): Receive tunneled packet sent
- * via other link. Node is locked. Return extracted buffer.
+/* tipc_link_tunnel_rcv(): Receive a tunneled packet, sent
+ * via other link as result of a failover (ORIGINAL_MSG) or
+ * a new active link (DUPLICATE_MSG). Failover packets are
+ * returned to the active link for delivery upwards.
+ * Owner node is locked.
*/
-static int link_recv_changeover_msg(struct tipc_link **l_ptr,
- struct sk_buff **buf)
+static int tipc_link_tunnel_rcv(struct tipc_link **l_ptr,
+ struct sk_buff **buf)
{
struct sk_buff *tunnel_buf = *buf;
struct tipc_link *dest_link;
@@ -2306,11 +2248,7 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
if (fragm == NULL) {
kfree_skb(buf);
- while (buf_chain) {
- buf = buf_chain;
- buf_chain = buf_chain->next;
- kfree_skb(buf);
- }
+ kfree_skb_list(buf_chain);
return -ENOMEM;
}
msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 8a6c1026644d..3b6aa65b608c 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -112,7 +112,6 @@ struct tipc_stats {
* @continuity_interval: link continuity testing interval [in ms]
* @abort_limit: # of unacknowledged continuity probes needed to reset link
* @state: current state of link FSM
- * @blocked: indicates if link has been administratively blocked
* @fsm_msg_cnt: # of protocol messages link FSM has sent in current state
* @proto_msg: template for control messages generated by link
* @pmsg: convenience pointer to "proto_msg" field
@@ -162,7 +161,6 @@ struct tipc_link {
u32 continuity_interval;
u32 abort_limit;
int state;
- int blocked;
u32 fsm_msg_cnt;
struct {
unchar hdr[INT_H_SIZE];
@@ -218,16 +216,20 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
struct tipc_bearer *b_ptr,
const struct tipc_media_addr *media_addr);
void tipc_link_delete(struct tipc_link *l_ptr);
-void tipc_link_changeover(struct tipc_link *l_ptr);
-void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *dest);
+void tipc_link_failover_send_queue(struct tipc_link *l_ptr);
+void tipc_link_dup_send_queue(struct tipc_link *l_ptr,
+ struct tipc_link *dest);
void tipc_link_reset_fragments(struct tipc_link *l_ptr);
int tipc_link_is_up(struct tipc_link *l_ptr);
int tipc_link_is_active(struct tipc_link *l_ptr);
-u32 tipc_link_push_packet(struct tipc_link *l_ptr);
-void tipc_link_stop(struct tipc_link *l_ptr);
-struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space, u16 cmd);
-struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space);
-struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space);
+void tipc_link_purge_queues(struct tipc_link *l_ptr);
+struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area,
+ int req_tlv_space,
+ u16 cmd);
+struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area,
+ int req_tlv_space);
+struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area,
+ int req_tlv_space);
void tipc_link_reset(struct tipc_link *l_ptr);
int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector);
void tipc_link_send_names(struct list_head *message_list, u32 dest);
@@ -312,11 +314,6 @@ static inline int link_reset_reset(struct tipc_link *l_ptr)
return l_ptr->state == RESET_RESET;
}
-static inline int link_blocked(struct tipc_link *l_ptr)
-{
- return l_ptr->exp_msg_count || l_ptr->blocked;
-}
-
static inline int link_congested(struct tipc_link *l_ptr)
{
return l_ptr->out_queue_size >= l_ptr->queue_limit[0];
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 09dcd54b04e1..92a1533af4e0 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -148,8 +148,7 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
*/
static struct sub_seq *tipc_subseq_alloc(u32 cnt)
{
- struct sub_seq *sseq = kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC);
- return sseq;
+ return kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC);
}
/**
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 25100c0a6fe8..efe4d41bf11b 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -162,7 +162,7 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
pr_info("New link <%s> becomes standby\n", l_ptr->name);
return;
}
- tipc_link_send_duplicate(active[0], l_ptr);
+ tipc_link_dup_send_queue(active[0], l_ptr);
if (l_ptr->priority == active[0]->priority) {
active[0] = l_ptr;
return;
@@ -225,7 +225,7 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
if (active[0] == l_ptr)
node_select_active_links(n_ptr);
if (tipc_node_is_up(n_ptr))
- tipc_link_changeover(l_ptr);
+ tipc_link_failover_send_queue(l_ptr);
else
node_lost_contact(n_ptr);
}
@@ -235,11 +235,6 @@ int tipc_node_active_links(struct tipc_node *n_ptr)
return n_ptr->active_links[0] != NULL;
}
-int tipc_node_redundant_links(struct tipc_node *n_ptr)
-{
- return n_ptr->working_links > 1;
-}
-
int tipc_node_is_up(struct tipc_node *n_ptr)
{
return tipc_node_active_links(n_ptr);
@@ -291,11 +286,7 @@ static void node_lost_contact(struct tipc_node *n_ptr)
/* Flush broadcast link info associated with lost node */
if (n_ptr->bclink.recv_permitted) {
- while (n_ptr->bclink.deferred_head) {
- struct sk_buff *buf = n_ptr->bclink.deferred_head;
- n_ptr->bclink.deferred_head = buf->next;
- kfree_skb(buf);
- }
+ kfree_skb_list(n_ptr->bclink.deferred_head);
n_ptr->bclink.deferred_size = 0;
if (n_ptr->bclink.reasm_head) {
diff --git a/net/tipc/node.h b/net/tipc/node.h
index e5e96c04e167..63e2e8ead2fe 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -64,7 +64,6 @@
* @working_links: number of working links to node (both active and standby)
* @block_setup: bit mask of conditions preventing link establishment to node
* @link_cnt: number of links to node
- * @permit_changeover: non-zero if node has redundant links to this system
* @signature: node instance identifier
* @bclink: broadcast-related info
* @acked: sequence # of last outbound b'cast message acknowledged by node
@@ -89,7 +88,6 @@ struct tipc_node {
int link_cnt;
int working_links;
int block_setup;
- int permit_changeover;
u32 signature;
struct {
u32 acked;
@@ -115,7 +113,6 @@ void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
int tipc_node_active_links(struct tipc_node *n_ptr);
-int tipc_node_redundant_links(struct tipc_node *n_ptr);
int tipc_node_is_up(struct tipc_node *n_ptr);
struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space);
struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space);
diff --git a/net/tipc/port.c b/net/tipc/port.c
index c081a7632302..b742b2654525 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -251,18 +251,15 @@ struct tipc_port *tipc_createport(struct sock *sk,
return p_ptr;
}
-int tipc_deleteport(u32 ref)
+int tipc_deleteport(struct tipc_port *p_ptr)
{
- struct tipc_port *p_ptr;
struct sk_buff *buf = NULL;
- tipc_withdraw(ref, 0, NULL);
- p_ptr = tipc_port_lock(ref);
- if (!p_ptr)
- return -EINVAL;
+ tipc_withdraw(p_ptr, 0, NULL);
- tipc_ref_discard(ref);
- tipc_port_unlock(p_ptr);
+ spin_lock_bh(p_ptr->lock);
+ tipc_ref_discard(p_ptr->ref);
+ spin_unlock_bh(p_ptr->lock);
k_cancel_timer(&p_ptr->timer);
if (p_ptr->connected) {
@@ -704,47 +701,36 @@ int tipc_set_portimportance(u32 ref, unsigned int imp)
}
-int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
+int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
+ struct tipc_name_seq const *seq)
{
- struct tipc_port *p_ptr;
struct publication *publ;
u32 key;
- int res = -EINVAL;
- p_ptr = tipc_port_lock(ref);
- if (!p_ptr)
+ if (p_ptr->connected)
return -EINVAL;
+ key = p_ptr->ref + p_ptr->pub_count + 1;
+ if (key == p_ptr->ref)
+ return -EADDRINUSE;
- if (p_ptr->connected)
- goto exit;
- key = ref + p_ptr->pub_count + 1;
- if (key == ref) {
- res = -EADDRINUSE;
- goto exit;
- }
publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
scope, p_ptr->ref, key);
if (publ) {
list_add(&publ->pport_list, &p_ptr->publications);
p_ptr->pub_count++;
p_ptr->published = 1;
- res = 0;
+ return 0;
}
-exit:
- tipc_port_unlock(p_ptr);
- return res;
+ return -EINVAL;
}
-int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
+int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
+ struct tipc_name_seq const *seq)
{
- struct tipc_port *p_ptr;
struct publication *publ;
struct publication *tpubl;
int res = -EINVAL;
- p_ptr = tipc_port_lock(ref);
- if (!p_ptr)
- return -EINVAL;
if (!seq) {
list_for_each_entry_safe(publ, tpubl,
&p_ptr->publications, pport_list) {
@@ -771,7 +757,6 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
}
if (list_empty(&p_ptr->publications))
p_ptr->published = 0;
- tipc_port_unlock(p_ptr);
return res;
}
@@ -832,17 +817,14 @@ exit:
*/
int __tipc_disconnect(struct tipc_port *tp_ptr)
{
- int res;
-
if (tp_ptr->connected) {
tp_ptr->connected = 0;
/* let timer expire on its own to avoid deadlock! */
tipc_nodesub_unsubscribe(&tp_ptr->subscription);
- res = 0;
- } else {
- res = -ENOTCONN;
+ return 0;
}
- return res;
+
+ return -ENOTCONN;
}
/*
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 912253597343..34f12bd4074e 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -116,7 +116,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err);
void tipc_acknowledge(u32 port_ref, u32 ack);
-int tipc_deleteport(u32 portref);
+int tipc_deleteport(struct tipc_port *p_ptr);
int tipc_portimportance(u32 portref, unsigned int *importance);
int tipc_set_portimportance(u32 portref, unsigned int importance);
@@ -127,9 +127,9 @@ int tipc_set_portunreliable(u32 portref, unsigned int isunreliable);
int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable);
int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable);
-int tipc_publish(u32 portref, unsigned int scope,
+int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
struct tipc_name_seq const *name_seq);
-int tipc_withdraw(u32 portref, unsigned int scope,
+int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
struct tipc_name_seq const *name_seq);
int tipc_connect(u32 portref, struct tipc_portid const *port);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 3b61851bb927..c8341d1f995e 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -239,7 +239,6 @@ static int tipc_sk_create(struct net *net, struct socket *sock, int protocol,
int tipc_sock_create_local(int type, struct socket **res)
{
int rc;
- struct sock *sk;
rc = sock_create_lite(AF_TIPC, type, 0, res);
if (rc < 0) {
@@ -248,8 +247,6 @@ int tipc_sock_create_local(int type, struct socket **res)
}
tipc_sk_create(&init_net, *res, 0, 1);
- sk = (*res)->sk;
-
return 0;
}
@@ -354,7 +351,7 @@ static int release(struct socket *sock)
* Delete TIPC port; this ensures no more messages are queued
* (also disconnects an active connection & sends a 'FIN-' to peer)
*/
- res = tipc_deleteport(tport->ref);
+ res = tipc_deleteport(tport);
/* Discard any remaining (connection-based) messages in receive queue */
__skb_queue_purge(&sk->sk_receive_queue);
@@ -386,30 +383,46 @@ static int release(struct socket *sock)
*/
static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
{
+ struct sock *sk = sock->sk;
struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
- u32 portref = tipc_sk_port(sock->sk)->ref;
+ struct tipc_port *tport = tipc_sk_port(sock->sk);
+ int res = -EINVAL;
- if (unlikely(!uaddr_len))
- return tipc_withdraw(portref, 0, NULL);
+ lock_sock(sk);
+ if (unlikely(!uaddr_len)) {
+ res = tipc_withdraw(tport, 0, NULL);
+ goto exit;
+ }
- if (uaddr_len < sizeof(struct sockaddr_tipc))
- return -EINVAL;
- if (addr->family != AF_TIPC)
- return -EAFNOSUPPORT;
+ if (uaddr_len < sizeof(struct sockaddr_tipc)) {
+ res = -EINVAL;
+ goto exit;
+ }
+ if (addr->family != AF_TIPC) {
+ res = -EAFNOSUPPORT;
+ goto exit;
+ }
if (addr->addrtype == TIPC_ADDR_NAME)
addr->addr.nameseq.upper = addr->addr.nameseq.lower;
- else if (addr->addrtype != TIPC_ADDR_NAMESEQ)
- return -EAFNOSUPPORT;
+ else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
+ res = -EAFNOSUPPORT;
+ goto exit;
+ }
if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
(addr->addr.nameseq.type != TIPC_TOP_SRV) &&
- (addr->addr.nameseq.type != TIPC_CFG_SRV))
- return -EACCES;
+ (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
+ res = -EACCES;
+ goto exit;
+ }
- return (addr->scope > 0) ?
- tipc_publish(portref, addr->scope, &addr->addr.nameseq) :
- tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq);
+ res = (addr->scope > 0) ?
+ tipc_publish(tport, addr->scope, &addr->addr.nameseq) :
+ tipc_withdraw(tport, -addr->scope, &addr->addr.nameseq);
+exit:
+ release_sock(sk);
+ return res;
}
/**
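Editorial note: bind() now holds the socket lock across its whole body, so each early return becomes a goto to a single unlock point. The shape, as a sketch with illustrative helpers (bad_input and do_work are placeholders, not real symbols):

	/* Sketch of the single-exit locking shape bind() now follows. */
	static int example_op(struct socket *sock)
	{
		struct sock *sk = sock->sk;
		int res = -EINVAL;

		lock_sock(sk);
		if (bad_input(sock))	/* illustrative check */
			goto exit;	/* never return with sk locked */
		res = do_work(sock);	/* illustrative */
	exit:
		release_sock(sk);
		return res;
	}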
@@ -754,16 +767,11 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
/* Handle special cases where there is no connection */
if (unlikely(sock->state != SS_CONNECTED)) {
- if (sock->state == SS_UNCONNECTED) {
+ if (sock->state == SS_UNCONNECTED)
res = send_packet(NULL, sock, m, total_len);
- goto exit;
- } else if (sock->state == SS_DISCONNECTING) {
- res = -EPIPE;
- goto exit;
- } else {
- res = -ENOTCONN;
- goto exit;
- }
+ else
+ res = sock->state == SS_DISCONNECTING ? -EPIPE : -ENOTCONN;
+ goto exit;
}
if (unlikely(m->msg_name)) {
@@ -1311,14 +1319,12 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
- unsigned int limit;
if (msg_connected(msg))
- limit = sysctl_tipc_rmem[2];
- else
- limit = sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE <<
- msg_importance(msg);
- return limit;
+ return sysctl_tipc_rmem[2];
+
+ return sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE <<
+ msg_importance(msg);
}
/**
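Editorial note: the simplified rcvbuf_limit() exposes the scaling rule directly. A worked example, assuming TIPC_CRITICAL_IMPORTANCE is 3:

	/* limit = sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE << msg_importance(msg)
	 *
	 *   importance 0 (low)      -> sk_rcvbuf / 8
	 *   importance 1 (medium)   -> sk_rcvbuf / 4
	 *   importance 2 (high)     -> sk_rcvbuf / 2
	 *   importance 3 (critical) -> sk_rcvbuf
	 *
	 * Connected sockets skip this and use sysctl_tipc_rmem[2] directly.
	 */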
@@ -1514,14 +1520,12 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
sock->state != SS_CONNECTING,
timeout ? (long)msecs_to_jiffies(timeout)
: MAX_SCHEDULE_TIMEOUT);
- lock_sock(sk);
if (res <= 0) {
if (res == 0)
res = -ETIMEDOUT;
- else
- ; /* leave "res" unchanged */
- goto exit;
+ return res;
}
+ lock_sock(sk);
}
if (unlikely(sock->state == SS_DISCONNECTING))
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 01625ccc3ae6..800ca61758ff 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -80,6 +80,8 @@
* with BSD names.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
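Editorial note: the pr_fmt define rewrites every pr_*() call site in the file at preprocessing time. Roughly, following include/linux/printk.h:

	/* Simplified: how pr_info() picks up the per-file prefix. */
	#define pr_info(fmt, ...) \
		printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

	/* With pr_fmt() set to KBUILD_MODNAME ": " fmt, the call */
	pr_info("Attempt to release alive unix socket: %p\n", sk);
	/* logs something like "unix: Attempt to release alive unix socket: ...". */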
@@ -366,7 +368,7 @@ static void unix_sock_destructor(struct sock *sk)
WARN_ON(!sk_unhashed(sk));
WARN_ON(sk->sk_socket);
if (!sock_flag(sk, SOCK_DEAD)) {
- printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
+ pr_info("Attempt to release alive unix socket: %p\n", sk);
return;
}
@@ -378,7 +380,7 @@ static void unix_sock_destructor(struct sock *sk)
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
- printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
+ pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
atomic_long_read(&unix_nr_socks));
#endif
}
@@ -530,13 +532,17 @@ static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
struct msghdr *, size_t, int);
-static void unix_set_peek_off(struct sock *sk, int val)
+static int unix_set_peek_off(struct sock *sk, int val)
{
struct unix_sock *u = unix_sk(sk);
- mutex_lock(&u->readlock);
+ if (mutex_lock_interruptible(&u->readlock))
+ return -EINTR;
+
sk->sk_peek_off = val;
mutex_unlock(&u->readlock);
+
+ return 0;
}
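Editorial note: returning int lets a pending signal abort the wait for u->readlock, which a long recvmsg() may hold. The assumed caller shape in sock_setsockopt() for SO_PEEK_OFF:

	/* Sketch (assumed caller): the SO_PEEK_OFF setter may now fail. */
	if (sock->ops->set_peek_off)
		ret = sock->ops->set_peek_off(sk, val);	/* e.g. -EINTR */
	else
		ret = -EOPNOTSUPP;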
@@ -714,7 +720,9 @@ static int unix_autobind(struct socket *sock)
int err;
unsigned int retries = 0;
- mutex_lock(&u->readlock);
+ err = mutex_lock_interruptible(&u->readlock);
+ if (err)
+ return err;
err = 0;
if (u->addr)
@@ -873,7 +881,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
addr_len = err;
- mutex_lock(&u->readlock);
+ err = mutex_lock_interruptible(&u->readlock);
+ if (err)
+ goto out;
err = -EINVAL;
if (u->addr)
@@ -2433,8 +2443,7 @@ static int __init af_unix_init(void)
rc = proto_register(&unix_proto, 1);
if (rc != 0) {
- printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
- __func__);
+ pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
goto out;
}
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 7622789d3750..c8a8297cd4b8 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -35,6 +35,8 @@
* response
*/
+#define pr_fmt(fmt) "X25: " fmt
+
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
@@ -1809,7 +1811,7 @@ static int __init x25_init(void)
if (rc != 0)
goto out_sock;
- printk(KERN_INFO "X.25 for Linux Version 0.2\n");
+ pr_info("Linux Version 0.2\n");
x25_register_sysctl();
rc = x25_proc_init();
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index a8a236338e61..39231237e1c3 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -17,6 +17,8 @@
* 2000-09-04 Henner Eisen Prevent freeing a dangling skb.
*/
+#define pr_fmt(fmt) "X25: " fmt
+
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
@@ -89,7 +91,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
*/
if (frametype != X25_CLEAR_CONFIRMATION)
- printk(KERN_DEBUG "x25_receive_data(): unknown frame type %2x\n",frametype);
+ pr_debug("x25_receive_data(): unknown frame type %2x\n",frametype);
return 0;
}
@@ -114,7 +116,7 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
*/
nb = x25_get_neigh(dev);
if (!nb) {
- printk(KERN_DEBUG "X.25: unknown neighbour - %s\n", dev->name);
+ pr_debug("unknown neighbour - %s\n", dev->name);
goto drop;
}
@@ -154,7 +156,7 @@ void x25_establish_link(struct x25_neigh *nb)
switch (nb->dev->type) {
case ARPHRD_X25:
if ((skb = alloc_skb(1, GFP_ATOMIC)) == NULL) {
- printk(KERN_ERR "x25_dev: out of memory\n");
+ pr_err("x25_dev: out of memory\n");
return;
}
ptr = skb_put(skb, 1);
@@ -189,7 +191,7 @@ void x25_terminate_link(struct x25_neigh *nb)
skb = alloc_skb(1, GFP_ATOMIC);
if (!skb) {
- printk(KERN_ERR "x25_dev: out of memory\n");
+ pr_err("x25_dev: out of memory\n");
return;
}
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index b8253250d723..7ecd04c21360 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -21,6 +21,8 @@
* on response.
*/
+#define pr_fmt(fmt) "X25: " fmt
+
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/skbuff.h>
@@ -109,7 +111,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
case X25_MARKER:
break;
default:
- printk(KERN_DEBUG "X.25: unknown facility "
+ pr_debug("unknown facility "
"%02X, value %02X\n",
p[0], p[1]);
break;
@@ -132,7 +134,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
*vc_fac_mask |= X25_MASK_WINDOW_SIZE;
break;
default:
- printk(KERN_DEBUG "X.25: unknown facility "
+ pr_debug("unknown facility "
"%02X, values %02X, %02X\n",
p[0], p[1], p[2]);
break;
@@ -143,7 +145,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
case X25_FAC_CLASS_C:
if (len < 4)
return -1;
- printk(KERN_DEBUG "X.25: unknown facility %02X, "
+ pr_debug("unknown facility %02X, "
"values %02X, %02X, %02X\n",
p[0], p[1], p[2], p[3]);
p += 4;
@@ -172,7 +174,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
*vc_fac_mask |= X25_MASK_CALLED_AE;
break;
default:
- printk(KERN_DEBUG "X.25: unknown facility %02X,"
+ pr_debug("unknown facility %02X,"
"length %d\n", p[0], p[1]);
break;
}
@@ -341,12 +343,12 @@ void x25_limit_facilities(struct x25_facilities *facilities,
if (!nb->extended) {
if (facilities->winsize_in > 7) {
- printk(KERN_DEBUG "X.25: incoming winsize limited to 7\n");
+ pr_debug("incoming winsize limited to 7\n");
facilities->winsize_in = 7;
}
if (facilities->winsize_out > 7) {
facilities->winsize_out = 7;
- printk( KERN_DEBUG "X.25: outgoing winsize limited to 7\n");
+ pr_debug("outgoing winsize limited to 7\n");
}
}
}
diff --git a/net/x25/x25_forward.c b/net/x25/x25_forward.c
index c541b622ae16..cf561f1613e1 100644
--- a/net/x25/x25_forward.c
+++ b/net/x25/x25_forward.c
@@ -8,6 +8,9 @@
* History
* 03-01-2007 Added forwarding for x.25 Andrew Hendry
*/
+
+#define pr_fmt(fmt) "X25: " fmt
+
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -51,7 +54,7 @@ int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
list_for_each(entry, &x25_forward_list) {
x25_frwd = list_entry(entry, struct x25_forward, node);
if (x25_frwd->lci == lci) {
- printk(KERN_WARNING "X.25: call request for lci which is already registered!, transmitting but not registering new pair\n");
+ pr_warn("call request for lci which is already registered!, transmitting but not registering new pair\n");
same_lci = 1;
}
}
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index a49cd4ec551a..d1b0dc79bb6f 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -23,6 +23,8 @@
* i-frames.
*/
+#define pr_fmt(fmt) "X25: " fmt
+
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel.h>
@@ -317,7 +319,7 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp
break;
default:
- printk(KERN_WARNING "x25: unknown %02X in state 3\n", frametype);
+ pr_warn("unknown %02X in state 3\n", frametype);
break;
}
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index 4acacf3c6617..fd5ffb25873f 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -21,6 +21,8 @@
* 2000-09-04 Henner Eisen dev_hold() / dev_put() for x25_neigh.
*/
+#define pr_fmt(fmt) "X25: " fmt
+
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
@@ -93,13 +95,13 @@ void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
break;
- printk(KERN_WARNING "x25: diagnostic #%d - %02X %02X %02X\n",
+ pr_warn("diagnostic #%d - %02X %02X %02X\n",
skb->data[3], skb->data[4],
skb->data[5], skb->data[6]);
break;
default:
- printk(KERN_WARNING "x25: received unknown %02X with LCI 000\n",
+ pr_warn("received unknown %02X with LCI 000\n",
frametype);
break;
}
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index 5170d52bfd96..6b5af65f491f 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -23,6 +23,8 @@
* restriction on response.
*/
+#define pr_fmt(fmt) "X25: " fmt
+
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
@@ -148,7 +150,7 @@ void x25_write_internal(struct sock *sk, int frametype)
case X25_RESET_CONFIRMATION:
break;
default:
- printk(KERN_ERR "X.25: invalid frame type %02X\n", frametype);
+ pr_err("invalid frame type %02X\n", frametype);
return;
}
@@ -338,7 +340,7 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
}
}
- printk(KERN_DEBUG "X.25: invalid PLP frame %02X %02X %02X\n",
+ pr_debug("invalid PLP frame %02X %02X %02X\n",
frame[0], frame[1], frame[2]);
return X25_ILLEGAL;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 9a91f7431c41..e205c4b56afb 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -39,12 +39,7 @@
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN 100
-DEFINE_MUTEX(xfrm_cfg_mutex);
-EXPORT_SYMBOL(xfrm_cfg_mutex);
-
-static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock);
static struct dst_entry *xfrm_policy_sk_bundles;
-static DEFINE_RWLOCK(xfrm_policy_lock);
static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
@@ -438,7 +433,7 @@ static void xfrm_bydst_resize(struct net *net, int dir)
if (!ndst)
return;
- write_lock_bh(&xfrm_policy_lock);
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
for (i = hmask; i >= 0; i--)
xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);
@@ -446,7 +441,7 @@ static void xfrm_bydst_resize(struct net *net, int dir)
net->xfrm.policy_bydst[dir].table = ndst;
net->xfrm.policy_bydst[dir].hmask = nhashmask;
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}
@@ -463,7 +458,7 @@ static void xfrm_byidx_resize(struct net *net, int total)
if (!nidx)
return;
- write_lock_bh(&xfrm_policy_lock);
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
for (i = hmask; i >= 0; i--)
xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
@@ -471,7 +466,7 @@ static void xfrm_byidx_resize(struct net *net, int total)
net->xfrm.policy_byidx = nidx;
net->xfrm.policy_idx_hmask = nhashmask;
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}
@@ -504,7 +499,7 @@ static inline int xfrm_byidx_should_resize(struct net *net, int total)
void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
- read_lock_bh(&xfrm_policy_lock);
+ read_lock_bh(&net->xfrm.xfrm_policy_lock);
si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
@@ -513,7 +508,7 @@ void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
si->spdhcnt = net->xfrm.policy_idx_hmask;
si->spdhmcnt = xfrm_policy_hashmax;
- read_unlock_bh(&xfrm_policy_lock);
+ read_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_spd_getinfo);
@@ -538,7 +533,7 @@ static void xfrm_hash_resize(struct work_struct *work)
/* Generate new index... KAME seems to generate them ordered by cost
* of an absolute unpredictability of ordering of rules. This will not pass. */
-static u32 xfrm_gen_index(struct net *net, int dir)
+static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
static u32 idx_generator;
@@ -548,8 +543,14 @@ static u32 xfrm_gen_index(struct net *net, int dir)
u32 idx;
int found;
- idx = (idx_generator | dir);
- idx_generator += 8;
+ if (!index) {
+ idx = (idx_generator | dir);
+ idx_generator += 8;
+ } else {
+ idx = index;
+ index = 0;
+ }
+
if (idx == 0)
idx = 8;
list = net->xfrm.policy_byidx + idx_hash(net, idx);
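The extra parameter lets a configuration-supplied policy index bypass generation. A simplified userspace model of the selection logic; the collision scan over net->xfrm.policy_byidx is omitted here:

#include <stdio.h>

static unsigned int idx_generator;

/* A caller-supplied index is used as-is; otherwise a fresh index is
 * generated with the direction encoded in the low bits and the
 * generator advanced by 8. */
static unsigned int gen_index(int dir, unsigned int index)
{
	unsigned int idx;

	if (!index) {
		idx = idx_generator | dir;
		idx_generator += 8;
	} else {
		idx = index;
	}
	if (idx == 0)
		idx = 8;
	return idx;
}

int main(void)
{
	printf("generated: %u\n", gen_index(1, 0));    /* fresh index, dir 1 */
	printf("requested: %u\n", gen_index(1, 1025)); /* caller-chosen index */
	return 0;
}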
@@ -630,7 +631,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
struct hlist_head *chain;
struct hlist_node *newpos;
- write_lock_bh(&xfrm_policy_lock);
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
delpol = NULL;
newpos = NULL;
@@ -641,7 +642,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
xfrm_sec_ctx_match(pol->security, policy->security) &&
!WARN_ON(delpol)) {
if (excl) {
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
return -EEXIST;
}
delpol = pol;
@@ -672,14 +673,14 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
xfrm_policy_requeue(delpol, policy);
__xfrm_policy_unlink(delpol, dir);
}
- policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
+ policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
policy->curlft.add_time = get_seconds();
policy->curlft.use_time = 0;
if (!mod_timer(&policy->timer, jiffies + HZ))
xfrm_pol_hold(policy);
list_add(&policy->walk.all, &net->xfrm.policy_all);
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (delpol)
xfrm_policy_kill(delpol);
@@ -699,7 +700,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
struct hlist_head *chain;
*err = 0;
- write_lock_bh(&xfrm_policy_lock);
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
chain = policy_hash_bysel(net, sel, sel->family, dir);
ret = NULL;
hlist_for_each_entry(pol, chain, bydst) {
@@ -712,7 +713,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
*err = security_xfrm_policy_delete(
pol->security);
if (*err) {
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
return pol;
}
__xfrm_policy_unlink(pol, dir);
@@ -721,7 +722,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
break;
}
}
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (ret && delete)
xfrm_policy_kill(ret);
@@ -740,7 +741,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
return NULL;
*err = 0;
- write_lock_bh(&xfrm_policy_lock);
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
chain = net->xfrm.policy_byidx + idx_hash(net, id);
ret = NULL;
hlist_for_each_entry(pol, chain, byidx) {
@@ -751,7 +752,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
*err = security_xfrm_policy_delete(
pol->security);
if (*err) {
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
return pol;
}
__xfrm_policy_unlink(pol, dir);
@@ -760,7 +761,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
break;
}
}
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (ret && delete)
xfrm_policy_kill(ret);
@@ -823,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
int dir, err = 0, cnt = 0;
- write_lock_bh(&xfrm_policy_lock);
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
err = xfrm_policy_flush_secctx_check(net, type, audit_info);
if (err)
@@ -839,7 +840,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
if (pol->type != type)
continue;
__xfrm_policy_unlink(pol, dir);
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
cnt++;
xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
@@ -848,7 +849,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
xfrm_policy_kill(pol);
- write_lock_bh(&xfrm_policy_lock);
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
goto again1;
}
@@ -860,7 +861,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
if (pol->type != type)
continue;
__xfrm_policy_unlink(pol, dir);
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
cnt++;
xfrm_audit_policy_delete(pol, 1,
@@ -869,7 +870,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
audit_info->secid);
xfrm_policy_kill(pol);
- write_lock_bh(&xfrm_policy_lock);
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
goto again2;
}
}
@@ -878,7 +879,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
if (!cnt)
err = -ESRCH;
out:
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);
@@ -898,7 +899,7 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
if (list_empty(&walk->walk.all) && walk->seq != 0)
return 0;
- write_lock_bh(&xfrm_policy_lock);
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
if (list_empty(&walk->walk.all))
x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
else
@@ -924,7 +925,7 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
}
list_del_init(&walk->walk.all);
out:
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);
@@ -938,14 +939,14 @@ void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
}
EXPORT_SYMBOL(xfrm_policy_walk_init);
-void xfrm_policy_walk_done(struct xfrm_policy_walk *walk)
+void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
{
if (list_empty(&walk->walk.all))
return;
- write_lock_bh(&xfrm_policy_lock);
+ write_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
list_del(&walk->walk.all);
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);
@@ -990,7 +991,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
if (unlikely(!daddr || !saddr))
return NULL;
- read_lock_bh(&xfrm_policy_lock);
+ read_lock_bh(&net->xfrm.xfrm_policy_lock);
chain = policy_hash_direct(net, daddr, saddr, family, dir);
ret = NULL;
hlist_for_each_entry(pol, chain, bydst) {
@@ -1026,7 +1027,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
if (ret)
xfrm_pol_hold(ret);
fail:
- read_unlock_bh(&xfrm_policy_lock);
+ read_unlock_bh(&net->xfrm.xfrm_policy_lock);
return ret;
}
@@ -1103,8 +1104,9 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
const struct flowi *fl)
{
struct xfrm_policy *pol;
+ struct net *net = sock_net(sk);
- read_lock_bh(&xfrm_policy_lock);
+ read_lock_bh(&net->xfrm.xfrm_policy_lock);
if ((pol = sk->sk_policy[dir]) != NULL) {
bool match = xfrm_selector_match(&pol->selector, fl,
sk->sk_family);
@@ -1128,7 +1130,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
pol = NULL;
}
out:
- read_unlock_bh(&xfrm_policy_lock);
+ read_unlock_bh(&net->xfrm.xfrm_policy_lock);
return pol;
}
@@ -1166,9 +1168,11 @@ static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
- write_lock_bh(&xfrm_policy_lock);
+ struct net *net = xp_net(pol);
+
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
pol = __xfrm_policy_unlink(pol, dir);
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (pol) {
xfrm_policy_kill(pol);
return 0;
@@ -1187,12 +1191,12 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
return -EINVAL;
#endif
- write_lock_bh(&xfrm_policy_lock);
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
old_pol = sk->sk_policy[dir];
sk->sk_policy[dir] = pol;
if (pol) {
pol->curlft.add_time = get_seconds();
- pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir);
+ pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
}
if (old_pol) {
@@ -1204,7 +1208,7 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
*/
__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
}
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (old_pol) {
xfrm_policy_kill(old_pol);
@@ -1215,6 +1219,7 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
{
struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
+ struct net *net = xp_net(old);
if (newp) {
newp->selector = old->selector;
@@ -1233,9 +1238,9 @@ static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
newp->type = old->type;
memcpy(newp->xfrm_vec, old->xfrm_vec,
newp->xfrm_nr*sizeof(struct xfrm_tmpl));
- write_lock_bh(&xfrm_policy_lock);
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
- write_unlock_bh(&xfrm_policy_lock);
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
xfrm_pol_put(newp);
}
return newp;
@@ -1636,20 +1641,22 @@ free_dst:
goto out;
}
-static int inline
-xfrm_dst_alloc_copy(void **target, const void *src, int size)
+#ifdef CONFIG_XFRM_SUB_POLICY
+static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
{
if (!*target) {
*target = kmalloc(size, GFP_ATOMIC);
if (!*target)
return -ENOMEM;
}
+
memcpy(*target, src, size);
return 0;
}
+#endif
-static int inline
-xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
+static int xfrm_dst_update_parent(struct dst_entry *dst,
+ const struct xfrm_selector *sel)
{
#ifdef CONFIG_XFRM_SUB_POLICY
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
@@ -1660,8 +1667,8 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
#endif
}
-static int inline
-xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
+static int xfrm_dst_update_origin(struct dst_entry *dst,
+ const struct flowi *fl)
{
#ifdef CONFIG_XFRM_SUB_POLICY
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
@@ -1896,8 +1903,7 @@ static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
if (IS_ERR(xdst))
return xdst;
- if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0 ||
- (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP))
+ if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0)
return xdst;
dst1 = &xdst->u.dst;
@@ -2072,7 +2078,6 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
-restart:
dst = NULL;
xdst = NULL;
route = NULL;
@@ -2106,10 +2111,10 @@ restart:
dst_hold(&xdst->u.dst);
- spin_lock_bh(&xfrm_policy_sk_bundle_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
xdst->u.dst.next = xfrm_policy_sk_bundles;
xfrm_policy_sk_bundles = &xdst->u.dst;
- spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
route = xdst->route;
}
@@ -2152,23 +2157,8 @@ restart:
return make_blackhole(net, family, dst_orig);
}
- if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) {
- DECLARE_WAITQUEUE(wait, current);
- add_wait_queue(&net->xfrm.km_waitq, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&net->xfrm.km_waitq, &wait);
-
- if (!signal_pending(current)) {
- dst_release(dst);
- goto restart;
- }
-
- err = -ERESTART;
- } else
- err = -EAGAIN;
+ err = -EAGAIN;
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
goto error;
@@ -2434,7 +2424,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
}
xfrm_nr = ti;
if (npols > 1) {
- xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
+ xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net);
tpp = stp;
}
@@ -2563,10 +2553,10 @@ static void __xfrm_garbage_collect(struct net *net)
{
struct dst_entry *head, *next;
- spin_lock_bh(&xfrm_policy_sk_bundle_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
head = xfrm_policy_sk_bundles;
xfrm_policy_sk_bundles = NULL;
- spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
while (head) {
next = head->next;
@@ -2950,6 +2940,13 @@ static int __net_init xfrm_net_init(struct net *net)
rv = xfrm_sysctl_init(net);
if (rv < 0)
goto out_sysctl;
+
+ /* Initialize the per-net locks here */
+ spin_lock_init(&net->xfrm.xfrm_state_lock);
+ rwlock_init(&net->xfrm.xfrm_policy_lock);
+ spin_lock_init(&net->xfrm.xfrm_policy_sk_bundle_lock);
+ mutex_init(&net->xfrm.xfrm_cfg_mutex);
+
return 0;
out_sysctl:
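The per-net initialization above is the heart of this series: each namespace carries its own locks instead of sharing file-scope globals, so heavy xfrm traffic in one namespace no longer serializes against another. A small pthread sketch of the pattern, with netns_xfrm_demo as a made-up stand-in for struct netns_xfrm:

#include <pthread.h>
#include <stdio.h>

struct netns_xfrm_demo {
	pthread_mutex_t cfg_mutex; /* per-instance, not global */
	int policy_count;
};

static void net_init(struct netns_xfrm_demo *net)
{
	pthread_mutex_init(&net->cfg_mutex, NULL);
	net->policy_count = 0;
}

int main(void)
{
	struct netns_xfrm_demo a, b;

	net_init(&a);
	net_init(&b);
	/* Taking a's lock does not block work on b. */
	pthread_mutex_lock(&a.cfg_mutex);
	a.policy_count++;
	pthread_mutex_unlock(&a.cfg_mutex);
	printf("a=%d b=%d\n", a.policy_count, b.policy_count);
	return 0;
}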
@@ -3070,14 +3067,14 @@ static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
}
static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector *sel,
- u8 dir, u8 type)
+ u8 dir, u8 type, struct net *net)
{
struct xfrm_policy *pol, *ret = NULL;
struct hlist_head *chain;
u32 priority = ~0U;
- read_lock_bh(&xfrm_policy_lock);
- chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
+ read_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME*/
+ chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
hlist_for_each_entry(pol, chain, bydst) {
if (xfrm_migrate_selector_match(sel, &pol->selector) &&
pol->type == type) {
@@ -3086,7 +3083,7 @@ static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector
break;
}
}
- chain = &init_net.xfrm.policy_inexact[dir];
+ chain = &net->xfrm.policy_inexact[dir];
hlist_for_each_entry(pol, chain, bydst) {
if (xfrm_migrate_selector_match(sel, &pol->selector) &&
pol->type == type &&
@@ -3099,7 +3096,7 @@ static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector
if (ret)
xfrm_pol_hold(ret);
- read_unlock_bh(&xfrm_policy_lock);
+ read_unlock_bh(&net->xfrm.xfrm_policy_lock);
return ret;
}
@@ -3210,7 +3207,7 @@ static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
struct xfrm_migrate *m, int num_migrate,
- struct xfrm_kmaddress *k)
+ struct xfrm_kmaddress *k, struct net *net)
{
int i, err, nx_cur = 0, nx_new = 0;
struct xfrm_policy *pol = NULL;
@@ -3223,14 +3220,14 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
goto out;
/* Stage 1 - find policy */
- if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
+ if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
err = -ENOENT;
goto out;
}
/* Stage 2 - find and update state(s) */
for (i = 0, mp = m; i < num_migrate; i++, mp++) {
- if ((x = xfrm_migrate_state_find(mp))) {
+ if ((x = xfrm_migrate_state_find(mp, net))) {
x_cur[nx_cur] = x;
nx_cur++;
if ((xc = xfrm_state_migrate(x, mp))) {
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 68c2f357a183..a62c25ea3631 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -35,8 +35,6 @@
destination/tunnel endpoint. (output)
*/
-static DEFINE_SPINLOCK(xfrm_state_lock);
-
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static inline unsigned int xfrm_dst_hash(struct net *net,
@@ -127,7 +125,7 @@ static void xfrm_hash_resize(struct work_struct *work)
goto out_unlock;
}
- spin_lock_bh(&xfrm_state_lock);
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
for (i = net->xfrm.state_hmask; i >= 0; i--)
@@ -144,7 +142,7 @@ static void xfrm_hash_resize(struct work_struct *work)
net->xfrm.state_byspi = nspi;
net->xfrm.state_hmask = nhashmask;
- spin_unlock_bh(&xfrm_state_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
osize = (ohashmask + 1) * sizeof(struct hlist_head);
xfrm_hash_free(odst, osize);
@@ -374,8 +372,6 @@ static void xfrm_state_gc_task(struct work_struct *work)
hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
xfrm_state_gc_destroy(x);
-
- wake_up(&net->xfrm.km_waitq);
}
static inline unsigned long make_jiffies(long secs)
@@ -390,7 +386,6 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer * me)
{
struct tasklet_hrtimer *thr = container_of(me, struct tasklet_hrtimer, timer);
struct xfrm_state *x = container_of(thr, struct xfrm_state, mtimer);
- struct net *net = xs_net(x);
unsigned long now = get_seconds();
long next = LONG_MAX;
int warn = 0;
@@ -460,12 +455,8 @@ resched:
goto out;
expired:
- if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
+ if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0)
x->km.state = XFRM_STATE_EXPIRED;
- wake_up(&net->xfrm.km_waitq);
- next = 2;
- goto resched;
- }
err = __xfrm_state_delete(x);
if (!err)
@@ -535,14 +526,14 @@ int __xfrm_state_delete(struct xfrm_state *x)
if (x->km.state != XFRM_STATE_DEAD) {
x->km.state = XFRM_STATE_DEAD;
- spin_lock(&xfrm_state_lock);
+ spin_lock(&net->xfrm.xfrm_state_lock);
list_del(&x->km.all);
hlist_del(&x->bydst);
hlist_del(&x->bysrc);
if (x->id.spi)
hlist_del(&x->byspi);
net->xfrm.state_num--;
- spin_unlock(&xfrm_state_lock);
+ spin_unlock(&net->xfrm.xfrm_state_lock);
/* All xfrm_state objects are created by xfrm_state_alloc.
* The xfrm_state_alloc call gives a reference, and that
@@ -603,7 +594,7 @@ int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info)
{
int i, err = 0, cnt = 0;
- spin_lock_bh(&xfrm_state_lock);
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
err = xfrm_state_flush_secctx_check(net, proto, audit_info);
if (err)
goto out;
@@ -616,7 +607,7 @@ restart:
if (!xfrm_state_kern(x) &&
xfrm_id_proto_match(x->id.proto, proto)) {
xfrm_state_hold(x);
- spin_unlock_bh(&xfrm_state_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
err = xfrm_state_delete(x);
xfrm_audit_state_delete(x, err ? 0 : 1,
@@ -627,7 +618,7 @@ restart:
if (!err)
cnt++;
- spin_lock_bh(&xfrm_state_lock);
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
goto restart;
}
}
@@ -636,19 +627,18 @@ restart:
err = 0;
out:
- spin_unlock_bh(&xfrm_state_lock);
- wake_up(&net->xfrm.km_waitq);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
return err;
}
EXPORT_SYMBOL(xfrm_state_flush);
void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
{
- spin_lock_bh(&xfrm_state_lock);
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
si->sadcnt = net->xfrm.state_num;
si->sadhcnt = net->xfrm.state_hmask;
si->sadhmcnt = xfrm_state_hashmax;
- spin_unlock_bh(&xfrm_state_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_sad_getinfo);
@@ -801,7 +791,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
to_put = NULL;
- spin_lock_bh(&xfrm_state_lock);
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
if (x->props.family == encap_family &&
@@ -886,7 +876,7 @@ out:
xfrm_state_hold(x);
else
*err = acquire_in_progress ? -EAGAIN : error;
- spin_unlock_bh(&xfrm_state_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
if (to_put)
xfrm_state_put(to_put);
return x;
@@ -900,7 +890,7 @@ xfrm_stateonly_find(struct net *net, u32 mark,
unsigned int h;
struct xfrm_state *rx = NULL, *x = NULL;
- spin_lock(&xfrm_state_lock);
+ spin_lock(&net->xfrm.xfrm_state_lock);
h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
if (x->props.family == family &&
@@ -918,7 +908,7 @@ xfrm_stateonly_find(struct net *net, u32 mark,
if (rx)
xfrm_state_hold(rx);
- spin_unlock(&xfrm_state_lock);
+ spin_unlock(&net->xfrm.xfrm_state_lock);
return rx;
@@ -950,14 +940,12 @@ static void __xfrm_state_insert(struct xfrm_state *x)
if (x->replay_maxage)
mod_timer(&x->rtimer, jiffies + x->replay_maxage);
- wake_up(&net->xfrm.km_waitq);
-
net->xfrm.state_num++;
xfrm_hash_grow_check(net, x->bydst.next != NULL);
}
-/* xfrm_state_lock is held */
+/* net->xfrm.xfrm_state_lock is held */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
struct net *net = xs_net(xnew);
@@ -980,14 +968,16 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
void xfrm_state_insert(struct xfrm_state *x)
{
- spin_lock_bh(&xfrm_state_lock);
+ struct net *net = xs_net(x);
+
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
__xfrm_state_bump_genids(x);
__xfrm_state_insert(x);
- spin_unlock_bh(&xfrm_state_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);
-/* xfrm_state_lock is held */
+/* net->xfrm.xfrm_state_lock is held */
static struct xfrm_state *__find_acq_core(struct net *net,
const struct xfrm_mark *m,
unsigned short family, u8 mode,
@@ -1079,7 +1069,7 @@ int xfrm_state_add(struct xfrm_state *x)
to_put = NULL;
- spin_lock_bh(&xfrm_state_lock);
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
x1 = __xfrm_state_locate(x, use_spi, family);
if (x1) {
@@ -1108,7 +1098,7 @@ int xfrm_state_add(struct xfrm_state *x)
err = 0;
out:
- spin_unlock_bh(&xfrm_state_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
if (x1) {
xfrm_state_delete(x1);
@@ -1203,16 +1193,16 @@ out:
return NULL;
}
-/* xfrm_state_lock is held */
-struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
+/* net->xfrm.xfrm_state_lock is held */
+struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net)
{
unsigned int h;
struct xfrm_state *x;
if (m->reqid) {
- h = xfrm_dst_hash(&init_net, &m->old_daddr, &m->old_saddr,
+ h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr,
m->reqid, m->old_family);
- hlist_for_each_entry(x, init_net.xfrm.state_bydst+h, bydst) {
+ hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
if (x->props.mode != m->mode ||
x->id.proto != m->proto)
continue;
@@ -1227,9 +1217,9 @@ struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
return x;
}
} else {
- h = xfrm_src_hash(&init_net, &m->old_daddr, &m->old_saddr,
+ h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr,
m->old_family);
- hlist_for_each_entry(x, init_net.xfrm.state_bysrc+h, bysrc) {
+ hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
if (x->props.mode != m->mode ||
x->id.proto != m->proto)
continue;
@@ -1283,10 +1273,11 @@ int xfrm_state_update(struct xfrm_state *x)
struct xfrm_state *x1, *to_put;
int err;
int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
+ struct net *net = xs_net(x);
to_put = NULL;
- spin_lock_bh(&xfrm_state_lock);
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
x1 = __xfrm_state_locate(x, use_spi, x->props.family);
err = -ESRCH;
@@ -1306,7 +1297,7 @@ int xfrm_state_update(struct xfrm_state *x)
err = 0;
out:
- spin_unlock_bh(&xfrm_state_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
if (to_put)
xfrm_state_put(to_put);
@@ -1377,9 +1368,9 @@ xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32
{
struct xfrm_state *x;
- spin_lock_bh(&xfrm_state_lock);
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
- spin_unlock_bh(&xfrm_state_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);
@@ -1391,9 +1382,9 @@ xfrm_state_lookup_byaddr(struct net *net, u32 mark,
{
struct xfrm_state *x;
- spin_lock_bh(&xfrm_state_lock);
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family);
- spin_unlock_bh(&xfrm_state_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
return x;
}
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
@@ -1405,9 +1396,9 @@ xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
{
struct xfrm_state *x;
- spin_lock_bh(&xfrm_state_lock);
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
x = __find_acq_core(net, mark, family, mode, reqid, proto, daddr, saddr, create);
- spin_unlock_bh(&xfrm_state_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
return x;
}
@@ -1416,17 +1407,17 @@ EXPORT_SYMBOL(xfrm_find_acq);
#ifdef CONFIG_XFRM_SUB_POLICY
int
xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
- unsigned short family)
+ unsigned short family, struct net *net)
{
int err = 0;
struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
if (!afinfo)
return -EAFNOSUPPORT;
- spin_lock_bh(&xfrm_state_lock);
+ spin_lock_bh(&net->xfrm.xfrm_state_lock); /*FIXME*/
if (afinfo->tmpl_sort)
err = afinfo->tmpl_sort(dst, src, n);
- spin_unlock_bh(&xfrm_state_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
xfrm_state_put_afinfo(afinfo);
return err;
}
@@ -1438,13 +1429,15 @@ xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
{
int err = 0;
struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
+ struct net *net = xs_net(*dst);
+
if (!afinfo)
return -EAFNOSUPPORT;
- spin_lock_bh(&xfrm_state_lock);
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
if (afinfo->state_sort)
err = afinfo->state_sort(dst, src, n);
- spin_unlock_bh(&xfrm_state_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
xfrm_state_put_afinfo(afinfo);
return err;
}
@@ -1476,9 +1469,9 @@ struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
{
struct xfrm_state *x;
- spin_lock_bh(&xfrm_state_lock);
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
x = __xfrm_find_acq_byseq(net, mark, seq);
- spin_unlock_bh(&xfrm_state_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);
@@ -1496,6 +1489,30 @@ u32 xfrm_get_acqseq(void)
}
EXPORT_SYMBOL(xfrm_get_acqseq);
+int verify_spi_info(u8 proto, u32 min, u32 max)
+{
+ switch (proto) {
+ case IPPROTO_AH:
+ case IPPROTO_ESP:
+ break;
+
+ case IPPROTO_COMP:
+ /* IPCOMP spi is 16-bits. */
+ if (max >= 0x10000)
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (min > max)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(verify_spi_info);
+
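The helper consolidates the SPI-range validation that was previously private to xfrm_user so that other key managers can share it. A direct userspace port for experimentation, returning -1 where the kernel returns -EINVAL:

#include <stdio.h>
#include <netinet/in.h> /* IPPROTO_AH, IPPROTO_ESP, IPPROTO_COMP */

static int verify_spi_info(unsigned char proto, unsigned int min, unsigned int max)
{
	switch (proto) {
	case IPPROTO_AH:
	case IPPROTO_ESP:
		break;
	case IPPROTO_COMP:
		/* IPComp CPIs are only 16 bits wide. */
		if (max >= 0x10000)
			return -1;
		break;
	default:
		return -1;
	}
	if (min > max)
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", verify_spi_info(IPPROTO_ESP, 0x100, 0xffffffff)); /* 0 */
	printf("%d\n", verify_spi_info(IPPROTO_COMP, 0x100, 0x10000));   /* -1 */
	return 0;
}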
int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
{
struct net *net = xs_net(x);
@@ -1536,10 +1553,10 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
}
}
if (x->id.spi) {
- spin_lock_bh(&xfrm_state_lock);
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
- spin_unlock_bh(&xfrm_state_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
err = 0;
}
@@ -1562,7 +1579,7 @@ int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
if (walk->seq != 0 && list_empty(&walk->all))
return 0;
- spin_lock_bh(&xfrm_state_lock);
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
if (list_empty(&walk->all))
x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all);
else
@@ -1586,7 +1603,7 @@ int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
}
list_del_init(&walk->all);
out:
- spin_unlock_bh(&xfrm_state_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
@@ -1600,14 +1617,14 @@ void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto)
}
EXPORT_SYMBOL(xfrm_state_walk_init);
-void xfrm_state_walk_done(struct xfrm_state_walk *walk)
+void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net)
{
if (list_empty(&walk->all))
return;
- spin_lock_bh(&xfrm_state_lock);
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
list_del(&walk->all);
- spin_unlock_bh(&xfrm_state_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_walk_done);
@@ -1655,16 +1672,12 @@ EXPORT_SYMBOL(km_state_notify);
void km_state_expired(struct xfrm_state *x, int hard, u32 portid)
{
- struct net *net = xs_net(x);
struct km_event c;
c.data.hard = hard;
c.portid = portid;
c.event = XFRM_MSG_EXPIRE;
km_state_notify(x, &c);
-
- if (hard)
- wake_up(&net->xfrm.km_waitq);
}
EXPORT_SYMBOL(km_state_expired);
@@ -1707,16 +1720,12 @@ EXPORT_SYMBOL(km_new_mapping);
void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid)
{
- struct net *net = xp_net(pol);
struct km_event c;
c.data.hard = hard;
c.portid = portid;
c.event = XFRM_MSG_POLEXPIRE;
km_policy_notify(pol, dir, &c);
-
- if (hard)
- wake_up(&net->xfrm.km_waitq);
}
EXPORT_SYMBOL(km_policy_expired);
@@ -2025,7 +2034,7 @@ int __net_init xfrm_state_init(struct net *net)
INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
INIT_HLIST_HEAD(&net->xfrm.state_gc_list);
INIT_WORK(&net->xfrm.state_gc_work, xfrm_state_gc_task);
- init_waitqueue_head(&net->xfrm.km_waitq);
+ spin_lock_init(&net->xfrm.xfrm_state_lock);
return 0;
out_byspi:
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index f964d4c00ffb..97681a390402 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -181,7 +181,9 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
attrs[XFRMA_ALG_AEAD] ||
attrs[XFRMA_ALG_CRYPT] ||
attrs[XFRMA_ALG_COMP] ||
- attrs[XFRMA_TFCPAD])
+ attrs[XFRMA_TFCPAD] ||
+ (ntohl(p->id.spi) >= 0x10000))
+
goto out;
break;
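The new condition rejects user-specified IPComp SPIs that cannot fit the 16-bit CPI field; the SPI arrives in network byte order, hence the ntohl() before the comparison. A two-line illustration:

#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	unsigned int spi_be = htonl(0x12345); /* as received from userspace */

	printf("host order 0x%x, too big for IPComp: %s\n",
	       ntohl(spi_be), ntohl(spi_be) >= 0x10000 ? "yes" : "no");
	return 0;
}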
@@ -877,7 +879,10 @@ static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
static int xfrm_dump_sa_done(struct netlink_callback *cb)
{
struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
- xfrm_state_walk_done(walk);
+ struct sock *sk = cb->skb->sk;
+ struct net *net = sock_net(sk);
+
+ xfrm_state_walk_done(walk, net);
return 0;
}
@@ -1074,29 +1079,6 @@ out_noput:
return err;
}
-static int verify_userspi_info(struct xfrm_userspi_info *p)
-{
- switch (p->info.id.proto) {
- case IPPROTO_AH:
- case IPPROTO_ESP:
- break;
-
- case IPPROTO_COMP:
- /* IPCOMP spi is 16-bits. */
- if (p->max >= 0x10000)
- return -EINVAL;
- break;
-
- default:
- return -EINVAL;
- }
-
- if (p->min > p->max)
- return -EINVAL;
-
- return 0;
-}
-
static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
@@ -1111,7 +1093,7 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
struct xfrm_mark m;
p = nlmsg_data(nlh);
- err = verify_userspi_info(p);
+ err = verify_spi_info(p->info.id.proto, p->min, p->max);
if (err)
goto out_noput;
@@ -1189,6 +1171,8 @@ static int verify_policy_type(u8 type)
static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
{
+ int ret;
+
switch (p->share) {
case XFRM_SHARE_ANY:
case XFRM_SHARE_SESSION:
@@ -1224,7 +1208,13 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
return -EINVAL;
}
- return verify_policy_dir(p->dir);
+ ret = verify_policy_dir(p->dir);
+ if (ret)
+ return ret;
+ if (p->index && ((p->index & XFRM_POLICY_MAX) != p->dir))
+ return -EINVAL;
+
+ return 0;
}
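The added check keeps a user-chosen policy index consistent with how xfrm_gen_index() encodes the direction in the low bits. A toy model, assuming XFRM_POLICY_MAX is 3 as in the uapi headers:

#include <stdio.h>

#define XFRM_POLICY_MAX 3 /* value assumed from the kernel headers */

/* Returns 1 if the index is acceptable: either absent (kernel will
 * generate one) or carrying the right direction in its low bits. */
static int index_matches_dir(unsigned int index, unsigned char dir)
{
	if (index && ((index & XFRM_POLICY_MAX) != dir))
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", index_matches_dir(0, 1));    /* 1: index generated later */
	printf("%d\n", index_matches_dir(1025, 1)); /* 1: low bits encode dir 1 */
	printf("%d\n", index_matches_dir(1026, 1)); /* 0: mismatch, rejected */
	return 0;
}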
static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
@@ -1547,8 +1537,9 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
static int xfrm_dump_policy_done(struct netlink_callback *cb)
{
struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
+ struct net *net = sock_net(cb->skb->sk);
- xfrm_policy_walk_done(walk);
+ xfrm_policy_walk_done(walk, net);
return 0;
}
@@ -2129,6 +2120,7 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
u8 type;
int err;
int n = 0;
+ struct net *net = sock_net(skb->sk);
if (attrs[XFRMA_MIGRATE] == NULL)
return -EINVAL;
@@ -2146,7 +2138,7 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
if (!n)
return 0;
- xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp);
+ xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net);
return 0;
}
@@ -2394,9 +2386,11 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
static void xfrm_netlink_rcv(struct sk_buff *skb)
{
- mutex_lock(&xfrm_cfg_mutex);
+ struct net *net = sock_net(skb->sk);
+
+ mutex_lock(&net->xfrm.xfrm_cfg_mutex);
netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
- mutex_unlock(&xfrm_cfg_mutex);
+ mutex_unlock(&net->xfrm.xfrm_cfg_mutex);
}
static inline size_t xfrm_expire_msgsize(void)
diff --git a/scripts/asn1_compiler.c b/scripts/asn1_compiler.c
index db0e5cd34c70..91c4117637ae 100644
--- a/scripts/asn1_compiler.c
+++ b/scripts/asn1_compiler.c
@@ -1353,6 +1353,8 @@ static void render_out_of_line_list(FILE *out)
render_opcode(out, "ASN1_OP_END_SET_OF%s,\n", act);
render_opcode(out, "_jump_target(%u),\n", entry);
break;
+ default:
+ break;
}
if (e->action)
render_opcode(out, "_action(ACT_%s),\n",
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 61090e0ff613..9c9810030377 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3289,6 +3289,7 @@ sub process {
}
}
if (!defined $suppress_whiletrailers{$linenr} &&
+ defined($stat) && defined($cond) &&
$line =~ /\b(?:if|while|for)\s*\(/ && $line !~ /^.\s*#/) {
my ($s, $c) = ($stat, $cond);
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 32b10f53d0b4..2dcb37736d84 100644
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -82,7 +82,9 @@ kallsyms()
kallsymopt="${kallsymopt} --all-symbols"
fi
- kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
+ if [ -n "${CONFIG_ARM}" ] && [ -n "${CONFIG_PAGE_OFFSET}" ]; then
+ kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
+ fi
local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index d0da66396f62..91280b82da08 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -364,7 +364,8 @@ if ($arch eq "x86_64") {
} elsif ($arch eq "blackfin") {
$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s__mcount\$";
$mcount_adjust = -4;
-} elsif ($arch eq "tilegx") {
+} elsif ($arch eq "tilegx" || $arch eq "tile") {
+ # Default to the newer TILE-Gx architecture if only "tile" is given.
$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s__mcount\$";
$type = ".quad";
$alignment = 8;
diff --git a/scripts/sortextable.c b/scripts/sortextable.c
index 5f7a8b663cb9..7941fbdfb050 100644
--- a/scripts/sortextable.c
+++ b/scripts/sortextable.c
@@ -31,6 +31,10 @@
#include <tools/be_byteshift.h>
#include <tools/le_byteshift.h>
+#ifndef EM_ARCOMPACT
+#define EM_ARCOMPACT 93
+#endif
+
#ifndef EM_AARCH64
#define EM_AARCH64 183
#endif
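The #ifndef fallback keeps the host tool building against elf.h headers that predate the ARCompact machine constant. The same idiom in isolation:

#include <stdio.h>
#include <elf.h>

/* Supply the constant locally when the system header lacks it. */
#ifndef EM_ARCOMPACT
#define EM_ARCOMPACT 93
#endif

int main(void)
{
	printf("EM_ARCOMPACT = %d\n", EM_ARCOMPACT);
	return 0;
}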
@@ -268,6 +272,7 @@ do_file(char const *const fname)
case EM_S390:
custom_sort = sort_relative_table;
break;
+ case EM_ARCOMPACT:
case EM_ARM:
case EM_AARCH64:
case EM_MIPS:
diff --git a/security/Makefile b/security/Makefile
index c26c81e92571..a5918e01a4f7 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_MMU) += min_addr.o
# Object file lists
obj-$(CONFIG_SECURITY) += security.o capability.o
obj-$(CONFIG_SECURITYFS) += inode.o
-# Must precede capability.o in order to stack properly.
obj-$(CONFIG_SECURITY_SELINUX) += selinux/built-in.o
obj-$(CONFIG_SECURITY_SMACK) += smack/built-in.o
obj-$(CONFIG_AUDIT) += lsm_audit.o
diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
index 031d2d9dd695..89c78658031f 100644
--- a/security/apparmor/audit.c
+++ b/security/apparmor/audit.c
@@ -111,7 +111,6 @@ static const char *const aa_audit_type[] = {
static void audit_pre(struct audit_buffer *ab, void *ca)
{
struct common_audit_data *sa = ca;
- struct task_struct *tsk = sa->aad->tsk ? sa->aad->tsk : current;
if (aa_g_audit_header) {
audit_log_format(ab, "apparmor=");
@@ -132,11 +131,6 @@ static void audit_pre(struct audit_buffer *ab, void *ca)
if (sa->aad->profile) {
struct aa_profile *profile = sa->aad->profile;
- pid_t pid;
- rcu_read_lock();
- pid = rcu_dereference(tsk->real_parent)->pid;
- rcu_read_unlock();
- audit_log_format(ab, " parent=%d", pid);
if (profile->ns != root_ns) {
audit_log_format(ab, " namespace=");
audit_log_untrustedstring(ab, profile->ns->base.hname);
@@ -149,12 +143,6 @@ static void audit_pre(struct audit_buffer *ab, void *ca)
audit_log_format(ab, " name=");
audit_log_untrustedstring(ab, sa->aad->name);
}
-
- if (sa->aad->tsk) {
- audit_log_format(ab, " pid=%d comm=", tsk->pid);
- audit_log_untrustedstring(ab, tsk->comm);
- }
-
}
/**
@@ -212,7 +200,7 @@ int aa_audit(int type, struct aa_profile *profile, gfp_t gfp,
if (sa->aad->type == AUDIT_APPARMOR_KILL)
(void)send_sig_info(SIGKILL, NULL,
- sa->aad->tsk ? sa->aad->tsk : current);
+ sa->u.tsk ? sa->u.tsk : current);
if (sa->aad->type == AUDIT_APPARMOR_ALLOWED)
return complain_error(sa->aad->error);
diff --git a/security/apparmor/capability.c b/security/apparmor/capability.c
index 84d1f5f53877..1101c6f64bb7 100644
--- a/security/apparmor/capability.c
+++ b/security/apparmor/capability.c
@@ -53,8 +53,7 @@ static void audit_cb(struct audit_buffer *ab, void *va)
/**
* audit_caps - audit a capability
- * @profile: profile confining task (NOT NULL)
- * @task: task capability test was performed against (NOT NULL)
+ * @profile: profile being tested for confinement (NOT NULL)
* @cap: capability tested
* @error: error code returned by test
*
@@ -63,8 +62,7 @@ static void audit_cb(struct audit_buffer *ab, void *va)
*
* Returns: 0 or sa->error on success, error code on failure
*/
-static int audit_caps(struct aa_profile *profile, struct task_struct *task,
- int cap, int error)
+static int audit_caps(struct aa_profile *profile, int cap, int error)
{
struct audit_cache *ent;
int type = AUDIT_APPARMOR_AUTO;
@@ -73,7 +71,6 @@ static int audit_caps(struct aa_profile *profile, struct task_struct *task,
sa.type = LSM_AUDIT_DATA_CAP;
sa.aad = &aad;
sa.u.cap = cap;
- sa.aad->tsk = task;
sa.aad->op = OP_CAPABLE;
sa.aad->error = error;
@@ -124,8 +121,7 @@ static int profile_capable(struct aa_profile *profile, int cap)
/**
* aa_capable - test permission to use capability
- * @task: task doing capability test against (NOT NULL)
- * @profile: profile confining @task (NOT NULL)
+ * @profile: profile being tested against (NOT NULL)
* @cap: capability to be tested
* @audit: whether an audit record should be generated
*
@@ -133,8 +129,7 @@ static int profile_capable(struct aa_profile *profile, int cap)
*
* Returns: 0 on success, or else an error code.
*/
-int aa_capable(struct task_struct *task, struct aa_profile *profile, int cap,
- int audit)
+int aa_capable(struct aa_profile *profile, int cap, int audit)
{
int error = profile_capable(profile, cap);
@@ -144,5 +139,5 @@ int aa_capable(struct task_struct *task, struct aa_profile *profile, int cap,
return error;
}
- return audit_caps(profile, task, cap, error);
+ return audit_caps(profile, cap, error);
}
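Dropping the task argument reflects that every caller passed the current task, so the check can operate on the profile alone. A stubbed sketch of the slimmed-down entry point; profile_capable() here is illustrative, not the real implementation, and the CAP_SYS_ADMIN value of 21 is taken from the capability headers:

#include <stdio.h>

struct aa_profile { const char *name; };

static int profile_capable(struct aa_profile *p, int cap)
{
	(void)p;
	return cap == 21 ? -1 : 0; /* deny CAP_SYS_ADMIN for the demo */
}

static int aa_capable(struct aa_profile *profile, int cap, int audit)
{
	int error = profile_capable(profile, cap);

	if (!audit)
		return error;
	if (error)
		printf("audit: %s denied cap %d\n", profile->name, cap);
	return error;
}

int main(void)
{
	struct aa_profile p = { "demo-profile" };

	printf("%d\n", aa_capable(&p, 21, 1));
	return 0;
}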
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 26c607c971f5..452567d3a08e 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -50,23 +50,21 @@ void aa_free_domain_entries(struct aa_domain *domain)
/**
* may_change_ptraced_domain - check if can change profile on ptraced task
- * @task: task we want to change profile of (NOT NULL)
* @to_profile: profile to change to (NOT NULL)
*
- * Check if the task is ptraced and if so if the tracing task is allowed
+ * Check if current is ptraced and if so if the tracing task is allowed
* to trace the new domain
*
* Returns: %0 or error if change not allowed
*/
-static int may_change_ptraced_domain(struct task_struct *task,
- struct aa_profile *to_profile)
+static int may_change_ptraced_domain(struct aa_profile *to_profile)
{
struct task_struct *tracer;
struct aa_profile *tracerp = NULL;
int error = 0;
rcu_read_lock();
- tracer = ptrace_parent(task);
+ tracer = ptrace_parent(current);
if (tracer)
/* released below */
tracerp = aa_get_task_profile(tracer);
@@ -75,7 +73,7 @@ static int may_change_ptraced_domain(struct task_struct *task,
if (!tracer || unconfined(tracerp))
goto out;
- error = aa_may_ptrace(tracer, tracerp, to_profile, PTRACE_MODE_ATTACH);
+ error = aa_may_ptrace(tracerp, to_profile, PTRACE_MODE_ATTACH);
out:
rcu_read_unlock();
@@ -477,7 +475,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
}
if (bprm->unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) {
- error = may_change_ptraced_domain(current, new_profile);
+ error = may_change_ptraced_domain(new_profile);
if (error) {
aa_put_profile(new_profile);
goto audit;
@@ -690,7 +688,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
}
}
- error = may_change_ptraced_domain(current, hat);
+ error = may_change_ptraced_domain(hat);
if (error) {
info = "ptraced";
error = -EPERM;
@@ -829,7 +827,7 @@ int aa_change_profile(const char *ns_name, const char *hname, bool onexec,
}
/* check if tracing task is allowed to trace target domain */
- error = may_change_ptraced_domain(current, target);
+ error = may_change_ptraced_domain(target);
if (error) {
info = "ptrace prevents transition";
goto audit;
diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
index 30e8d7687259..ba3dfd17f23f 100644
--- a/security/apparmor/include/audit.h
+++ b/security/apparmor/include/audit.h
@@ -109,7 +109,6 @@ struct apparmor_audit_data {
void *profile;
const char *name;
const char *info;
- struct task_struct *tsk;
union {
void *target;
struct {
diff --git a/security/apparmor/include/capability.h b/security/apparmor/include/capability.h
index 2e7c9d6a2f3b..fc3fa381d850 100644
--- a/security/apparmor/include/capability.h
+++ b/security/apparmor/include/capability.h
@@ -4,7 +4,7 @@
* This file contains AppArmor capability mediation definitions.
*
* Copyright (C) 1998-2008 Novell/SUSE
- * Copyright 2009-2010 Canonical Ltd.
+ * Copyright 2009-2013 Canonical Ltd.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -38,8 +38,7 @@ struct aa_caps {
extern struct aa_fs_entry aa_fs_entry_caps[];
-int aa_capable(struct task_struct *task, struct aa_profile *profile, int cap,
- int audit);
+int aa_capable(struct aa_profile *profile, int cap, int audit);
static inline void aa_free_cap_rules(struct aa_caps *caps)
{
diff --git a/security/apparmor/include/ipc.h b/security/apparmor/include/ipc.h
index aeda0fbc8b2f..288ca76e2fb1 100644
--- a/security/apparmor/include/ipc.h
+++ b/security/apparmor/include/ipc.h
@@ -19,8 +19,8 @@
struct aa_profile;
-int aa_may_ptrace(struct task_struct *tracer_task, struct aa_profile *tracer,
- struct aa_profile *tracee, unsigned int mode);
+int aa_may_ptrace(struct aa_profile *tracer, struct aa_profile *tracee,
+ unsigned int mode);
int aa_ptrace(struct task_struct *tracer, struct task_struct *tracee,
unsigned int mode);
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
index c51d2266587e..777ac1c47253 100644
--- a/security/apparmor/ipc.c
+++ b/security/apparmor/ipc.c
@@ -54,15 +54,14 @@ static int aa_audit_ptrace(struct aa_profile *profile,
/**
* aa_may_ptrace - test if tracer task can trace the tracee
- * @tracer_task: task who will do the tracing (NOT NULL)
* @tracer: profile of the task doing the tracing (NOT NULL)
* @tracee: task to be traced
* @mode: whether PTRACE_MODE_READ || PTRACE_MODE_ATTACH
*
* Returns: %0 else error code if permission denied or error
*/
-int aa_may_ptrace(struct task_struct *tracer_task, struct aa_profile *tracer,
- struct aa_profile *tracee, unsigned int mode)
+int aa_may_ptrace(struct aa_profile *tracer, struct aa_profile *tracee,
+ unsigned int mode)
{
/* TODO: currently only based on capability, not extended ptrace
* rules,
@@ -72,7 +71,7 @@ int aa_may_ptrace(struct task_struct *tracer_task, struct aa_profile *tracer,
if (unconfined(tracer) || tracer == tracee)
return 0;
/* log this capability request */
- return aa_capable(tracer_task, tracer, CAP_SYS_PTRACE, 1);
+ return aa_capable(tracer, CAP_SYS_PTRACE, 1);
}
/**
@@ -101,7 +100,7 @@ int aa_ptrace(struct task_struct *tracer, struct task_struct *tracee,
if (!unconfined(tracer_p)) {
struct aa_profile *tracee_p = aa_get_task_profile(tracee);
- error = aa_may_ptrace(tracer, tracer_p, tracee_p, mode);
+ error = aa_may_ptrace(tracer_p, tracee_p, mode);
error = aa_audit_ptrace(tracer_p, tracee_p, error);
aa_put_profile(tracee_p);
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index fb99e18123b4..4257b7e2796b 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -145,7 +145,7 @@ static int apparmor_capable(const struct cred *cred, struct user_namespace *ns,
if (!error) {
profile = aa_cred_profile(cred);
if (!unconfined(profile))
- error = aa_capable(current, profile, cap, audit);
+ error = aa_capable(profile, cap, audit);
}
return error;
}
diff --git a/security/capability.c b/security/capability.c
index dbeb9bc27b24..8b4f24ae4338 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -777,9 +777,15 @@ static int cap_xfrm_policy_delete_security(struct xfrm_sec_ctx *ctx)
return 0;
}
-static int cap_xfrm_state_alloc_security(struct xfrm_state *x,
- struct xfrm_user_sec_ctx *sec_ctx,
- u32 secid)
+static int cap_xfrm_state_alloc(struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *sec_ctx)
+{
+ return 0;
+}
+
+static int cap_xfrm_state_alloc_acquire(struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec,
+ u32 secid)
{
return 0;
}
@@ -1101,7 +1107,8 @@ void __init security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, xfrm_policy_clone_security);
set_to_cap_if_null(ops, xfrm_policy_free_security);
set_to_cap_if_null(ops, xfrm_policy_delete_security);
- set_to_cap_if_null(ops, xfrm_state_alloc_security);
+ set_to_cap_if_null(ops, xfrm_state_alloc);
+ set_to_cap_if_null(ops, xfrm_state_alloc_acquire);
set_to_cap_if_null(ops, xfrm_state_free_security);
set_to_cap_if_null(ops, xfrm_state_delete_security);
set_to_cap_if_null(ops, xfrm_policy_lookup);
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index 0b759e17a131..b4af4ebc5be2 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -28,7 +28,7 @@ static const char *keyring_name[INTEGRITY_KEYRING_MAX] = {
};
int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
- const char *digest, int digestlen)
+ const char *digest, int digestlen)
{
if (id >= INTEGRITY_KEYRING_MAX)
return -EINVAL;
@@ -44,9 +44,10 @@ int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
}
}
- switch (sig[0]) {
+ switch (sig[1]) {
case 1:
- return digsig_verify(keyring[id], sig, siglen,
+ /* v1 API expects signature without xattr type */
+ return digsig_verify(keyring[id], sig + 1, siglen - 1,
digest, digestlen);
case 2:
return asymmetric_verify(keyring[id], sig, siglen,
diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c
index b4754667659d..9eae4809006b 100644
--- a/security/integrity/digsig_asymmetric.c
+++ b/security/integrity/digsig_asymmetric.c
@@ -20,17 +20,6 @@
#include "integrity.h"
/*
- * signature format v2 - for using with asymmetric keys
- */
-struct signature_v2_hdr {
- uint8_t version; /* signature format version */
- uint8_t hash_algo; /* Digest algorithm [enum pkey_hash_algo] */
- uint32_t keyid; /* IMA key identifier - not X509/PGP specific*/
- uint16_t sig_size; /* signature size */
- uint8_t sig[0]; /* signature payload */
-} __packed;
-
-/*
* Request an asymmetric key.
*/
static struct key *request_asymmetric_key(struct key *keyring, uint32_t keyid)
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index af9b6852f4e1..336b3ddfe63f 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -123,7 +123,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
goto out;
}
- xattr_len = rc - 1;
+ xattr_len = rc;
/* check value type */
switch (xattr_data->type) {
@@ -143,7 +143,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
if (rc)
break;
rc = integrity_digsig_verify(INTEGRITY_KEYRING_EVM,
- xattr_data->digest, xattr_len,
+ (const char *)xattr_data, xattr_len,
calc.digest, sizeof(calc.digest));
if (!rc) {
/* we probably want to replace rsa with hmac here */
diff --git a/security/integrity/evm/evm_posix_acl.c b/security/integrity/evm/evm_posix_acl.c
index b1753e98bf9a..46408b9e62e8 100644
--- a/security/integrity/evm/evm_posix_acl.c
+++ b/security/integrity/evm/evm_posix_acl.c
@@ -11,8 +11,9 @@
#include <linux/module.h>
#include <linux/xattr.h>
+#include <linux/evm.h>
-int posix_xattr_acl(char *xattr)
+int posix_xattr_acl(const char *xattr)
{
int xattr_len = strlen(xattr);
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
index 74522dbd10a6..c49d3f14cbec 100644
--- a/security/integrity/iint.c
+++ b/security/integrity/iint.c
@@ -70,6 +70,8 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode)
static void iint_free(struct integrity_iint_cache *iint)
{
+ kfree(iint->ima_hash);
+ iint->ima_hash = NULL;
iint->version = 0;
iint->flags = 0UL;
iint->ima_file_status = INTEGRITY_UNKNOWN;
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 39196abaff0d..81a27971d884 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -9,6 +9,7 @@ config IMA
select CRYPTO_HMAC
select CRYPTO_MD5
select CRYPTO_SHA1
+ select CRYPTO_HASH_INFO
select TCG_TPM if HAS_IOMEM && !UML
select TCG_TIS if TCG_TPM && X86
select TCG_IBMVTPM if TCG_TPM && PPC64
@@ -45,6 +46,69 @@ config IMA_LSM_RULES
help
Disabling this option will disregard LSM based policy rules.
+choice
+ prompt "Default template"
+ default IMA_NG_TEMPLATE
+ depends on IMA
+ help
+ Select the default IMA measurement template.
+
+ The original 'ima' measurement list template contains a
+ hash, defined as 20 bytes, and a null terminated pathname,
+ limited to 255 characters. The 'ima-ng' measurement list
+ template permits both larger hash digests and longer
+ pathnames.
+
+ config IMA_TEMPLATE
+ bool "ima"
+ config IMA_NG_TEMPLATE
+ bool "ima-ng (default)"
+ config IMA_SIG_TEMPLATE
+ bool "ima-sig"
+endchoice
+
+config IMA_DEFAULT_TEMPLATE
+ string
+ depends on IMA
+ default "ima" if IMA_TEMPLATE
+ default "ima-ng" if IMA_NG_TEMPLATE
+ default "ima-sig" if IMA_SIG_TEMPLATE
+
+choice
+ prompt "Default integrity hash algorithm"
+ default IMA_DEFAULT_HASH_SHA1
+ depends on IMA
+ help
+ Select the default hash algorithm used for the measurement
+ list, integrity appraisal and audit log. The compile-time
+ default can be overridden at boot with the 'ima_hash='
+ kernel command-line option.
+
+ config IMA_DEFAULT_HASH_SHA1
+ bool "SHA1 (default)"
+ depends on CRYPTO_SHA1
+
+ config IMA_DEFAULT_HASH_SHA256
+ bool "SHA256"
+ depends on CRYPTO_SHA256 && !IMA_TEMPLATE
+
+ config IMA_DEFAULT_HASH_SHA512
+ bool "SHA512"
+ depends on CRYPTO_SHA512 && !IMA_TEMPLATE
+
+ config IMA_DEFAULT_HASH_WP512
+ bool "WP512"
+ depends on CRYPTO_WP512 && !IMA_TEMPLATE
+endchoice
+
+config IMA_DEFAULT_HASH
+ string
+ depends on IMA
+ default "sha1" if IMA_DEFAULT_HASH_SHA1
+ default "sha256" if IMA_DEFAULT_HASH_SHA256
+ default "sha512" if IMA_DEFAULT_HASH_SHA512
+ default "wp512" if IMA_DEFAULT_HASH_WP512
+
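Taken together these options pin the build-time defaults; an illustrative .config fragment selecting the 'ima-ng' template with SHA256 would read:

    CONFIG_IMA_NG_TEMPLATE=y
    CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng"
    CONFIG_IMA_DEFAULT_HASH_SHA256=y
    CONFIG_IMA_DEFAULT_HASH="sha256"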
config IMA_APPRAISE
bool "Appraise integrity measurements"
depends on IMA
diff --git a/security/integrity/ima/Makefile b/security/integrity/ima/Makefile
index 56dfee7cbf61..d79263d2fdbf 100644
--- a/security/integrity/ima/Makefile
+++ b/security/integrity/ima/Makefile
@@ -6,5 +6,5 @@
obj-$(CONFIG_IMA) += ima.o
ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \
- ima_policy.o
+ ima_policy.o ima_template.o ima_template_lib.o
ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index b3dd616560f7..0356e1d437ca 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -26,7 +26,8 @@
#include "../integrity.h"
-enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_ASCII };
+enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN,
+ IMA_SHOW_ASCII };
enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
/* digest size for IMA, fits SHA1 or MD5 */
@@ -36,23 +37,48 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
#define IMA_HASH_BITS 9
#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
+#define IMA_TEMPLATE_FIELD_ID_MAX_LEN 16
+#define IMA_TEMPLATE_NUM_FIELDS_MAX 15
+
+#define IMA_TEMPLATE_IMA_NAME "ima"
+#define IMA_TEMPLATE_IMA_FMT "d|n"
+
/* set during initialization */
extern int ima_initialized;
extern int ima_used_chip;
-extern char *ima_hash;
+extern int ima_hash_algo;
extern int ima_appraise;
-/* IMA inode template definition */
-struct ima_template_data {
- u8 digest[IMA_DIGEST_SIZE]; /* sha1/md5 measurement hash */
- char file_name[IMA_EVENT_NAME_LEN_MAX + 1]; /* name + \0 */
+/* IMA template field data definition */
+struct ima_field_data {
+ u8 *data;
+ u32 len;
+};
+
+/* IMA template field definition */
+struct ima_template_field {
+ const char field_id[IMA_TEMPLATE_FIELD_ID_MAX_LEN];
+ int (*field_init) (struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, struct ima_field_data *field_data);
+ void (*field_show) (struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+};
+
+/* IMA template descriptor definition */
+struct ima_template_desc {
+ char *name;
+ char *fmt;
+ int num_fields;
+ struct ima_template_field **fields;
};
struct ima_template_entry {
- u8 digest[IMA_DIGEST_SIZE]; /* sha1 or md5 measurement hash */
- const char *template_name;
- int template_len;
- struct ima_template_data template;
+ u8 digest[TPM_DIGEST_SIZE]; /* sha1 or md5 measurement hash */
+ struct ima_template_desc *template_desc; /* template descriptor */
+ u32 template_data_len;
+ struct ima_field_data template_data[0]; /* template related data */
};
struct ima_queue_entry {
@@ -69,13 +95,22 @@ int ima_fs_init(void);
void ima_fs_cleanup(void);
int ima_inode_alloc(struct inode *inode);
int ima_add_template_entry(struct ima_template_entry *entry, int violation,
- const char *op, struct inode *inode);
-int ima_calc_file_hash(struct file *file, char *digest);
-int ima_calc_buffer_hash(const void *data, int len, char *digest);
-int ima_calc_boot_aggregate(char *digest);
-void ima_add_violation(struct inode *inode, const unsigned char *filename,
+ const char *op, struct inode *inode,
+ const unsigned char *filename);
+int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash);
+int ima_calc_field_array_hash(struct ima_field_data *field_data,
+ struct ima_template_desc *desc, int num_fields,
+ struct ima_digest_data *hash);
+int __init ima_calc_boot_aggregate(struct ima_digest_data *hash);
+void ima_add_violation(struct file *file, const unsigned char *filename,
const char *op, const char *cause);
int ima_init_crypto(void);
+void ima_putc(struct seq_file *m, void *data, int datalen);
+void ima_print_digest(struct seq_file *m, u8 *digest, int size);
+struct ima_template_desc *ima_template_desc_current(void);
+int ima_init_template(void);
/*
* used to protect h_table and sha_table
@@ -98,14 +133,22 @@ static inline unsigned long ima_hash_key(u8 *digest)
int ima_get_action(struct inode *inode, int mask, int function);
int ima_must_measure(struct inode *inode, int mask, int function);
int ima_collect_measurement(struct integrity_iint_cache *iint,
- struct file *file);
+ struct file *file,
+ struct evm_ima_xattr_data **xattr_value,
+ int *xattr_len);
void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file,
- const unsigned char *filename);
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len);
void ima_audit_measurement(struct integrity_iint_cache *iint,
const unsigned char *filename);
+int ima_alloc_init_template(struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, struct ima_template_entry **entry);
int ima_store_template(struct ima_template_entry *entry, int violation,
- struct inode *inode);
-void ima_template_show(struct seq_file *m, void *e, enum ima_show_type show);
+ struct inode *inode, const unsigned char *filename);
+void ima_free_template_entry(struct ima_template_entry *entry);
const char *ima_d_path(struct path *path, char **pathbuf);
/* rbtree tree calls to lookup, insert, delete
@@ -131,17 +174,25 @@ void ima_delete_rules(void);
#ifdef CONFIG_IMA_APPRAISE
int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
- struct file *file, const unsigned char *filename);
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len);
int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func);
void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file);
enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
int func);
+void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_digest_data *hash);
+int ima_read_xattr(struct dentry *dentry,
+ struct evm_ima_xattr_data **xattr_value);
#else
static inline int ima_appraise_measurement(int func,
struct integrity_iint_cache *iint,
struct file *file,
- const unsigned char *filename)
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len)
{
return INTEGRITY_UNKNOWN;
}
@@ -162,6 +213,19 @@ static inline enum integrity_status ima_get_cache_status(struct integrity_iint_c
{
return INTEGRITY_UNKNOWN;
}
+
+static inline void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value,
+ int xattr_len,
+ struct ima_digest_data *hash)
+{
+}
+
+static inline int ima_read_xattr(struct dentry *dentry,
+ struct evm_ima_xattr_data **xattr_value)
+{
+ return 0;
+}
+
#endif
/* LSM based policy rules require audit */
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index 1c03e8f1e0e1..c38bbce8c6a6 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -18,9 +18,59 @@
#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/evm.h>
+#include <crypto/hash_info.h>
#include "ima.h"
-static const char *IMA_TEMPLATE_NAME = "ima";
+/*
+ * ima_free_template_entry - free an existing template entry
+ */
+void ima_free_template_entry(struct ima_template_entry *entry)
+{
+ int i;
+
+ for (i = 0; i < entry->template_desc->num_fields; i++)
+ kfree(entry->template_data[i].data);
+
+ kfree(entry);
+}
+
+/*
+ * ima_alloc_init_template - create and initialize a new template entry
+ */
+int ima_alloc_init_template(struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, struct ima_template_entry **entry)
+{
+ struct ima_template_desc *template_desc = ima_template_desc_current();
+ int i, result = 0;
+
+ *entry = kzalloc(sizeof(**entry) + template_desc->num_fields *
+ sizeof(struct ima_field_data), GFP_NOFS);
+ if (!*entry)
+ return -ENOMEM;
+
+ (*entry)->template_desc = template_desc;
+ for (i = 0; i < template_desc->num_fields; i++) {
+ struct ima_template_field *field = template_desc->fields[i];
+ u32 len;
+
+ result = field->field_init(iint, file, filename,
+ xattr_value, xattr_len,
+ &((*entry)->template_data[i]));
+ if (result != 0)
+ goto out;
+
+ len = (*entry)->template_data[i].len;
+ (*entry)->template_data_len += sizeof(len);
+ (*entry)->template_data_len += len;
+ }
+ return 0;
+out:
+ ima_free_template_entry(*entry);
+ *entry = NULL;
+ return result;
+}
/*
* ima_store_template - store ima template measurements
@@ -39,28 +89,35 @@ static const char *IMA_TEMPLATE_NAME = "ima";
* Returns 0 on success, error code otherwise
*/
int ima_store_template(struct ima_template_entry *entry,
- int violation, struct inode *inode)
+ int violation, struct inode *inode,
+ const unsigned char *filename)
{
const char *op = "add_template_measure";
const char *audit_cause = "hashing_error";
+ char *template_name = entry->template_desc->name;
int result;
-
- memset(entry->digest, 0, sizeof(entry->digest));
- entry->template_name = IMA_TEMPLATE_NAME;
- entry->template_len = sizeof(entry->template);
+ struct {
+ struct ima_digest_data hdr;
+ char digest[TPM_DIGEST_SIZE];
+ } hash;
if (!violation) {
- result = ima_calc_buffer_hash(&entry->template,
- entry->template_len,
- entry->digest);
+ int num_fields = entry->template_desc->num_fields;
+
+ /* this function uses default algo */
+ hash.hdr.algo = HASH_ALGO_SHA1;
+ result = ima_calc_field_array_hash(&entry->template_data[0],
+ entry->template_desc,
+ num_fields, &hash.hdr);
if (result < 0) {
integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
- entry->template_name, op,
+ template_name, op,
audit_cause, result, 0);
return result;
}
+ memcpy(entry->digest, hash.hdr.digest, hash.hdr.length);
}
- result = ima_add_template_entry(entry, violation, op, inode);
+ result = ima_add_template_entry(entry, violation, op, inode, filename);
return result;
}
@@ -71,26 +128,26 @@ int ima_store_template(struct ima_template_entry *entry,
* By extending the PCR with 0xFF's instead of with zeroes, the PCR
* value is invalidated.
*/
-void ima_add_violation(struct inode *inode, const unsigned char *filename,
+void ima_add_violation(struct file *file, const unsigned char *filename,
const char *op, const char *cause)
{
struct ima_template_entry *entry;
+ struct inode *inode = file->f_dentry->d_inode;
int violation = 1;
int result;
/* can overflow, only indicator */
atomic_long_inc(&ima_htable.violations);
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
+ result = ima_alloc_init_template(NULL, file, filename,
+ NULL, 0, &entry);
+ if (result < 0) {
result = -ENOMEM;
goto err_out;
}
- memset(&entry->template, 0, sizeof(entry->template));
- strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX);
- result = ima_store_template(entry, violation, inode);
+ result = ima_store_template(entry, violation, inode, filename);
if (result < 0)
- kfree(entry);
+ ima_free_template_entry(entry);
err_out:
integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
op, cause, result, 0);
@@ -138,20 +195,42 @@ int ima_must_measure(struct inode *inode, int mask, int function)
* Return 0 on success, error code otherwise
*/
int ima_collect_measurement(struct integrity_iint_cache *iint,
- struct file *file)
+ struct file *file,
+ struct evm_ima_xattr_data **xattr_value,
+ int *xattr_len)
{
struct inode *inode = file_inode(file);
const char *filename = file->f_dentry->d_name.name;
int result = 0;
+ struct {
+ struct ima_digest_data hdr;
+ char digest[IMA_MAX_DIGEST_SIZE];
+ } hash;
+
+ if (xattr_value)
+ *xattr_len = ima_read_xattr(file->f_dentry, xattr_value);
if (!(iint->flags & IMA_COLLECTED)) {
u64 i_version = file_inode(file)->i_version;
- iint->ima_xattr.type = IMA_XATTR_DIGEST;
- result = ima_calc_file_hash(file, iint->ima_xattr.digest);
+ /* use default hash algorithm */
+ hash.hdr.algo = ima_hash_algo;
+
+ if (xattr_value)
+ ima_get_hash_algo(*xattr_value, *xattr_len, &hash.hdr);
+
+ result = ima_calc_file_hash(file, &hash.hdr);
if (!result) {
- iint->version = i_version;
- iint->flags |= IMA_COLLECTED;
+ int length = sizeof(hash.hdr) + hash.hdr.length;
+ void *tmpbuf = krealloc(iint->ima_hash, length,
+ GFP_NOFS);
+ if (tmpbuf) {
+ iint->ima_hash = tmpbuf;
+ memcpy(iint->ima_hash, &hash, length);
+ iint->version = i_version;
+ iint->flags |= IMA_COLLECTED;
+ } else
+ result = -ENOMEM;
}
}
if (result)
@@ -177,7 +256,9 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
* Must be called with iint->mutex held.
*/
void ima_store_measurement(struct integrity_iint_cache *iint,
- struct file *file, const unsigned char *filename)
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len)
{
const char *op = "add_template_measure";
const char *audit_cause = "ENOMEM";
@@ -189,37 +270,35 @@ void ima_store_measurement(struct integrity_iint_cache *iint,
if (iint->flags & IMA_MEASURED)
return;
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
+ result = ima_alloc_init_template(iint, file, filename,
+ xattr_value, xattr_len, &entry);
+ if (result < 0) {
integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
op, audit_cause, result, 0);
return;
}
- memset(&entry->template, 0, sizeof(entry->template));
- memcpy(entry->template.digest, iint->ima_xattr.digest, IMA_DIGEST_SIZE);
- strcpy(entry->template.file_name,
- (strlen(filename) > IMA_EVENT_NAME_LEN_MAX) ?
- file->f_dentry->d_name.name : filename);
- result = ima_store_template(entry, violation, inode);
+ result = ima_store_template(entry, violation, inode, filename);
if (!result || result == -EEXIST)
iint->flags |= IMA_MEASURED;
if (result < 0)
- kfree(entry);
+ ima_free_template_entry(entry);
}
void ima_audit_measurement(struct integrity_iint_cache *iint,
const unsigned char *filename)
{
struct audit_buffer *ab;
- char hash[(IMA_DIGEST_SIZE * 2) + 1];
+ char hash[(iint->ima_hash->length * 2) + 1];
+ const char *algo_name = hash_algo_name[iint->ima_hash->algo];
+ char algo_hash[sizeof(hash) + strlen(algo_name) + 2];
int i;
if (iint->flags & IMA_AUDITED)
return;
- for (i = 0; i < IMA_DIGEST_SIZE; i++)
- hex_byte_pack(hash + (i * 2), iint->ima_xattr.digest[i]);
+ for (i = 0; i < iint->ima_hash->length; i++)
+ hex_byte_pack(hash + (i * 2), iint->ima_hash->digest[i]);
hash[i * 2] = '\0';
ab = audit_log_start(current->audit_context, GFP_KERNEL,
@@ -230,7 +309,8 @@ void ima_audit_measurement(struct integrity_iint_cache *iint,
audit_log_format(ab, "file=");
audit_log_untrustedstring(ab, filename);
audit_log_format(ab, " hash=");
- audit_log_untrustedstring(ab, hash);
+ snprintf(algo_hash, sizeof(algo_hash), "%s:%s", algo_name, hash);
+ audit_log_untrustedstring(ab, algo_hash);
audit_log_task_info(ab, current);
audit_log_end(ab);
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 2d4becab8918..734e9468aca0 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -15,6 +15,7 @@
#include <linux/magic.h>
#include <linux/ima.h>
#include <linux/evm.h>
+#include <crypto/hash_info.h>
#include "ima.h"
@@ -43,19 +44,31 @@ int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func)
}
static int ima_fix_xattr(struct dentry *dentry,
- struct integrity_iint_cache *iint)
+ struct integrity_iint_cache *iint)
{
- iint->ima_xattr.type = IMA_XATTR_DIGEST;
- return __vfs_setxattr_noperm(dentry, XATTR_NAME_IMA,
- (u8 *)&iint->ima_xattr,
- sizeof(iint->ima_xattr), 0);
+ int rc, offset;
+ u8 algo = iint->ima_hash->algo;
+
+ if (algo <= HASH_ALGO_SHA1) {
+ offset = 1;
+ iint->ima_hash->xattr.sha1.type = IMA_XATTR_DIGEST;
+ } else {
+ offset = 0;
+ iint->ima_hash->xattr.ng.type = IMA_XATTR_DIGEST_NG;
+ iint->ima_hash->xattr.ng.algo = algo;
+ }
+ rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_IMA,
+ &iint->ima_hash->xattr.data[offset],
+ (sizeof(iint->ima_hash->xattr) - offset) +
+ iint->ima_hash->length, 0);
+ return rc;
}
/* Return specific func appraised cached result */
enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
int func)
{
- switch(func) {
+ switch (func) {
case MMAP_CHECK:
return iint->ima_mmap_status;
case BPRM_CHECK:
@@ -71,7 +84,7 @@ enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
static void ima_set_cache_status(struct integrity_iint_cache *iint,
int func, enum integrity_status status)
{
- switch(func) {
+ switch (func) {
case MMAP_CHECK:
iint->ima_mmap_status = status;
break;
@@ -90,7 +103,7 @@ static void ima_set_cache_status(struct integrity_iint_cache *iint,
static void ima_cache_flags(struct integrity_iint_cache *iint, int func)
{
- switch(func) {
+ switch (func) {
case MMAP_CHECK:
iint->flags |= (IMA_MMAP_APPRAISED | IMA_APPRAISED);
break;
@@ -107,6 +120,50 @@ static void ima_cache_flags(struct integrity_iint_cache *iint, int func)
}
}
+void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_digest_data *hash)
+{
+ struct signature_v2_hdr *sig;
+
+ if (!xattr_value || xattr_len < 2)
+ return;
+
+ switch (xattr_value->type) {
+ case EVM_IMA_XATTR_DIGSIG:
+ sig = (typeof(sig))xattr_value;
+ if (sig->version != 2 || xattr_len <= sizeof(*sig))
+ return;
+ hash->algo = sig->hash_algo;
+ break;
+ case IMA_XATTR_DIGEST_NG:
+ hash->algo = xattr_value->digest[0];
+ break;
+ case IMA_XATTR_DIGEST:
+ /* this is for backward compatibility */
+ if (xattr_len == 21) {
+ unsigned int zero = 0;
+ if (!memcmp(&xattr_value->digest[16], &zero, 4))
+ hash->algo = HASH_ALGO_MD5;
+ else
+ hash->algo = HASH_ALGO_SHA1;
+ } else if (xattr_len == 17)
+ hash->algo = HASH_ALGO_MD5;
+ break;
+ }
+}
+
+int ima_read_xattr(struct dentry *dentry,
+ struct evm_ima_xattr_data **xattr_value)
+{
+ struct inode *inode = dentry->d_inode;
+
+ if (!inode->i_op->getxattr)
+ return 0;
+
+ return vfs_getxattr_alloc(dentry, XATTR_NAME_IMA, (char **)xattr_value,
+ 0, GFP_NOFS);
+}
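The IMA_XATTR_DIGEST heuristic above follows from the old fixed-size xattr layout: a type byte plus a 20-byte digest field gives 21 bytes, and an md5 digest fills only 16 of them, so four trailing zero bytes indicate md5; a 17-byte value (type byte plus 16-byte digest) can only be md5.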
+
/*
* ima_appraise_measurement - appraise file measurement
*
@@ -116,23 +173,22 @@ static void ima_cache_flags(struct integrity_iint_cache *iint, int func)
* Return 0 on success, error code otherwise
*/
int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
- struct file *file, const unsigned char *filename)
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len)
{
struct dentry *dentry = file->f_dentry;
struct inode *inode = dentry->d_inode;
- struct evm_ima_xattr_data *xattr_value = NULL;
enum integrity_status status = INTEGRITY_UNKNOWN;
const char *op = "appraise_data";
char *cause = "unknown";
- int rc;
+ int rc = xattr_len, hash_start = 0;
if (!ima_appraise)
return 0;
if (!inode->i_op->getxattr)
return INTEGRITY_UNKNOWN;
- rc = vfs_getxattr_alloc(dentry, XATTR_NAME_IMA, (char **)&xattr_value,
- 0, GFP_NOFS);
if (rc <= 0) {
if (rc && rc != -ENODATA)
goto out;
@@ -153,14 +209,25 @@ int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
goto out;
}
switch (xattr_value->type) {
+ case IMA_XATTR_DIGEST_NG:
+ /* first byte contains algorithm id */
+ hash_start = 1;
case IMA_XATTR_DIGEST:
if (iint->flags & IMA_DIGSIG_REQUIRED) {
cause = "IMA signature required";
status = INTEGRITY_FAIL;
break;
}
- rc = memcmp(xattr_value->digest, iint->ima_xattr.digest,
- IMA_DIGEST_SIZE);
+ if (xattr_len - sizeof(xattr_value->type) - hash_start >=
+ iint->ima_hash->length)
+ /* the xattr may be longer than the current hash:
+ older kernels stored an md5 hash in 20 bytes
+ instead of 16 */
+ rc = memcmp(&xattr_value->digest[hash_start],
+ iint->ima_hash->digest,
+ iint->ima_hash->length);
+ else
+ rc = -EINVAL;
if (rc) {
cause = "invalid-hash";
status = INTEGRITY_FAIL;
@@ -171,9 +238,9 @@ int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
case EVM_IMA_XATTR_DIGSIG:
iint->flags |= IMA_DIGSIG;
rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA,
- xattr_value->digest, rc - 1,
- iint->ima_xattr.digest,
- IMA_DIGEST_SIZE);
+ (const char *)xattr_value, rc,
+ iint->ima_hash->digest,
+ iint->ima_hash->length);
if (rc == -EOPNOTSUPP) {
status = INTEGRITY_UNKNOWN;
} else if (rc) {
@@ -203,7 +270,6 @@ out:
ima_cache_flags(iint, func);
}
ima_set_cache_status(iint, func, status);
- kfree(xattr_value);
return status;
}
@@ -219,7 +285,7 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
if (iint->flags & IMA_DIGSIG)
return;
- rc = ima_collect_measurement(iint, file);
+ rc = ima_collect_measurement(iint, file, NULL, NULL);
if (rc < 0)
return;
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index a02e0791cf15..fdf60def52e9 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -20,6 +20,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>
+#include <crypto/hash_info.h>
#include "ima.h"
static struct crypto_shash *ima_shash_tfm;
@@ -28,31 +29,58 @@ int ima_init_crypto(void)
{
long rc;
- ima_shash_tfm = crypto_alloc_shash(ima_hash, 0, 0);
+ ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
if (IS_ERR(ima_shash_tfm)) {
rc = PTR_ERR(ima_shash_tfm);
- pr_err("Can not allocate %s (reason: %ld)\n", ima_hash, rc);
+ pr_err("Can not allocate %s (reason: %ld)\n",
+ hash_algo_name[ima_hash_algo], rc);
return rc;
}
return 0;
}
+static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
+{
+ struct crypto_shash *tfm = ima_shash_tfm;
+ int rc;
+
+ if (algo != ima_hash_algo && algo < HASH_ALGO__LAST) {
+ tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
+ if (IS_ERR(tfm)) {
+ rc = PTR_ERR(tfm);
+ pr_err("Can not allocate %s (reason: %d)\n",
+ hash_algo_name[algo], rc);
+ }
+ }
+ return tfm;
+}
+
+static void ima_free_tfm(struct crypto_shash *tfm)
+{
+ if (tfm != ima_shash_tfm)
+ crypto_free_shash(tfm);
+}
+
/*
* Calculate the MD5/SHA1 file digest
*/
-int ima_calc_file_hash(struct file *file, char *digest)
+static int ima_calc_file_hash_tfm(struct file *file,
+ struct ima_digest_data *hash,
+ struct crypto_shash *tfm)
{
loff_t i_size, offset = 0;
char *rbuf;
int rc, read = 0;
struct {
struct shash_desc shash;
- char ctx[crypto_shash_descsize(ima_shash_tfm)];
+ char ctx[crypto_shash_descsize(tfm)];
} desc;
- desc.shash.tfm = ima_shash_tfm;
+ desc.shash.tfm = tfm;
desc.shash.flags = 0;
+ hash->length = crypto_shash_digestsize(tfm);
+
rc = crypto_shash_init(&desc.shash);
if (rc != 0)
return rc;
@@ -85,27 +113,90 @@ int ima_calc_file_hash(struct file *file, char *digest)
}
kfree(rbuf);
if (!rc)
- rc = crypto_shash_final(&desc.shash, digest);
+ rc = crypto_shash_final(&desc.shash, hash->digest);
if (read)
file->f_mode &= ~FMODE_READ;
out:
return rc;
}
+int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
+{
+ struct crypto_shash *tfm;
+ int rc;
+
+ tfm = ima_alloc_tfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ rc = ima_calc_file_hash_tfm(file, hash, tfm);
+
+ ima_free_tfm(tfm);
+
+ return rc;
+}
+
/*
- * Calculate the hash of a given buffer
+ * Calculate the hash of template data
*/
-int ima_calc_buffer_hash(const void *data, int len, char *digest)
+static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
+ struct ima_template_desc *td,
+ int num_fields,
+ struct ima_digest_data *hash,
+ struct crypto_shash *tfm)
{
struct {
struct shash_desc shash;
- char ctx[crypto_shash_descsize(ima_shash_tfm)];
+ char ctx[crypto_shash_descsize(tfm)];
} desc;
+ int rc, i;
- desc.shash.tfm = ima_shash_tfm;
+ desc.shash.tfm = tfm;
desc.shash.flags = 0;
- return crypto_shash_digest(&desc.shash, data, len, digest);
+ hash->length = crypto_shash_digestsize(tfm);
+
+ rc = crypto_shash_init(&desc.shash);
+ if (rc != 0)
+ return rc;
+
+ for (i = 0; i < num_fields; i++) {
+ if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
+ rc = crypto_shash_update(&desc.shash,
+ (const u8 *) &field_data[i].len,
+ sizeof(field_data[i].len));
+ if (rc)
+ break;
+ }
+ rc = crypto_shash_update(&desc.shash, field_data[i].data,
+ field_data[i].len);
+ if (rc)
+ break;
+ }
+
+ if (!rc)
+ rc = crypto_shash_final(&desc.shash, hash->digest);
+
+ return rc;
+}
+
+int ima_calc_field_array_hash(struct ima_field_data *field_data,
+ struct ima_template_desc *desc, int num_fields,
+ struct ima_digest_data *hash)
+{
+ struct crypto_shash *tfm;
+ int rc;
+
+ tfm = ima_alloc_tfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ rc = ima_calc_field_array_hash_tfm(field_data, desc, num_fields,
+ hash, tfm);
+
+ ima_free_tfm(tfm);
+
+ return rc;
}
static void __init ima_pcrread(int idx, u8 *pcr)
@@ -120,16 +211,17 @@ static void __init ima_pcrread(int idx, u8 *pcr)
/*
* Calculate the boot aggregate hash
*/
-int __init ima_calc_boot_aggregate(char *digest)
+static int __init ima_calc_boot_aggregate_tfm(char *digest,
+ struct crypto_shash *tfm)
{
- u8 pcr_i[IMA_DIGEST_SIZE];
+ u8 pcr_i[TPM_DIGEST_SIZE];
int rc, i;
struct {
struct shash_desc shash;
- char ctx[crypto_shash_descsize(ima_shash_tfm)];
+ char ctx[crypto_shash_descsize(tfm)];
} desc;
- desc.shash.tfm = ima_shash_tfm;
+ desc.shash.tfm = tfm;
desc.shash.flags = 0;
rc = crypto_shash_init(&desc.shash);
@@ -140,9 +232,26 @@ int __init ima_calc_boot_aggregate(char *digest)
for (i = TPM_PCR0; i < TPM_PCR8; i++) {
ima_pcrread(i, pcr_i);
/* now accumulate with current aggregate */
- rc = crypto_shash_update(&desc.shash, pcr_i, IMA_DIGEST_SIZE);
+ rc = crypto_shash_update(&desc.shash, pcr_i, TPM_DIGEST_SIZE);
}
if (!rc)
crypto_shash_final(&desc.shash, digest);
return rc;
}
+
+int __init ima_calc_boot_aggregate(struct ima_digest_data *hash)
+{
+ struct crypto_shash *tfm;
+ int rc;
+
+ tfm = ima_alloc_tfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ hash->length = crypto_shash_digestsize(tfm);
+ rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm);
+
+ ima_free_tfm(tfm);
+
+ return rc;
+}
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index 38477c9c3415..db01125926bd 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -88,8 +88,7 @@ static void *ima_measurements_next(struct seq_file *m, void *v, loff_t *pos)
* against concurrent list-extension
*/
rcu_read_lock();
- qe = list_entry_rcu(qe->later.next,
- struct ima_queue_entry, later);
+ qe = list_entry_rcu(qe->later.next, struct ima_queue_entry, later);
rcu_read_unlock();
(*pos)++;
@@ -100,7 +99,7 @@ static void ima_measurements_stop(struct seq_file *m, void *v)
{
}
-static void ima_putc(struct seq_file *m, void *data, int datalen)
+void ima_putc(struct seq_file *m, void *data, int datalen)
{
while (datalen--)
seq_putc(m, *(char *)data++);
@@ -111,6 +110,7 @@ static void ima_putc(struct seq_file *m, void *data, int datalen)
* char[20]=template digest
* 32bit-le=template name size
* char[n]=template name
+ * [eventdata length]
* eventdata[n]=template specific data
*/
static int ima_measurements_show(struct seq_file *m, void *v)
@@ -120,6 +120,8 @@ static int ima_measurements_show(struct seq_file *m, void *v)
struct ima_template_entry *e;
int namelen;
u32 pcr = CONFIG_IMA_MEASURE_PCR_IDX;
+ bool is_ima_template = false;
+ int i;
/* get entry */
e = qe->entry;
@@ -134,18 +136,32 @@ static int ima_measurements_show(struct seq_file *m, void *v)
ima_putc(m, &pcr, sizeof pcr);
/* 2nd: template digest */
- ima_putc(m, e->digest, IMA_DIGEST_SIZE);
+ ima_putc(m, e->digest, TPM_DIGEST_SIZE);
/* 3rd: template name size */
- namelen = strlen(e->template_name);
+ namelen = strlen(e->template_desc->name);
ima_putc(m, &namelen, sizeof namelen);
/* 4th: template name */
- ima_putc(m, (void *)e->template_name, namelen);
+ ima_putc(m, e->template_desc->name, namelen);
+
+ /* 5th: template length (except for 'ima' template) */
+ if (strcmp(e->template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0)
+ is_ima_template = true;
+
+ if (!is_ima_template)
+ ima_putc(m, &e->template_data_len,
+ sizeof(e->template_data_len));
+
+ /* 6th: template specific data */
+ for (i = 0; i < e->template_desc->num_fields; i++) {
+ enum ima_show_type show = IMA_SHOW_BINARY;
+ struct ima_template_field *field = e->template_desc->fields[i];
- /* 5th: template specific data */
- ima_template_show(m, (struct ima_template_data *)&e->template,
- IMA_SHOW_BINARY);
+ if (is_ima_template && strcmp(field->field_id, "d") == 0)
+ show = IMA_SHOW_BINARY_NO_FIELD_LEN;
+ field->field_show(m, show, &e->template_data[i]);
+ }
return 0;
}
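For templates other than 'ima' a binary record is thus: a 32-bit little-endian PCR index, the 20-byte SHA1 template digest, a 32-bit name length, the template name, a 32-bit total template-data length, then one 32-bit length plus payload per field. A hedged userspace sketch (function name hypothetical) that walks one record from a saved copy of the binary measurement list:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical parser sketch, not kernel code; assumes a non-"ima"
     * template and a little-endian host. */
    static int read_record(FILE *f)
    {
        uint32_t pcr, name_len, data_len, field_len, consumed;
        uint8_t digest[20];            /* TPM_DIGEST_SIZE */
        char name[64];

        if (fread(&pcr, sizeof(pcr), 1, f) != 1)
            return -1;                 /* end of list */
        if (fread(digest, sizeof(digest), 1, f) != 1)
            return -1;
        if (fread(&name_len, sizeof(name_len), 1, f) != 1 ||
            name_len >= sizeof(name))
            return -1;
        if (fread(name, name_len, 1, f) != 1)
            return -1;
        name[name_len] = '\0';
        if (fread(&data_len, sizeof(data_len), 1, f) != 1)
            return -1;
        /* each field: a 32-bit length followed by the field payload */
        for (consumed = 0; consumed < data_len; consumed += 4 + field_len) {
            if (fread(&field_len, sizeof(field_len), 1, f) != 1)
                return -1;
            if (fseek(f, field_len, SEEK_CUR) != 0)
                return -1;
        }
        printf("pcr=%u template=%s data=%u bytes\n", pcr, name, data_len);
        return 0;
    }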
@@ -168,41 +184,21 @@ static const struct file_operations ima_measurements_ops = {
.release = seq_release,
};
-static void ima_print_digest(struct seq_file *m, u8 *digest)
+void ima_print_digest(struct seq_file *m, u8 *digest, int size)
{
int i;
- for (i = 0; i < IMA_DIGEST_SIZE; i++)
+ for (i = 0; i < size; i++)
seq_printf(m, "%02x", *(digest + i));
}
-void ima_template_show(struct seq_file *m, void *e, enum ima_show_type show)
-{
- struct ima_template_data *entry = e;
- int namelen;
-
- switch (show) {
- case IMA_SHOW_ASCII:
- ima_print_digest(m, entry->digest);
- seq_printf(m, " %s\n", entry->file_name);
- break;
- case IMA_SHOW_BINARY:
- ima_putc(m, entry->digest, IMA_DIGEST_SIZE);
-
- namelen = strlen(entry->file_name);
- ima_putc(m, &namelen, sizeof namelen);
- ima_putc(m, entry->file_name, namelen);
- default:
- break;
- }
-}
-
/* print in ascii */
static int ima_ascii_measurements_show(struct seq_file *m, void *v)
{
/* the list never shrinks, so we don't need a lock here */
struct ima_queue_entry *qe = v;
struct ima_template_entry *e;
+ int i;
/* get entry */
e = qe->entry;
@@ -213,14 +209,21 @@ static int ima_ascii_measurements_show(struct seq_file *m, void *v)
seq_printf(m, "%2d ", CONFIG_IMA_MEASURE_PCR_IDX);
/* 2nd: SHA1 template hash */
- ima_print_digest(m, e->digest);
+ ima_print_digest(m, e->digest, TPM_DIGEST_SIZE);
/* 3th: template name */
- seq_printf(m, " %s ", e->template_name);
+ seq_printf(m, " %s", e->template_desc->name);
/* 4th: template specific data */
- ima_template_show(m, (struct ima_template_data *)&e->template,
- IMA_SHOW_ASCII);
+ for (i = 0; i < e->template_desc->num_fields; i++) {
+ seq_puts(m, " ");
+ if (e->template_data[i].len == 0)
+ continue;
+
+ e->template_desc->fields[i]->field_show(m, IMA_SHOW_ASCII,
+ &e->template_data[i]);
+ }
+ seq_puts(m, "\n");
return 0;
}
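In ASCII form an 'ima-ng' record then reads, for example (digests abbreviated, values purely illustrative):

    10 91aa...03e1 ima-ng sha256:4d0c...9b2f /usr/bin/bash

that is: PCR index, SHA1 template digest, template name, then one space-separated column per template field.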
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index 162ea723db3d..37122768554a 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -18,6 +18,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <crypto/hash_info.h>
#include "ima.h"
/* name for boot aggregate entry */
@@ -42,30 +43,39 @@ int ima_used_chip;
static void __init ima_add_boot_aggregate(void)
{
struct ima_template_entry *entry;
+ struct integrity_iint_cache tmp_iint, *iint = &tmp_iint;
const char *op = "add_boot_aggregate";
const char *audit_cause = "ENOMEM";
int result = -ENOMEM;
- int violation = 1;
+ int violation = 0;
+ struct {
+ struct ima_digest_data hdr;
+ char digest[TPM_DIGEST_SIZE];
+ } hash;
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- goto err_out;
+ memset(iint, 0, sizeof(*iint));
+ memset(&hash, 0, sizeof(hash));
+ iint->ima_hash = &hash.hdr;
+ iint->ima_hash->algo = HASH_ALGO_SHA1;
+ iint->ima_hash->length = SHA1_DIGEST_SIZE;
- memset(&entry->template, 0, sizeof(entry->template));
- strncpy(entry->template.file_name, boot_aggregate_name,
- IMA_EVENT_NAME_LEN_MAX);
if (ima_used_chip) {
- violation = 0;
- result = ima_calc_boot_aggregate(entry->template.digest);
+ result = ima_calc_boot_aggregate(&hash.hdr);
if (result < 0) {
audit_cause = "hashing_error";
- kfree(entry);
goto err_out;
}
}
- result = ima_store_template(entry, violation, NULL);
+
+ result = ima_alloc_init_template(iint, NULL, boot_aggregate_name,
+ NULL, 0, &entry);
+ if (result < 0)
+ return;
+
+ result = ima_store_template(entry, violation, NULL,
+ boot_aggregate_name);
if (result < 0)
- kfree(entry);
+ ima_free_template_entry(entry);
return;
err_out:
integrity_audit_msg(AUDIT_INTEGRITY_PCR, NULL, boot_aggregate_name, op,
@@ -74,7 +84,7 @@ err_out:
int __init ima_init(void)
{
- u8 pcr_i[IMA_DIGEST_SIZE];
+ u8 pcr_i[TPM_DIGEST_SIZE];
int rc;
ima_used_chip = 0;
@@ -88,6 +98,10 @@ int __init ima_init(void)
rc = ima_init_crypto();
if (rc)
return rc;
+ rc = ima_init_template();
+ if (rc != 0)
+ return rc;
+
ima_add_boot_aggregate(); /* boot aggregate must be first entry */
ima_init_policy();
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index e9508d5bbfcf..149ee1119f87 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/xattr.h>
#include <linux/ima.h>
+#include <crypto/hash_info.h>
#include "ima.h"
@@ -35,11 +36,33 @@ int ima_appraise = IMA_APPRAISE_ENFORCE;
int ima_appraise;
#endif
-char *ima_hash = "sha1";
+int ima_hash_algo = HASH_ALGO_SHA1;
+static int hash_setup_done;
+
static int __init hash_setup(char *str)
{
- if (strncmp(str, "md5", 3) == 0)
- ima_hash = "md5";
+ struct ima_template_desc *template_desc = ima_template_desc_current();
+ int i;
+
+ if (hash_setup_done)
+ return 1;
+
+ if (strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0) {
+ if (strncmp(str, "sha1", 4) == 0)
+ ima_hash_algo = HASH_ALGO_SHA1;
+ else if (strncmp(str, "md5", 3) == 0)
+ ima_hash_algo = HASH_ALGO_MD5;
+ goto out;
+ }
+
+ for (i = 0; i < HASH_ALGO__LAST; i++) {
+ if (strcmp(str, hash_algo_name[i]) == 0) {
+ ima_hash_algo = i;
+ break;
+ }
+ }
+out:
+ hash_setup_done = 1;
return 1;
}
__setup("ima_hash=", hash_setup);
@@ -92,10 +115,9 @@ out:
pathname = dentry->d_name.name;
if (send_tomtou)
- ima_add_violation(inode, pathname,
- "invalid_pcr", "ToMToU");
+ ima_add_violation(file, pathname, "invalid_pcr", "ToMToU");
if (send_writers)
- ima_add_violation(inode, pathname,
+ ima_add_violation(file, pathname,
"invalid_pcr", "open_writers");
kfree(pathbuf);
}
@@ -144,9 +166,12 @@ static int process_measurement(struct file *file, const char *filename,
{
struct inode *inode = file_inode(file);
struct integrity_iint_cache *iint;
+ struct ima_template_desc *template_desc = ima_template_desc_current();
char *pathbuf = NULL;
const char *pathname = NULL;
int rc = -ENOMEM, action, must_appraise, _func;
+ struct evm_ima_xattr_data *xattr_value = NULL, **xattr_ptr = NULL;
+ int xattr_len = 0;
if (!ima_initialized || !S_ISREG(inode->i_mode))
return 0;
@@ -185,7 +210,13 @@ static int process_measurement(struct file *file, const char *filename,
goto out_digsig;
}
- rc = ima_collect_measurement(iint, file);
+ if (strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0) {
+ if (action & IMA_APPRAISE_SUBMASK)
+ xattr_ptr = &xattr_value;
+ } else
+ xattr_ptr = &xattr_value;
+
+ rc = ima_collect_measurement(iint, file, xattr_ptr, &xattr_len);
if (rc != 0)
goto out_digsig;
@@ -194,9 +225,11 @@ static int process_measurement(struct file *file, const char *filename,
pathname = (const char *)file->f_dentry->d_name.name;
if (action & IMA_MEASURE)
- ima_store_measurement(iint, file, pathname);
+ ima_store_measurement(iint, file, pathname,
+ xattr_value, xattr_len);
if (action & IMA_APPRAISE_SUBMASK)
- rc = ima_appraise_measurement(_func, iint, file, pathname);
+ rc = ima_appraise_measurement(_func, iint, file, pathname,
+ xattr_value, xattr_len);
if (action & IMA_AUDIT)
ima_audit_measurement(iint, pathname);
kfree(pathbuf);
@@ -205,6 +238,7 @@ out_digsig:
rc = -EACCES;
out:
mutex_unlock(&inode->i_mutex);
+ kfree(xattr_value);
if ((rc && must_appraise) && (ima_appraise & IMA_APPRAISE_ENFORCE))
return -EACCES;
return 0;
@@ -244,9 +278,9 @@ int ima_file_mmap(struct file *file, unsigned long prot)
int ima_bprm_check(struct linux_binprm *bprm)
{
return process_measurement(bprm->file,
- (strcmp(bprm->filename, bprm->interp) == 0) ?
- bprm->filename : bprm->interp,
- MAY_EXEC, BPRM_CHECK);
+ (strcmp(bprm->filename, bprm->interp) == 0) ?
+ bprm->filename : bprm->interp,
+ MAY_EXEC, BPRM_CHECK);
}
/**
@@ -263,8 +297,8 @@ int ima_file_check(struct file *file, int mask)
{
ima_rdwr_violation_check(file);
return process_measurement(file, NULL,
- mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
- FILE_CHECK);
+ mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
+ FILE_CHECK);
}
EXPORT_SYMBOL_GPL(ima_file_check);
@@ -294,6 +328,7 @@ static int __init init_ima(void)
{
int error;
+ hash_setup(CONFIG_IMA_DEFAULT_HASH);
error = ima_init();
if (!error)
ima_initialized = 1;
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index 399433ad614e..a9c3d3cd1990 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -73,7 +73,6 @@ static struct ima_rule_entry default_rules[] = {
{.action = DONT_MEASURE,.fsmagic = SYSFS_MAGIC,.flags = IMA_FSMAGIC},
{.action = DONT_MEASURE,.fsmagic = DEBUGFS_MAGIC,.flags = IMA_FSMAGIC},
{.action = DONT_MEASURE,.fsmagic = TMPFS_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_MEASURE,.fsmagic = RAMFS_MAGIC,.flags = IMA_FSMAGIC},
{.action = DONT_MEASURE,.fsmagic = DEVPTS_SUPER_MAGIC,.flags = IMA_FSMAGIC},
{.action = DONT_MEASURE,.fsmagic = BINFMTFS_MAGIC,.flags = IMA_FSMAGIC},
{.action = DONT_MEASURE,.fsmagic = SECURITYFS_MAGIC,.flags = IMA_FSMAGIC},
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index ff63fe00c195..d85e99761f4f 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -50,7 +50,7 @@ static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value)
key = ima_hash_key(digest_value);
rcu_read_lock();
hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) {
- rc = memcmp(qe->entry->digest, digest_value, IMA_DIGEST_SIZE);
+ rc = memcmp(qe->entry->digest, digest_value, TPM_DIGEST_SIZE);
if (rc == 0) {
ret = qe;
break;
@@ -104,9 +104,10 @@ static int ima_pcr_extend(const u8 *hash)
* and extend the pcr.
*/
int ima_add_template_entry(struct ima_template_entry *entry, int violation,
- const char *op, struct inode *inode)
+ const char *op, struct inode *inode,
+ const unsigned char *filename)
{
- u8 digest[IMA_DIGEST_SIZE];
+ u8 digest[TPM_DIGEST_SIZE];
const char *audit_cause = "hash_added";
char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
int audit_info = 1;
@@ -141,8 +142,7 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
}
out:
mutex_unlock(&ima_extend_list_mutex);
- integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
- entry->template.file_name,
+ integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
op, audit_cause, result, audit_info);
return result;
}
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
new file mode 100644
index 000000000000..635695f6a185
--- /dev/null
+++ b/security/integrity/ima/ima_template.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2013 Politecnico di Torino, Italy
+ * TORSEC group -- http://security.polito.it
+ *
+ * Author: Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_template.c
+ * Helpers to manage template descriptors.
+ */
+#include <crypto/hash_info.h>
+
+#include "ima.h"
+#include "ima_template_lib.h"
+
+static struct ima_template_desc defined_templates[] = {
+ {.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT},
+ {.name = "ima-ng",.fmt = "d-ng|n-ng"},
+ {.name = "ima-sig",.fmt = "d-ng|n-ng|sig"},
+};
+
+static struct ima_template_field supported_fields[] = {
+ {.field_id = "d",.field_init = ima_eventdigest_init,
+ .field_show = ima_show_template_digest},
+ {.field_id = "n",.field_init = ima_eventname_init,
+ .field_show = ima_show_template_string},
+ {.field_id = "d-ng",.field_init = ima_eventdigest_ng_init,
+ .field_show = ima_show_template_digest_ng},
+ {.field_id = "n-ng",.field_init = ima_eventname_ng_init,
+ .field_show = ima_show_template_string},
+ {.field_id = "sig",.field_init = ima_eventsig_init,
+ .field_show = ima_show_template_sig},
+};
+
+static struct ima_template_desc *ima_template;
+static struct ima_template_desc *lookup_template_desc(const char *name);
+
+static int __init ima_template_setup(char *str)
+{
+ struct ima_template_desc *template_desc;
+ int template_len = strlen(str);
+
+ /*
+ * Verify that a template with the supplied name exists.
+ * If not, use CONFIG_IMA_DEFAULT_TEMPLATE.
+ */
+ template_desc = lookup_template_desc(str);
+ if (!template_desc)
+ return 1;
+
+ /*
+ * Verify whether the current hash algorithm is supported
+ * by the 'ima' template.
+ */
+ if (template_len == 3 && strcmp(str, IMA_TEMPLATE_IMA_NAME) == 0 &&
+ ima_hash_algo != HASH_ALGO_SHA1 && ima_hash_algo != HASH_ALGO_MD5) {
+ pr_err("IMA: template does not support hash alg\n");
+ return 1;
+ }
+
+ ima_template = template_desc;
+ return 1;
+}
+__setup("ima_template=", ima_template_setup);
+
+static struct ima_template_desc *lookup_template_desc(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(defined_templates); i++) {
+ if (strcmp(defined_templates[i].name, name) == 0)
+ return defined_templates + i;
+ }
+
+ return NULL;
+}
+
+static struct ima_template_field *lookup_template_field(const char *field_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(supported_fields); i++)
+ if (strncmp(supported_fields[i].field_id, field_id,
+ IMA_TEMPLATE_FIELD_ID_MAX_LEN) == 0)
+ return &supported_fields[i];
+ return NULL;
+}
+
+static int template_fmt_size(const char *template_fmt)
+{
+ char c;
+ int template_fmt_len = strlen(template_fmt);
+ int i = 0, j = 0;
+
+ while (i < template_fmt_len) {
+ c = template_fmt[i];
+ if (c == '|')
+ j++;
+ i++;
+ }
+
+ return j + 1;
+}
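For example, the 'ima-sig' format string "d-ng|n-ng|sig" contains two '|' separators, so template_fmt_size() returns 3.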
+
+static int template_desc_init_fields(const char *template_fmt,
+ struct ima_template_field ***fields,
+ int *num_fields)
+{
+ char *c, *template_fmt_copy, *template_fmt_ptr;
+ int template_num_fields = template_fmt_size(template_fmt);
+ int i, result = 0;
+
+ if (template_num_fields > IMA_TEMPLATE_NUM_FIELDS_MAX)
+ return -EINVAL;
+
+ /* copying is needed as strsep() modifies the original buffer */
+ template_fmt_copy = kstrdup(template_fmt, GFP_KERNEL);
+ if (template_fmt_copy == NULL)
+ return -ENOMEM;
+
+ *fields = kzalloc(template_num_fields * sizeof(*fields), GFP_KERNEL);
+ if (*fields == NULL) {
+ result = -ENOMEM;
+ goto out;
+ }
+
+ template_fmt_ptr = template_fmt_copy;
+ for (i = 0; (c = strsep(&template_fmt_ptr, "|")) != NULL &&
+ i < template_num_fields; i++) {
+ struct ima_template_field *f = lookup_template_field(c);
+
+ if (!f) {
+ result = -ENOENT;
+ goto out;
+ }
+ (*fields)[i] = f;
+ }
+ *num_fields = i;
+out:
+ if (result < 0) {
+ kfree(*fields);
+ *fields = NULL;
+ }
+ kfree(template_fmt_copy);
+ return result;
+}
+
+static int init_defined_templates(void)
+{
+ int i = 0;
+ int result = 0;
+
+ /* Init defined templates. */
+ for (i = 0; i < ARRAY_SIZE(defined_templates); i++) {
+ struct ima_template_desc *template = &defined_templates[i];
+
+ result = template_desc_init_fields(template->fmt,
+ &(template->fields),
+ &(template->num_fields));
+ if (result < 0)
+ return result;
+ }
+ return result;
+}
+
+struct ima_template_desc *ima_template_desc_current(void)
+{
+ if (!ima_template)
+ ima_template =
+ lookup_template_desc(CONFIG_IMA_DEFAULT_TEMPLATE);
+ return ima_template;
+}
+
+int ima_init_template(void)
+{
+ int result;
+
+ result = init_defined_templates();
+ if (result < 0)
+ return result;
+
+ return 0;
+}
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
new file mode 100644
index 000000000000..c38adcc910fb
--- /dev/null
+++ b/security/integrity/ima/ima_template_lib.c
@@ -0,0 +1,351 @@
+/*
+ * Copyright (C) 2013 Politecnico di Torino, Italy
+ * TORSEC group -- http://security.polito.it
+ *
+ * Author: Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_template_lib.c
+ * Library of supported template fields.
+ */
+#include <crypto/hash_info.h>
+
+#include "ima_template_lib.h"
+
+static bool ima_template_hash_algo_allowed(u8 algo)
+{
+ if (algo == HASH_ALGO_SHA1 || algo == HASH_ALGO_MD5)
+ return true;
+
+ return false;
+}
+
+enum data_formats {
+ DATA_FMT_DIGEST = 0,
+ DATA_FMT_DIGEST_WITH_ALGO,
+ DATA_FMT_EVENT_NAME,
+ DATA_FMT_STRING,
+ DATA_FMT_HEX
+};
+
+static int ima_write_template_field_data(const void *data, const u32 datalen,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ u8 *buf, *buf_ptr;
+ u32 buflen;
+
+ switch (datafmt) {
+ case DATA_FMT_EVENT_NAME:
+ buflen = IMA_EVENT_NAME_LEN_MAX + 1;
+ break;
+ case DATA_FMT_STRING:
+ buflen = datalen + 1;
+ break;
+ default:
+ buflen = datalen;
+ }
+
+ buf = kzalloc(buflen, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, data, datalen);
+
+ /*
+ * Replace all space characters with underscore for event names and
+ * strings. This prevents filenames that contain spaces, or that
+ * end with the suffix ' (deleted)', from being split into multiple
+ * template fields when a measurement list is parsed (the space is
+ * the delimiter character for measurement lists in ASCII format).
+ */
+ if (datafmt == DATA_FMT_EVENT_NAME || datafmt == DATA_FMT_STRING) {
+ for (buf_ptr = buf; buf_ptr - buf < datalen; buf_ptr++)
+ if (*buf_ptr == ' ')
+ *buf_ptr = '_';
+ }
+
+ field_data->data = buf;
+ field_data->len = buflen;
+ return 0;
+}
+
+static void ima_show_template_data_ascii(struct seq_file *m,
+ enum ima_show_type show,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ u8 *buf_ptr = field_data->data;
+ u32 buflen = field_data->len;
+
+ switch (datafmt) {
+ case DATA_FMT_DIGEST_WITH_ALGO:
+ buf_ptr = strnchr(field_data->data, buflen, ':');
+ if (buf_ptr != field_data->data)
+ seq_printf(m, "%s", field_data->data);
+
+ /* skip ':' and '\0' */
+ buf_ptr += 2;
+ buflen -= buf_ptr - field_data->data;
+ case DATA_FMT_DIGEST:
+ case DATA_FMT_HEX:
+ if (!buflen)
+ break;
+ ima_print_digest(m, buf_ptr, buflen);
+ break;
+ case DATA_FMT_STRING:
+ seq_printf(m, "%s", buf_ptr);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ima_show_template_data_binary(struct seq_file *m,
+ enum ima_show_type show,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ if (show != IMA_SHOW_BINARY_NO_FIELD_LEN)
+ ima_putc(m, &field_data->len, sizeof(u32));
+
+ if (!field_data->len)
+ return;
+
+ ima_putc(m, field_data->data, field_data->len);
+}
+
+static void ima_show_template_field_data(struct seq_file *m,
+ enum ima_show_type show,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ switch (show) {
+ case IMA_SHOW_ASCII:
+ ima_show_template_data_ascii(m, show, datafmt, field_data);
+ break;
+ case IMA_SHOW_BINARY:
+ case IMA_SHOW_BINARY_NO_FIELD_LEN:
+ ima_show_template_data_binary(m, show, datafmt, field_data);
+ break;
+ default:
+ break;
+ }
+}
+
+void ima_show_template_digest(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_DIGEST, field_data);
+}
+
+void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_DIGEST_WITH_ALGO,
+ field_data);
+}
+
+void ima_show_template_string(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_STRING, field_data);
+}
+
+void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_HEX, field_data);
+}
+
+static int ima_eventdigest_init_common(u8 *digest, u32 digestsize, u8 hash_algo,
+ struct ima_field_data *field_data,
+ bool size_limit)
+{
+ /*
+ * digest formats:
+ * - DATA_FMT_DIGEST: digest
+ * - DATA_FMT_DIGEST_WITH_ALGO: [<hash algo>] + ':' + '\0' + digest,
+ * where <hash algo> is provided if the hash algorithm is not
+ * SHA1 or MD5
+ */
+ u8 buffer[CRYPTO_MAX_ALG_NAME + 2 + IMA_MAX_DIGEST_SIZE] = { 0 };
+ enum data_formats fmt = DATA_FMT_DIGEST;
+ u32 offset = 0;
+
+ if (!size_limit) {
+ fmt = DATA_FMT_DIGEST_WITH_ALGO;
+ if (hash_algo < HASH_ALGO__LAST)
+ offset += snprintf(buffer, CRYPTO_MAX_ALG_NAME + 1,
+ "%s", hash_algo_name[hash_algo]);
+ buffer[offset] = ':';
+ offset += 2;
+ }
+
+ if (digest)
+ memcpy(buffer + offset, digest, digestsize);
+ else
+ /*
+ * If digest is NULL, the event being recorded is a violation.
+ * Make room for the digest by increasing the offset by
+ * IMA_DIGEST_SIZE.
+ */
+ offset += IMA_DIGEST_SIZE;
+
+ return ima_write_template_field_data(buffer, offset + digestsize,
+ fmt, field_data);
+}
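For a sha256 digest in the 'd-ng' field the buffer therefore begins with the bytes 's' 'h' 'a' '2' '5' '6' ':' '\0', followed by the 32 digest bytes; ima_show_template_data_ascii() relies on exactly that ':' '\0' pair to split the algorithm name from the digest.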
+
+/*
+ * This function writes the digest of an event (with size limit).
+ */
+int ima_eventdigest_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data)
+{
+ struct {
+ struct ima_digest_data hdr;
+ char digest[IMA_MAX_DIGEST_SIZE];
+ } hash;
+ u8 *cur_digest = NULL;
+ u32 cur_digestsize = 0;
+ struct inode *inode;
+ int result;
+
+ memset(&hash, 0, sizeof(hash));
+
+ if (!iint) /* recording a violation. */
+ goto out;
+
+ if (ima_template_hash_algo_allowed(iint->ima_hash->algo)) {
+ cur_digest = iint->ima_hash->digest;
+ cur_digestsize = iint->ima_hash->length;
+ goto out;
+ }
+
+ if (!file) /* missing info to re-calculate the digest */
+ return -EINVAL;
+
+ inode = file_inode(file);
+ hash.hdr.algo = ima_template_hash_algo_allowed(ima_hash_algo) ?
+ ima_hash_algo : HASH_ALGO_SHA1;
+ result = ima_calc_file_hash(file, &hash.hdr);
+ if (result) {
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
+ filename, "collect_data",
+ "failed", result, 0);
+ return result;
+ }
+ cur_digest = hash.hdr.digest;
+ cur_digestsize = hash.hdr.length;
+out:
+ return ima_eventdigest_init_common(cur_digest, cur_digestsize, -1,
+ field_data, true);
+}
+
+/*
+ * This function writes the digest of an event (without size limit).
+ */
+int ima_eventdigest_ng_init(struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, struct ima_field_data *field_data)
+{
+ u8 *cur_digest = NULL, hash_algo = HASH_ALGO__LAST;
+ u32 cur_digestsize = 0;
+
+ /* If iint is NULL, we are recording a violation. */
+ if (!iint)
+ goto out;
+
+ cur_digest = iint->ima_hash->digest;
+ cur_digestsize = iint->ima_hash->length;
+
+ hash_algo = iint->ima_hash->algo;
+out:
+ return ima_eventdigest_init_common(cur_digest, cur_digestsize,
+ hash_algo, field_data, false);
+}
+
+static int ima_eventname_init_common(struct integrity_iint_cache *iint,
+ struct file *file,
+ const unsigned char *filename,
+ struct ima_field_data *field_data,
+ bool size_limit)
+{
+ const char *cur_filename = NULL;
+ u32 cur_filename_len = 0;
+ enum data_formats fmt = size_limit ?
+ DATA_FMT_EVENT_NAME : DATA_FMT_STRING;
+
+ BUG_ON(filename == NULL && file == NULL);
+
+ if (filename) {
+ cur_filename = filename;
+ cur_filename_len = strlen(filename);
+
+ if (!size_limit || cur_filename_len <= IMA_EVENT_NAME_LEN_MAX)
+ goto out;
+ }
+
+ if (file) {
+ cur_filename = file->f_dentry->d_name.name;
+ cur_filename_len = strlen(cur_filename);
+ } else
+ /*
+ * Truncate the filename if it is too long and no
+ * file descriptor is available.
+ */
+ cur_filename_len = IMA_EVENT_NAME_LEN_MAX;
+out:
+ return ima_write_template_field_data(cur_filename, cur_filename_len,
+ fmt, field_data);
+}
+
+/*
+ * This function writes the name of an event (with size limit).
+ */
+int ima_eventname_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data)
+{
+ return ima_eventname_init_common(iint, file, filename,
+ field_data, true);
+}
+
+/*
+ * This function writes the name of an event (without size limit).
+ */
+int ima_eventname_ng_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data)
+{
+ return ima_eventname_init_common(iint, file, filename,
+ field_data, false);
+}
+
+/*
+ * ima_eventsig_init - include the file signature as part of the template data
+ */
+int ima_eventsig_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data)
+{
+ enum data_formats fmt = DATA_FMT_HEX;
+ int rc = 0;
+
+ if ((!xattr_value) || (xattr_value->type != EVM_IMA_XATTR_DIGSIG))
+ goto out;
+
+ rc = ima_write_template_field_data(xattr_value, xattr_len, fmt,
+ field_data);
+out:
+ return rc;
+}
diff --git a/security/integrity/ima/ima_template_lib.h b/security/integrity/ima/ima_template_lib.h
new file mode 100644
index 000000000000..63f6b52cb1c2
--- /dev/null
+++ b/security/integrity/ima/ima_template_lib.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2013 Politecnico di Torino, Italy
+ * TORSEC group -- http://security.polito.it
+ *
+ * Author: Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_template_lib.h
+ * Header for the library of supported template fields.
+ */
+#ifndef __LINUX_IMA_TEMPLATE_LIB_H
+#define __LINUX_IMA_TEMPLATE_LIB_H
+
+#include <linux/seq_file.h>
+#include "ima.h"
+
+void ima_show_template_digest(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_string(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+int ima_eventdigest_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data);
+int ima_eventname_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data);
+int ima_eventdigest_ng_init(struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, struct ima_field_data *field_data);
+int ima_eventname_ng_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data);
+int ima_eventsig_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data);
+#endif /* __LINUX_IMA_TEMPLATE_LIB_H */
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
index c42fb7a70dee..2fb5e53e927f 100644
--- a/security/integrity/integrity.h
+++ b/security/integrity/integrity.h
@@ -54,25 +54,57 @@ enum evm_ima_xattr_type {
IMA_XATTR_DIGEST = 0x01,
EVM_XATTR_HMAC,
EVM_IMA_XATTR_DIGSIG,
+ IMA_XATTR_DIGEST_NG,
};
struct evm_ima_xattr_data {
u8 type;
u8 digest[SHA1_DIGEST_SIZE];
-} __attribute__((packed));
+} __packed;
+
+#define IMA_MAX_DIGEST_SIZE 64
+
+struct ima_digest_data {
+ u8 algo;
+ u8 length;
+ union {
+ struct {
+ u8 unused;
+ u8 type;
+ } sha1;
+ struct {
+ u8 type;
+ u8 algo;
+ } ng;
+ u8 data[2];
+ } xattr;
+ u8 digest[0];
+} __packed;
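
Because digest[0] is a flexible array, an ima_digest_data is always allocated as one block sized for the algorithm in use. A userspace-style sketch of that allocation pattern (the demo struct and the algorithm id are stand-ins, not the kernel definitions):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Demo mirror of the header-plus-digest layout; not the kernel struct. */
struct digest_demo {
	uint8_t algo;
	uint8_t length;
	uint8_t digest[];
};

static struct digest_demo *alloc_digest(uint8_t algo, uint8_t len)
{
	struct digest_demo *d = calloc(1, sizeof(*d) + len);

	if (d) {
		d->algo = algo;
		d->length = len;
	}
	return d;
}

int main(void)
{
	struct digest_demo *d = alloc_digest(4 /* stand-in for a SHA-256 id */, 32);

	if (!d)
		return 1;
	memset(d->digest, 0xaa, d->length);	/* room for 32 raw digest bytes */
	free(d);
	return 0;
}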
+
+/*
+ * signature format v2 - for using with asymmetric keys
+ */
+struct signature_v2_hdr {
+ uint8_t type; /* xattr type */
+ uint8_t version; /* signature format version */
+ uint8_t hash_algo; /* Digest algorithm [enum pkey_hash_algo] */
+ uint32_t keyid; /* IMA key identifier - not X509/PGP specific */
+ uint16_t sig_size; /* signature size */
+ uint8_t sig[0]; /* signature payload */
+} __packed;
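
The total size of a serialized v2 signature is therefore the fixed header plus sig_size payload bytes. A small userspace mock of the corresponding length check (the struct here is a demo copy, not the kernel one, and the assumption that sig_size is stored big-endian matches how the evm userspace tools write it):

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>

struct sig_v2_hdr_demo {
	uint8_t type;
	uint8_t version;
	uint8_t hash_algo;
	uint32_t keyid;
	uint16_t sig_size;	/* big-endian on disk (assumption) */
	uint8_t sig[];
} __attribute__((packed));

static int sig_v2_len_ok(const void *xattr, size_t len)
{
	const struct sig_v2_hdr_demo *hdr = xattr;

	if (len < sizeof(*hdr))
		return 0;
	return len == sizeof(*hdr) + ntohs(hdr->sig_size);
}

int main(void)
{
	/* type 3 (EVM_IMA_XATTR_DIGSIG), 4 signature bytes, sig_size = 4 */
	uint8_t blob[9 + 4] = { 3, 2, 4, 0, 0, 0, 0, 0x00, 0x04 };

	return sig_v2_len_ok(blob, sizeof(blob)) ? 0 : 1;
}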
/* integrity data associated with an inode */
struct integrity_iint_cache {
- struct rb_node rb_node; /* rooted in integrity_iint_tree */
+ struct rb_node rb_node; /* rooted in integrity_iint_tree */
struct inode *inode; /* back pointer to inode in question */
u64 version; /* track inode changes */
unsigned long flags;
- struct evm_ima_xattr_data ima_xattr;
enum integrity_status ima_file_status:4;
enum integrity_status ima_mmap_status:4;
enum integrity_status ima_bprm_status:4;
enum integrity_status ima_module_status:4;
enum integrity_status evm_status:4;
+ struct ima_digest_data *ima_hash;
};
/* rbtree tree calls to lookup, insert, delete
@@ -89,7 +121,7 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
#ifdef CONFIG_INTEGRITY_SIGNATURE
int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
- const char *digest, int digestlen);
+ const char *digest, int digestlen);
#else
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index a90d6d300dbd..a4f3f8c48d6e 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -4,6 +4,7 @@
config KEYS
bool "Enable access key retention support"
+ select ASSOCIATIVE_ARRAY
help
This option provides support for retaining authentication tokens and
access keys in the kernel.
@@ -19,6 +20,34 @@ config KEYS
If you are unsure as to whether this is required, answer N.
+config PERSISTENT_KEYRINGS
+ bool "Enable register of persistent per-UID keyrings"
+ depends on KEYS
+ help
+ This option provides a register of persistent per-UID keyrings,
+ primarily aimed at Kerberos key storage. The keyrings are persistent
+ in the sense that they stay around after all processes of that UID
+ have exited, not that they survive the machine being rebooted.
+
+ A particular keyring may be accessed by either the user whose keyring
+ it is or by a process with administrative privileges. The active
+ LSMs gets to rule on which admin-level processes get to access the
+ cache.
+
+ Keyrings are created and added into the register upon demand and get
+ removed if they expire (a default timeout is set upon creation).
+
+config BIG_KEYS
+ bool "Large payload keys"
+ depends on KEYS
+ depends on TMPFS
+ help
+ This option provides support for holding large keys within the kernel
+ (for example Kerberos ticket caches). The data may be stored out to
+ swapspace by tmpfs.
+
+ If you are unsure as to whether this is required, answer N.
+
config TRUSTED_KEYS
tristate "TRUSTED KEYS"
depends on KEYS && TCG_TPM
diff --git a/security/keys/Makefile b/security/keys/Makefile
index 504aaa008388..dfb3a7bededf 100644
--- a/security/keys/Makefile
+++ b/security/keys/Makefile
@@ -18,9 +18,11 @@ obj-y := \
obj-$(CONFIG_KEYS_COMPAT) += compat.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSCTL) += sysctl.o
+obj-$(CONFIG_PERSISTENT_KEYRINGS) += persistent.o
#
# Key types
#
+obj-$(CONFIG_BIG_KEYS) += big_key.o
obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys/
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
new file mode 100644
index 000000000000..8137b27d641d
--- /dev/null
+++ b/security/keys/big_key.c
@@ -0,0 +1,207 @@
+/* Large capacity key type
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/file.h>
+#include <linux/shmem_fs.h>
+#include <linux/err.h>
+#include <keys/user-type.h>
+#include <keys/big_key-type.h>
+
+MODULE_LICENSE("GPL");
+
+/*
+ * If the data is under this limit, there's no point creating a shm file to
+ * hold it as the permanently resident metadata for the shmem fs will be at
+ * least as large as the data.
+ */
+#define BIG_KEY_FILE_THRESHOLD (sizeof(struct inode) + sizeof(struct dentry))
+
+/*
+ * big_key defined keys take an arbitrary string as the description and an
+ * arbitrary blob of data as the payload
+ */
+struct key_type key_type_big_key = {
+ .name = "big_key",
+ .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .instantiate = big_key_instantiate,
+ .match = user_match,
+ .revoke = big_key_revoke,
+ .destroy = big_key_destroy,
+ .describe = big_key_describe,
+ .read = big_key_read,
+};
+
+/*
+ * Instantiate a big key
+ */
+int big_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
+{
+ struct path *path = (struct path *)&key->payload.data2;
+ struct file *file;
+ ssize_t written;
+ size_t datalen = prep->datalen;
+ int ret;
+
+ ret = -EINVAL;
+ if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data)
+ goto error;
+
+ /* Set an arbitrary quota */
+ ret = key_payload_reserve(key, 16);
+ if (ret < 0)
+ goto error;
+
+ key->type_data.x[1] = datalen;
+
+ if (datalen > BIG_KEY_FILE_THRESHOLD) {
+ /* Create a shmem file to store the data in. This will permit the data
+ * to be swapped out if needed.
+ *
+ * TODO: Encrypt the stored data with a temporary key.
+ */
+ file = shmem_kernel_file_setup("", datalen, 0);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto err_quota;
+ }
+
+ written = kernel_write(file, prep->data, prep->datalen, 0);
+ if (written != datalen) {
+ ret = written;
+ if (written >= 0)
+ ret = -ENOMEM;
+ goto err_fput;
+ }
+
+ /* Pin the mount and dentry to the key so that we can open it again
+ * later
+ */
+ *path = file->f_path;
+ path_get(path);
+ fput(file);
+ } else {
+ /* Just store the data in a buffer */
+ void *data = kmalloc(datalen, GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto err_quota;
+ }
+
+ key->payload.data = memcpy(data, prep->data, prep->datalen);
+ }
+ return 0;
+
+err_fput:
+ fput(file);
+err_quota:
+ key_payload_reserve(key, 0);
+error:
+ return ret;
+}
+
+/*
+ * dispose of the links from a revoked keyring
+ * - called with the key sem write-locked
+ */
+void big_key_revoke(struct key *key)
+{
+ struct path *path = (struct path *)&key->payload.data2;
+
+ /* clear the quota */
+ key_payload_reserve(key, 0);
+ if (key_is_instantiated(key) && key->type_data.x[1] > BIG_KEY_FILE_THRESHOLD)
+ vfs_truncate(path, 0);
+}
+
+/*
+ * dispose of the data dangling from the corpse of a big_key key
+ */
+void big_key_destroy(struct key *key)
+{
+ if (key->type_data.x[1] > BIG_KEY_FILE_THRESHOLD) {
+ struct path *path = (struct path *)&key->payload.data2;
+ path_put(path);
+ path->mnt = NULL;
+ path->dentry = NULL;
+ } else {
+ kfree(key->payload.data);
+ key->payload.data = NULL;
+ }
+}
+
+/*
+ * describe the big_key key
+ */
+void big_key_describe(const struct key *key, struct seq_file *m)
+{
+ unsigned long datalen = key->type_data.x[1];
+
+ seq_puts(m, key->description);
+
+ if (key_is_instantiated(key))
+ seq_printf(m, ": %lu [%s]",
+ datalen,
+ datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
+}
+
+/*
+ * read the key data
+ * - the key's semaphore is read-locked
+ */
+long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
+{
+ unsigned long datalen = key->type_data.x[1];
+ long ret;
+
+ if (!buffer || buflen < datalen)
+ return datalen;
+
+ if (datalen > BIG_KEY_FILE_THRESHOLD) {
+ struct path *path = (struct path *)&key->payload.data2;
+ struct file *file;
+ loff_t pos;
+
+ file = dentry_open(path, O_RDONLY, current_cred());
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ pos = 0;
+ ret = vfs_read(file, buffer, datalen, &pos);
+ fput(file);
+ if (ret >= 0 && ret != datalen)
+ ret = -EIO;
+ } else {
+ ret = datalen;
+ if (copy_to_user(buffer, key->payload.data, datalen) != 0)
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
+/*
+ * Module stuff
+ */
+static int __init big_key_init(void)
+{
+ return register_key_type(&key_type_big_key);
+}
+
+static void __exit big_key_cleanup(void)
+{
+ unregister_key_type(&key_type_big_key);
+}
+
+module_init(big_key_init);
+module_exit(big_key_cleanup);
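
End to end: payloads above BIG_KEY_FILE_THRESHOLD are spooled into a swappable shmem file and reopened on read, while smaller ones live in a kmalloc'd buffer; either way userspace sees the same bytes back. A minimal sketch of exercising the type with libkeyutils (assumes a kernel with this key type registered):

#include <keyutils.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	char blob[64 * 1024];	/* large enough to take the shmem path */
	char out[64 * 1024];
	key_serial_t id;

	memset(blob, 'A', sizeof(blob));
	id = add_key("big_key", "demo:blob", blob, sizeof(blob),
		     KEY_SPEC_SESSION_KEYRING);
	if (id < 0) {
		perror("add_key");
		return 1;
	}
	if (keyctl_read(id, out, sizeof(out)) < 0) {
		perror("keyctl_read");
		return 1;
	}
	return memcmp(blob, out, sizeof(blob)) != 0;
}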
diff --git a/security/keys/compat.c b/security/keys/compat.c
index d65fa7fa29ba..bbd32c729dbb 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -138,6 +138,9 @@ asmlinkage long compat_sys_keyctl(u32 option,
case KEYCTL_INVALIDATE:
return keyctl_invalidate_key(arg2);
+ case KEYCTL_GET_PERSISTENT:
+ return keyctl_get_persistent(arg2, arg3);
+
default:
return -EOPNOTSUPP;
}
diff --git a/security/keys/gc.c b/security/keys/gc.c
index d67c97bb1025..d3222b6d7d59 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -131,50 +131,6 @@ void key_gc_keytype(struct key_type *ktype)
}
/*
- * Garbage collect pointers from a keyring.
- *
- * Not called with any locks held. The keyring's key struct will not be
- * deallocated under us as only our caller may deallocate it.
- */
-static void key_gc_keyring(struct key *keyring, time_t limit)
-{
- struct keyring_list *klist;
- int loop;
-
- kenter("%x", key_serial(keyring));
-
- if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
- (1 << KEY_FLAG_REVOKED)))
- goto dont_gc;
-
- /* scan the keyring looking for dead keys */
- rcu_read_lock();
- klist = rcu_dereference(keyring->payload.subscriptions);
- if (!klist)
- goto unlock_dont_gc;
-
- loop = klist->nkeys;
- smp_rmb();
- for (loop--; loop >= 0; loop--) {
- struct key *key = rcu_dereference(klist->keys[loop]);
- if (key_is_dead(key, limit))
- goto do_gc;
- }
-
-unlock_dont_gc:
- rcu_read_unlock();
-dont_gc:
- kleave(" [no gc]");
- return;
-
-do_gc:
- rcu_read_unlock();
-
- keyring_gc(keyring, limit);
- kleave(" [gc]");
-}
-
-/*
* Garbage collect a list of unreferenced, detached keys
*/
static noinline void key_gc_unused_keys(struct list_head *keys)
@@ -392,8 +348,7 @@ found_unreferenced_key:
*/
found_keyring:
spin_unlock(&key_serial_lock);
- kdebug("scan keyring %d", key->serial);
- key_gc_keyring(key, limit);
+ keyring_gc(key, limit);
goto maybe_resched;
/* We found a dead key that is still referenced. Reset its type and
diff --git a/security/keys/internal.h b/security/keys/internal.h
index d4f1468b9b50..80b2aac4f50c 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -89,42 +89,53 @@ extern struct key_type *key_type_lookup(const char *type);
extern void key_type_put(struct key_type *ktype);
extern int __key_link_begin(struct key *keyring,
- const struct key_type *type,
- const char *description,
- unsigned long *_prealloc);
+ const struct keyring_index_key *index_key,
+ struct assoc_array_edit **_edit);
extern int __key_link_check_live_key(struct key *keyring, struct key *key);
-extern void __key_link(struct key *keyring, struct key *key,
- unsigned long *_prealloc);
+extern void __key_link(struct key *key, struct assoc_array_edit **_edit);
extern void __key_link_end(struct key *keyring,
- struct key_type *type,
- unsigned long prealloc);
+ const struct keyring_index_key *index_key,
+ struct assoc_array_edit *edit);
-extern key_ref_t __keyring_search_one(key_ref_t keyring_ref,
- const struct key_type *type,
- const char *description,
- key_perm_t perm);
+extern key_ref_t find_key_to_update(key_ref_t keyring_ref,
+ const struct keyring_index_key *index_key);
extern struct key *keyring_search_instkey(struct key *keyring,
key_serial_t target_id);
+extern int iterate_over_keyring(const struct key *keyring,
+ int (*func)(const struct key *key, void *data),
+ void *data);
+
typedef int (*key_match_func_t)(const struct key *, const void *);
+struct keyring_search_context {
+ struct keyring_index_key index_key;
+ const struct cred *cred;
+ key_match_func_t match;
+ const void *match_data;
+ unsigned flags;
+#define KEYRING_SEARCH_LOOKUP_TYPE 0x0001 /* [as type->def_lookup_type] */
+#define KEYRING_SEARCH_NO_STATE_CHECK 0x0002 /* Skip state checks */
+#define KEYRING_SEARCH_DO_STATE_CHECK 0x0004 /* Override NO_STATE_CHECK */
+#define KEYRING_SEARCH_NO_UPDATE_TIME 0x0008 /* Don't update times */
+#define KEYRING_SEARCH_NO_CHECK_PERM 0x0010 /* Don't check permissions */
+#define KEYRING_SEARCH_DETECT_TOO_DEEP 0x0020 /* Give an error on excessive depth */
+
+ int (*iterator)(const void *object, void *iterator_data);
+
+ /* Internal stuff */
+ int skipped_ret;
+ bool possessed;
+ key_ref_t result;
+ struct timespec now;
+};
+
extern key_ref_t keyring_search_aux(key_ref_t keyring_ref,
- const struct cred *cred,
- struct key_type *type,
- const void *description,
- key_match_func_t match,
- bool no_state_check);
-
-extern key_ref_t search_my_process_keyrings(struct key_type *type,
- const void *description,
- key_match_func_t match,
- bool no_state_check,
- const struct cred *cred);
-extern key_ref_t search_process_keyrings(struct key_type *type,
- const void *description,
- key_match_func_t match,
- const struct cred *cred);
+ struct keyring_search_context *ctx);
+
+extern key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx);
+extern key_ref_t search_process_keyrings(struct keyring_search_context *ctx);
extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check);
@@ -202,7 +213,7 @@ extern struct key *key_get_instantiation_authkey(key_serial_t target_id);
/*
* Determine whether a key is dead.
*/
-static inline bool key_is_dead(struct key *key, time_t limit)
+static inline bool key_is_dead(const struct key *key, time_t limit)
{
return
key->flags & ((1 << KEY_FLAG_DEAD) |
@@ -244,6 +255,15 @@ extern long keyctl_invalidate_key(key_serial_t);
extern long keyctl_instantiate_key_common(key_serial_t,
const struct iovec *,
unsigned, size_t, key_serial_t);
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+extern long keyctl_get_persistent(uid_t, key_serial_t);
+extern unsigned persistent_keyring_expiry;
+#else
+static inline long keyctl_get_persistent(uid_t uid, key_serial_t destring)
+{
+ return -EOPNOTSUPP;
+}
+#endif
/*
* Debugging key validation
diff --git a/security/keys/key.c b/security/keys/key.c
index 8fb7c7bd4657..6e21c11e48bc 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -242,8 +242,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
}
}
- desclen = strlen(desc) + 1;
- quotalen = desclen + type->def_datalen;
+ desclen = strlen(desc);
+ quotalen = desclen + 1 + type->def_datalen;
/* get hold of the key tracking for this user */
user = key_user_lookup(uid);
@@ -272,12 +272,13 @@ struct key *key_alloc(struct key_type *type, const char *desc,
}
/* allocate and initialise the key and its description */
- key = kmem_cache_alloc(key_jar, GFP_KERNEL);
+ key = kmem_cache_zalloc(key_jar, GFP_KERNEL);
if (!key)
goto no_memory_2;
if (desc) {
- key->description = kmemdup(desc, desclen, GFP_KERNEL);
+ key->index_key.desc_len = desclen;
+ key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
if (!key->description)
goto no_memory_3;
}
@@ -285,22 +286,18 @@ struct key *key_alloc(struct key_type *type, const char *desc,
atomic_set(&key->usage, 1);
init_rwsem(&key->sem);
lockdep_set_class(&key->sem, &type->lock_class);
- key->type = type;
+ key->index_key.type = type;
key->user = user;
key->quotalen = quotalen;
key->datalen = type->def_datalen;
key->uid = uid;
key->gid = gid;
key->perm = perm;
- key->flags = 0;
- key->expiry = 0;
- key->payload.data = NULL;
- key->security = NULL;
if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
key->flags |= 1 << KEY_FLAG_IN_QUOTA;
-
- memset(&key->type_data, 0, sizeof(key->type_data));
+ if (flags & KEY_ALLOC_TRUSTED)
+ key->flags |= 1 << KEY_FLAG_TRUSTED;
#ifdef KEY_DEBUGGING
key->magic = KEY_DEBUG_MAGIC;
@@ -408,7 +405,7 @@ static int __key_instantiate_and_link(struct key *key,
struct key_preparsed_payload *prep,
struct key *keyring,
struct key *authkey,
- unsigned long *_prealloc)
+ struct assoc_array_edit **_edit)
{
int ret, awaken;
@@ -435,7 +432,7 @@ static int __key_instantiate_and_link(struct key *key,
/* and link it into the destination keyring */
if (keyring)
- __key_link(keyring, key, _prealloc);
+ __key_link(key, _edit);
/* disable the authorisation key */
if (authkey)
@@ -475,7 +472,7 @@ int key_instantiate_and_link(struct key *key,
struct key *authkey)
{
struct key_preparsed_payload prep;
- unsigned long prealloc;
+ struct assoc_array_edit *edit;
int ret;
memset(&prep, 0, sizeof(prep));
@@ -489,17 +486,15 @@ int key_instantiate_and_link(struct key *key,
}
if (keyring) {
- ret = __key_link_begin(keyring, key->type, key->description,
- &prealloc);
+ ret = __key_link_begin(keyring, &key->index_key, &edit);
if (ret < 0)
goto error_free_preparse;
}
- ret = __key_instantiate_and_link(key, &prep, keyring, authkey,
- &prealloc);
+ ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit);
if (keyring)
- __key_link_end(keyring, key->type, prealloc);
+ __key_link_end(keyring, &key->index_key, edit);
error_free_preparse:
if (key->type->preparse)
@@ -537,7 +532,7 @@ int key_reject_and_link(struct key *key,
struct key *keyring,
struct key *authkey)
{
- unsigned long prealloc;
+ struct assoc_array_edit *edit;
struct timespec now;
int ret, awaken, link_ret = 0;
@@ -548,8 +543,7 @@ int key_reject_and_link(struct key *key,
ret = -EBUSY;
if (keyring)
- link_ret = __key_link_begin(keyring, key->type,
- key->description, &prealloc);
+ link_ret = __key_link_begin(keyring, &key->index_key, &edit);
mutex_lock(&key_construction_mutex);
@@ -557,9 +551,10 @@ int key_reject_and_link(struct key *key,
if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
/* mark the key as being negatively instantiated */
atomic_inc(&key->user->nikeys);
+ key->type_data.reject_error = -error;
+ smp_wmb();
set_bit(KEY_FLAG_NEGATIVE, &key->flags);
set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
- key->type_data.reject_error = -error;
now = current_kernel_time();
key->expiry = now.tv_sec + timeout;
key_schedule_gc(key->expiry + key_gc_delay);
@@ -571,7 +566,7 @@ int key_reject_and_link(struct key *key,
/* and link it into the destination keyring */
if (keyring && link_ret == 0)
- __key_link(keyring, key, &prealloc);
+ __key_link(key, &edit);
/* disable the authorisation key */
if (authkey)
@@ -581,7 +576,7 @@ int key_reject_and_link(struct key *key,
mutex_unlock(&key_construction_mutex);
if (keyring)
- __key_link_end(keyring, key->type, prealloc);
+ __key_link_end(keyring, &key->index_key, edit);
/* wake up anyone waiting for a key to be constructed */
if (awaken)
@@ -645,7 +640,7 @@ found:
/* this races with key_put(), but that doesn't matter since key_put()
* doesn't actually change the key
*/
- atomic_inc(&key->usage);
+ __key_get(key);
error:
spin_unlock(&key_serial_lock);
@@ -780,25 +775,27 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
key_perm_t perm,
unsigned long flags)
{
- unsigned long prealloc;
+ struct keyring_index_key index_key = {
+ .description = description,
+ };
struct key_preparsed_payload prep;
+ struct assoc_array_edit *edit;
const struct cred *cred = current_cred();
- struct key_type *ktype;
struct key *keyring, *key = NULL;
key_ref_t key_ref;
int ret;
/* look up the key type to see if it's one of the registered kernel
* types */
- ktype = key_type_lookup(type);
- if (IS_ERR(ktype)) {
+ index_key.type = key_type_lookup(type);
+ if (IS_ERR(index_key.type)) {
key_ref = ERR_PTR(-ENODEV);
goto error;
}
key_ref = ERR_PTR(-EINVAL);
- if (!ktype->match || !ktype->instantiate ||
- (!description && !ktype->preparse))
+ if (!index_key.type->match || !index_key.type->instantiate ||
+ (!index_key.description && !index_key.type->preparse))
goto error_put_type;
keyring = key_ref_to_ptr(keyring_ref);
@@ -812,21 +809,28 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
memset(&prep, 0, sizeof(prep));
prep.data = payload;
prep.datalen = plen;
- prep.quotalen = ktype->def_datalen;
- if (ktype->preparse) {
- ret = ktype->preparse(&prep);
+ prep.quotalen = index_key.type->def_datalen;
+ prep.trusted = flags & KEY_ALLOC_TRUSTED;
+ if (index_key.type->preparse) {
+ ret = index_key.type->preparse(&prep);
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error_put_type;
}
- if (!description)
- description = prep.description;
+ if (!index_key.description)
+ index_key.description = prep.description;
key_ref = ERR_PTR(-EINVAL);
- if (!description)
+ if (!index_key.description)
goto error_free_prep;
}
+ index_key.desc_len = strlen(index_key.description);
+
+ key_ref = ERR_PTR(-EPERM);
+ if (!prep.trusted && test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags))
+ goto error_free_prep;
+ flags |= prep.trusted ? KEY_ALLOC_TRUSTED : 0;
- ret = __key_link_begin(keyring, ktype, description, &prealloc);
+ ret = __key_link_begin(keyring, &index_key, &edit);
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error_free_prep;
@@ -844,10 +848,9 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
* key of the same type and description in the destination keyring and
* update that instead if possible
*/
- if (ktype->update) {
- key_ref = __keyring_search_one(keyring_ref, ktype, description,
- 0);
- if (!IS_ERR(key_ref))
+ if (index_key.type->update) {
+ key_ref = find_key_to_update(keyring_ref, &index_key);
+ if (key_ref)
goto found_matching_key;
}
@@ -856,23 +859,24 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
perm |= KEY_USR_VIEW;
- if (ktype->read)
+ if (index_key.type->read)
perm |= KEY_POS_READ;
- if (ktype == &key_type_keyring || ktype->update)
+ if (index_key.type == &key_type_keyring ||
+ index_key.type->update)
perm |= KEY_POS_WRITE;
}
/* allocate a new key */
- key = key_alloc(ktype, description, cred->fsuid, cred->fsgid, cred,
- perm, flags);
+ key = key_alloc(index_key.type, index_key.description,
+ cred->fsuid, cred->fsgid, cred, perm, flags);
if (IS_ERR(key)) {
key_ref = ERR_CAST(key);
goto error_link_end;
}
/* instantiate it and link it into the target keyring */
- ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &prealloc);
+ ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit);
if (ret < 0) {
key_put(key);
key_ref = ERR_PTR(ret);
@@ -882,12 +886,12 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
key_ref = make_key_ref(key, is_key_possessed(keyring_ref));
error_link_end:
- __key_link_end(keyring, ktype, prealloc);
+ __key_link_end(keyring, &index_key, edit);
error_free_prep:
- if (ktype->preparse)
- ktype->free_preparse(&prep);
+ if (index_key.type->preparse)
+ index_key.type->free_preparse(&prep);
error_put_type:
- key_type_put(ktype);
+ key_type_put(index_key.type);
error:
return key_ref;
@@ -895,7 +899,7 @@ error:
/* we found a matching key, so we're going to try to update it
* - we can drop the locks first as we have the key pinned
*/
- __key_link_end(keyring, ktype, prealloc);
+ __key_link_end(keyring, &index_key, edit);
key_ref = __key_update(key_ref, &prep);
goto error_free_prep;
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 33cfd27b4de2..cee72ce64222 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1667,6 +1667,9 @@ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
case KEYCTL_INVALIDATE:
return keyctl_invalidate_key((key_serial_t) arg2);
+ case KEYCTL_GET_PERSISTENT:
+ return keyctl_get_persistent((uid_t)arg2, (key_serial_t)arg3);
+
default:
return -EOPNOTSUPP;
}
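
With the native and compat dispatchers both wired up, userspace can materialize a per-UID persistent keyring on demand through the libkeyutils wrapper. A minimal sketch (keyctl_get_persistent() is the keyutils helper for this operation; passing uid -1 to mean the caller's own uid follows the interface convention):

#include <sys/types.h>
#include <keyutils.h>
#include <stdio.h>

int main(void)
{
	/* uid -1: operate on the calling user's own persistent keyring */
	long id = keyctl_get_persistent((uid_t)-1, KEY_SPEC_PROCESS_KEYRING);

	if (id < 0) {
		perror("keyctl_get_persistent");
		return 1;
	}
	printf("persistent keyring: %ld\n", id);
	return 0;
}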
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 6ece7f2e5707..d46cbc5e335e 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -1,6 +1,6 @@
/* Keyring handling
*
- * Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2004-2005, 2008, 2013 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -17,25 +17,11 @@
#include <linux/seq_file.h>
#include <linux/err.h>
#include <keys/keyring-type.h>
+#include <keys/user-type.h>
+#include <linux/assoc_array_priv.h>
#include <linux/uaccess.h>
#include "internal.h"
-#define rcu_dereference_locked_keyring(keyring) \
- (rcu_dereference_protected( \
- (keyring)->payload.subscriptions, \
- rwsem_is_locked((struct rw_semaphore *)&(keyring)->sem)))
-
-#define rcu_deref_link_locked(klist, index, keyring) \
- (rcu_dereference_protected( \
- (klist)->keys[index], \
- rwsem_is_locked((struct rw_semaphore *)&(keyring)->sem)))
-
-#define MAX_KEYRING_LINKS \
- min_t(size_t, USHRT_MAX - 1, \
- ((PAGE_SIZE - sizeof(struct keyring_list)) / sizeof(struct key *)))
-
-#define KEY_LINK_FIXQUOTA 1UL
-
/*
* When plumbing the depths of the key tree, this sets a hard limit
* set on how deep we're willing to go.
@@ -47,6 +33,28 @@
*/
#define KEYRING_NAME_HASH_SIZE (1 << 5)
+/*
+ * We mark pointers we pass to the associative array with bit 1 set if
+ * they're keyrings and clear otherwise.
+ */
+#define KEYRING_PTR_SUBTYPE 0x2UL
+
+static inline bool keyring_ptr_is_keyring(const struct assoc_array_ptr *x)
+{
+ return (unsigned long)x & KEYRING_PTR_SUBTYPE;
+}
+static inline struct key *keyring_ptr_to_key(const struct assoc_array_ptr *x)
+{
+ void *object = assoc_array_ptr_to_leaf(x);
+ return (struct key *)((unsigned long)object & ~KEYRING_PTR_SUBTYPE);
+}
+static inline void *keyring_key_to_ptr(struct key *key)
+{
+ if (key->type == &key_type_keyring)
+ return (void *)((unsigned long)key | KEYRING_PTR_SUBTYPE);
+ return key;
+}
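
This tagging works because keys are allocated with at least 4-byte alignment, so bit 1 of a leaf pointer is always free to mark "this leaf is a keyring". A standalone round-trip of the same encode/decode trick (plain userspace C; the alignment guarantee is a stated assumption):

#include <assert.h>
#include <stdint.h>

#define PTR_SUBTYPE 0x2UL	/* assumes >= 4-byte alignment of targets */

static void *tag(void *p)     { return (void *)((uintptr_t)p | PTR_SUBTYPE); }
static void *untag(void *p)   { return (void *)((uintptr_t)p & ~PTR_SUBTYPE); }
static int is_tagged(void *p) { return ((uintptr_t)p & PTR_SUBTYPE) != 0; }

int main(void)
{
	int obj;	/* ints are at least 4-byte aligned, so bit 1 is free */
	void *t = tag(&obj);

	assert(is_tagged(t) && untag(t) == (void *)&obj);
	return 0;
}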
+
static struct list_head keyring_name_hash[KEYRING_NAME_HASH_SIZE];
static DEFINE_RWLOCK(keyring_name_lock);
@@ -67,7 +75,6 @@ static inline unsigned keyring_hash(const char *desc)
*/
static int keyring_instantiate(struct key *keyring,
struct key_preparsed_payload *prep);
-static int keyring_match(const struct key *keyring, const void *criterion);
static void keyring_revoke(struct key *keyring);
static void keyring_destroy(struct key *keyring);
static void keyring_describe(const struct key *keyring, struct seq_file *m);
@@ -76,9 +83,9 @@ static long keyring_read(const struct key *keyring,
struct key_type key_type_keyring = {
.name = "keyring",
- .def_datalen = sizeof(struct keyring_list),
+ .def_datalen = 0,
.instantiate = keyring_instantiate,
- .match = keyring_match,
+ .match = user_match,
.revoke = keyring_revoke,
.destroy = keyring_destroy,
.describe = keyring_describe,
@@ -127,6 +134,7 @@ static int keyring_instantiate(struct key *keyring,
ret = -EINVAL;
if (prep->datalen == 0) {
+ assoc_array_init(&keyring->keys);
/* make the keyring available by name if it has one */
keyring_publish_name(keyring);
ret = 0;
@@ -136,15 +144,225 @@ static int keyring_instantiate(struct key *keyring,
}
/*
- * Match keyrings on their name
+ * Multiply a 64-bit value by a 32-bit value to get a 96-bit product and fold
+ * it back to 64 bits. Ideally we'd fold the carry back in too, but that
+ * requires inline asm.
+ */
+static u64 mult_64x32_and_fold(u64 x, u32 y)
+{
+ u64 hi = (u64)(u32)(x >> 32) * y;
+ u64 lo = (u64)(u32)(x) * y;
+ return lo + ((u64)(u32)hi << 32) + (u32)(hi >> 32);
+}
+
+/*
+ * Hash a key type and description.
+ */
+static unsigned long hash_key_type_and_desc(const struct keyring_index_key *index_key)
+{
+ const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP;
+ const unsigned long fan_mask = ASSOC_ARRAY_FAN_MASK;
+ const char *description = index_key->description;
+ unsigned long hash, type;
+ u32 piece;
+ u64 acc;
+ int n, desc_len = index_key->desc_len;
+
+ type = (unsigned long)index_key->type;
+
+ acc = mult_64x32_and_fold(type, desc_len + 13);
+ acc = mult_64x32_and_fold(acc, 9207);
+ for (;;) {
+ n = desc_len;
+ if (n <= 0)
+ break;
+ if (n > 4)
+ n = 4;
+ piece = 0;
+ memcpy(&piece, description, n);
+ description += n;
+ desc_len -= n;
+ acc = mult_64x32_and_fold(acc, piece);
+ acc = mult_64x32_and_fold(acc, 9207);
+ }
+
+ /* Fold the hash down to 32 bits if need be. */
+ hash = acc;
+ if (ASSOC_ARRAY_KEY_CHUNK_SIZE == 32)
+ hash ^= acc >> 32;
+
+ /* Squidge all the keyrings into a separate part of the tree from
+ * ordinary keys by making sure the lowest level segment in the hash is
+ * zero for keyrings and non-zero otherwise.
+ */
+ if (index_key->type != &key_type_keyring && (hash & fan_mask) == 0)
+ return hash | (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1;
+ if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0)
+ return (hash + (hash << level_shift)) & ~fan_mask;
+ return hash;
+}
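
The two fix-ups at the end enforce the invariant that search_nested_keyrings() later relies on: at the root level, keyrings always hash into fan-out slot 0 and non-keyrings never do. A simplified standalone check of just that invariant (it drops the bit remixing that preserves hash quality, and assumes the upstream fan-out of 16, i.e. a fan mask of 0xf):

#include <assert.h>

#define FAN_MASK 0xfUL	/* assumes ASSOC_ARRAY_FAN_OUT == 16 */

static unsigned long fixup(unsigned long hash, int is_keyring)
{
	if (!is_keyring && (hash & FAN_MASK) == 0)
		hash |= 1;		/* push ordinary keys off slot 0 */
	if (is_keyring && (hash & FAN_MASK) != 0)
		hash &= ~FAN_MASK;	/* pull keyrings onto slot 0 */
	return hash;
}

int main(void)
{
	for (unsigned long h = 0; h < 64; h++) {
		assert((fixup(h, 1) & FAN_MASK) == 0);
		assert((fixup(h, 0) & FAN_MASK) != 0);
	}
	return 0;
}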
+
+/*
+ * Build the next index key chunk.
+ *
+ * On 32-bit systems the index key is laid out as:
+ *
+ * 0 4 5 9...
+ * hash desclen typeptr desc[]
+ *
+ * On 64-bit systems:
+ *
+ * 0 8 9 17...
+ * hash desclen typeptr desc[]
+ *
+ * We return it one word-sized chunk at a time.
*/
-static int keyring_match(const struct key *keyring, const void *description)
+static unsigned long keyring_get_key_chunk(const void *data, int level)
+{
+ const struct keyring_index_key *index_key = data;
+ unsigned long chunk = 0;
+ long offset = 0;
+ int desc_len = index_key->desc_len, n = sizeof(chunk);
+
+ level /= ASSOC_ARRAY_KEY_CHUNK_SIZE;
+ switch (level) {
+ case 0:
+ return hash_key_type_and_desc(index_key);
+ case 1:
+ return ((unsigned long)index_key->type << 8) | desc_len;
+ case 2:
+ if (desc_len == 0)
+ return (u8)((unsigned long)index_key->type >>
+ (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8));
+ n--;
+ offset = 1;
+ default:
+ offset += sizeof(chunk) - 1;
+ offset += (level - 3) * sizeof(chunk);
+ if (offset >= desc_len)
+ return 0;
+ desc_len -= offset;
+ if (desc_len > n)
+ desc_len = n;
+ offset += desc_len;
+ do {
+ chunk <<= 8;
+ chunk |= ((u8*)index_key->description)[--offset];
+ } while (--desc_len > 0);
+
+ if (level == 2) {
+ chunk <<= 8;
+ chunk |= (u8)((unsigned long)index_key->type >>
+ (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8));
+ }
+ return chunk;
+ }
+}
+
+static unsigned long keyring_get_object_key_chunk(const void *object, int level)
+{
+ const struct key *key = keyring_ptr_to_key(object);
+ return keyring_get_key_chunk(&key->index_key, level);
+}
+
+static bool keyring_compare_object(const void *object, const void *data)
{
- return keyring->description &&
- strcmp(keyring->description, description) == 0;
+ const struct keyring_index_key *index_key = data;
+ const struct key *key = keyring_ptr_to_key(object);
+
+ return key->index_key.type == index_key->type &&
+ key->index_key.desc_len == index_key->desc_len &&
+ memcmp(key->index_key.description, index_key->description,
+ index_key->desc_len) == 0;
}
/*
+ * Compare the index keys of a pair of objects and determine the bit position
+ * at which they differ - if they differ.
+ */
+static int keyring_diff_objects(const void *object, const void *data)
+{
+ const struct key *key_a = keyring_ptr_to_key(object);
+ const struct keyring_index_key *a = &key_a->index_key;
+ const struct keyring_index_key *b = data;
+ unsigned long seg_a, seg_b;
+ int level, i;
+
+ level = 0;
+ seg_a = hash_key_type_and_desc(a);
+ seg_b = hash_key_type_and_desc(b);
+ if ((seg_a ^ seg_b) != 0)
+ goto differ;
+
+ /* The number of bits contributed by the hash is controlled by a
+ * constant in the assoc_array headers. Everything else thereafter we
+ * can deal with as being machine word-size dependent.
+ */
+ level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8;
+ seg_a = a->desc_len;
+ seg_b = b->desc_len;
+ if ((seg_a ^ seg_b) != 0)
+ goto differ;
+
+ /* The next bit may not work on big endian */
+ level++;
+ seg_a = (unsigned long)a->type;
+ seg_b = (unsigned long)b->type;
+ if ((seg_a ^ seg_b) != 0)
+ goto differ;
+
+ level += sizeof(unsigned long);
+ if (a->desc_len == 0)
+ goto same;
+
+ i = 0;
+ if (!(((unsigned long)a->description | (unsigned long)b->description) &
+ (sizeof(unsigned long) - 1))) {
+ /* Both descriptions are word-aligned: compare whole words first */
+ while (i < (a->desc_len & ~(sizeof(unsigned long) - 1))) {
+ seg_a = *(unsigned long *)(a->description + i);
+ seg_b = *(unsigned long *)(b->description + i);
+ if ((seg_a ^ seg_b) != 0)
+ goto differ_plus_i;
+ i += sizeof(unsigned long);
+ }
+ }
+
+ for (; i < a->desc_len; i++) {
+ seg_a = *(unsigned char *)(a->description + i);
+ seg_b = *(unsigned char *)(b->description + i);
+ if ((seg_a ^ seg_b) != 0)
+ goto differ_plus_i;
+ }
+
+same:
+ return -1;
+
+differ_plus_i:
+ level += i;
+differ:
+ i = level * 8 + __ffs(seg_a ^ seg_b);
+ return i;
+}
+
+/*
+ * Free an object after stripping the keyring flag off of the pointer.
+ */
+static void keyring_free_object(void *object)
+{
+ key_put(keyring_ptr_to_key(object));
+}
+
+/*
+ * Operations for keyring management by the index-tree routines.
+ */
+static const struct assoc_array_ops keyring_assoc_array_ops = {
+ .get_key_chunk = keyring_get_key_chunk,
+ .get_object_key_chunk = keyring_get_object_key_chunk,
+ .compare_object = keyring_compare_object,
+ .diff_objects = keyring_diff_objects,
+ .free_object = keyring_free_object,
+};
+
+/*
* Clean up a keyring when it is destroyed. Unpublish its name if it had one
* and dispose of its data.
*
@@ -155,9 +373,6 @@ static int keyring_match(const struct key *keyring, const void *description)
*/
static void keyring_destroy(struct key *keyring)
{
- struct keyring_list *klist;
- int loop;
-
if (keyring->description) {
write_lock(&keyring_name_lock);
@@ -168,12 +383,7 @@ static void keyring_destroy(struct key *keyring)
write_unlock(&keyring_name_lock);
}
- klist = rcu_access_pointer(keyring->payload.subscriptions);
- if (klist) {
- for (loop = klist->nkeys - 1; loop >= 0; loop--)
- key_put(rcu_access_pointer(klist->keys[loop]));
- kfree(klist);
- }
+ assoc_array_destroy(&keyring->keys, &keyring_assoc_array_ops);
}
/*
@@ -181,76 +391,88 @@ static void keyring_destroy(struct key *keyring)
*/
static void keyring_describe(const struct key *keyring, struct seq_file *m)
{
- struct keyring_list *klist;
-
if (keyring->description)
seq_puts(m, keyring->description);
else
seq_puts(m, "[anon]");
if (key_is_instantiated(keyring)) {
- rcu_read_lock();
- klist = rcu_dereference(keyring->payload.subscriptions);
- if (klist)
- seq_printf(m, ": %u/%u", klist->nkeys, klist->maxkeys);
+ if (keyring->keys.nr_leaves_on_tree != 0)
+ seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
else
seq_puts(m, ": empty");
- rcu_read_unlock();
}
}
+struct keyring_read_iterator_context {
+ size_t qty;
+ size_t count;
+ key_serial_t __user *buffer;
+};
+
+static int keyring_read_iterator(const void *object, void *data)
+{
+ struct keyring_read_iterator_context *ctx = data;
+ const struct key *key = keyring_ptr_to_key(object);
+ int ret;
+
+ kenter("{%s,%d},,{%zu/%zu}",
+ key->type->name, key->serial, ctx->count, ctx->qty);
+
+ if (ctx->count >= ctx->qty)
+ return 1;
+
+ ret = put_user(key->serial, ctx->buffer);
+ if (ret < 0)
+ return ret;
+ ctx->buffer++;
+ ctx->count += sizeof(key->serial);
+ return 0;
+}
+
/*
* Read a list of key IDs from the keyring's contents in binary form
*
- * The keyring's semaphore is read-locked by the caller.
+ * The keyring's semaphore is read-locked by the caller. This prevents someone
+ * from modifying it under us - which could cause us to read key IDs multiple
+ * times.
*/
static long keyring_read(const struct key *keyring,
char __user *buffer, size_t buflen)
{
- struct keyring_list *klist;
- struct key *key;
- size_t qty, tmp;
- int loop, ret;
+ struct keyring_read_iterator_context ctx;
+ unsigned long nr_keys;
+ int ret;
- ret = 0;
- klist = rcu_dereference_locked_keyring(keyring);
- if (klist) {
- /* calculate how much data we could return */
- qty = klist->nkeys * sizeof(key_serial_t);
-
- if (buffer && buflen > 0) {
- if (buflen > qty)
- buflen = qty;
-
- /* copy the IDs of the subscribed keys into the
- * buffer */
- ret = -EFAULT;
-
- for (loop = 0; loop < klist->nkeys; loop++) {
- key = rcu_deref_link_locked(klist, loop,
- keyring);
-
- tmp = sizeof(key_serial_t);
- if (tmp > buflen)
- tmp = buflen;
-
- if (copy_to_user(buffer,
- &key->serial,
- tmp) != 0)
- goto error;
-
- buflen -= tmp;
- if (buflen == 0)
- break;
- buffer += tmp;
- }
- }
+ kenter("{%d},,%zu", key_serial(keyring), buflen);
+
+ if (buflen & (sizeof(key_serial_t) - 1))
+ return -EINVAL;
+
+ nr_keys = keyring->keys.nr_leaves_on_tree;
+ if (nr_keys == 0)
+ return 0;
- ret = qty;
+ /* Calculate how much data we could return */
+ ctx.qty = nr_keys * sizeof(key_serial_t);
+
+ if (!buffer || !buflen)
+ return ctx.qty;
+
+ /* Clamp to the caller's buffer size so we can't write past it */
+ if (buflen < ctx.qty)
+ ctx.qty = buflen;
+
+ /* Copy the IDs of the subscribed keys into the buffer */
+ ctx.buffer = (key_serial_t __user *)buffer;
+ ctx.count = 0;
+ ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
+ if (ret < 0) {
+ kleave(" = %d [iterate]", ret);
+ return ret;
}
-error:
- return ret;
+ kleave(" = %zu [ok]", ctx.count);
+ return ctx.count;
}
/*
@@ -277,227 +499,361 @@ struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
}
EXPORT_SYMBOL(keyring_alloc);
-/**
- * keyring_search_aux - Search a keyring tree for a key matching some criteria
- * @keyring_ref: A pointer to the keyring with possession indicator.
- * @cred: The credentials to use for permissions checks.
- * @type: The type of key to search for.
- * @description: Parameter for @match.
- * @match: Function to rule on whether or not a key is the one required.
- * @no_state_check: Don't check if a matching key is bad
- *
- * Search the supplied keyring tree for a key that matches the criteria given.
- * The root keyring and any linked keyrings must grant Search permission to the
- * caller to be searchable and keys can only be found if they too grant Search
- * to the caller. The possession flag on the root keyring pointer controls use
- * of the possessor bits in permissions checking of the entire tree. In
- * addition, the LSM gets to forbid keyring searches and key matches.
- *
- * The search is performed as a breadth-then-depth search up to the prescribed
- * limit (KEYRING_SEARCH_MAX_DEPTH).
- *
- * Keys are matched to the type provided and are then filtered by the match
- * function, which is given the description to use in any way it sees fit. The
- * match function may use any attributes of a key that it wishes to to
- * determine the match. Normally the match function from the key type would be
- * used.
- *
- * RCU is used to prevent the keyring key lists from disappearing without the
- * need to take lots of locks.
- *
- * Returns a pointer to the found key and increments the key usage count if
- * successful; -EAGAIN if no matching keys were found, or if expired or revoked
- * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the
- * specified keyring wasn't a keyring.
- *
- * In the case of a successful return, the possession attribute from
- * @keyring_ref is propagated to the returned key reference.
+/*
+ * Iteration function to consider each key found.
*/
-key_ref_t keyring_search_aux(key_ref_t keyring_ref,
- const struct cred *cred,
- struct key_type *type,
- const void *description,
- key_match_func_t match,
- bool no_state_check)
+static int keyring_search_iterator(const void *object, void *iterator_data)
{
- struct {
- /* Need a separate keylist pointer for RCU purposes */
- struct key *keyring;
- struct keyring_list *keylist;
- int kix;
- } stack[KEYRING_SEARCH_MAX_DEPTH];
-
- struct keyring_list *keylist;
- struct timespec now;
- unsigned long possessed, kflags;
- struct key *keyring, *key;
- key_ref_t key_ref;
- long err;
- int sp, nkeys, kix;
+ struct keyring_search_context *ctx = iterator_data;
+ const struct key *key = keyring_ptr_to_key(object);
+ unsigned long kflags = key->flags;
- keyring = key_ref_to_ptr(keyring_ref);
- possessed = is_key_possessed(keyring_ref);
- key_check(keyring);
+ kenter("{%d}", key->serial);
- /* top keyring must have search permission to begin the search */
- err = key_task_permission(keyring_ref, cred, KEY_SEARCH);
- if (err < 0) {
- key_ref = ERR_PTR(err);
- goto error;
+ /* ignore keys not of this type */
+ if (key->type != ctx->index_key.type) {
+ kleave(" = 0 [!type]");
+ return 0;
}
- key_ref = ERR_PTR(-ENOTDIR);
- if (keyring->type != &key_type_keyring)
- goto error;
+ /* skip invalidated, revoked and expired keys */
+ if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
+ if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED))) {
+ ctx->result = ERR_PTR(-EKEYREVOKED);
+ kleave(" = %d [invrev]", ctx->skipped_ret);
+ goto skipped;
+ }
- rcu_read_lock();
+ if (key->expiry && ctx->now.tv_sec >= key->expiry) {
+ ctx->result = ERR_PTR(-EKEYEXPIRED);
+ kleave(" = %d [expire]", ctx->skipped_ret);
+ goto skipped;
+ }
+ }
- now = current_kernel_time();
- err = -EAGAIN;
- sp = 0;
-
- /* firstly we should check to see if this top-level keyring is what we
- * are looking for */
- key_ref = ERR_PTR(-EAGAIN);
- kflags = keyring->flags;
- if (keyring->type == type && match(keyring, description)) {
- key = keyring;
- if (no_state_check)
- goto found;
+ /* keys that don't match */
+ if (!ctx->match(key, ctx->match_data)) {
+ kleave(" = 0 [!match]");
+ return 0;
+ }
- /* check it isn't negative and hasn't expired or been
- * revoked */
- if (kflags & (1 << KEY_FLAG_REVOKED))
- goto error_2;
- if (key->expiry && now.tv_sec >= key->expiry)
- goto error_2;
- key_ref = ERR_PTR(key->type_data.reject_error);
- if (kflags & (1 << KEY_FLAG_NEGATIVE))
- goto error_2;
- goto found;
+ /* key must have search permissions */
+ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) &&
+ key_task_permission(make_key_ref(key, ctx->possessed),
+ ctx->cred, KEY_SEARCH) < 0) {
+ ctx->result = ERR_PTR(-EACCES);
+ kleave(" = %d [!perm]", ctx->skipped_ret);
+ goto skipped;
}
- /* otherwise, the top keyring must not be revoked, expired, or
- * negatively instantiated if we are to search it */
- key_ref = ERR_PTR(-EAGAIN);
- if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
- (1 << KEY_FLAG_REVOKED) |
- (1 << KEY_FLAG_NEGATIVE)) ||
- (keyring->expiry && now.tv_sec >= keyring->expiry))
- goto error_2;
-
- /* start processing a new keyring */
-descend:
- kflags = keyring->flags;
- if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
- (1 << KEY_FLAG_REVOKED)))
- goto not_this_keyring;
+ if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
+ /* we set a different error code if we pass a negative key */
+ if (kflags & (1 << KEY_FLAG_NEGATIVE)) {
+ smp_rmb();
+ ctx->result = ERR_PTR(key->type_data.reject_error);
+ kleave(" = %d [neg]", ctx->skipped_ret);
+ goto skipped;
+ }
+ }
- keylist = rcu_dereference(keyring->payload.subscriptions);
- if (!keylist)
- goto not_this_keyring;
+ /* Found */
+ ctx->result = make_key_ref(key, ctx->possessed);
+ kleave(" = 1 [found]");
+ return 1;
- /* iterate through the keys in this keyring first */
- nkeys = keylist->nkeys;
- smp_rmb();
- for (kix = 0; kix < nkeys; kix++) {
- key = rcu_dereference(keylist->keys[kix]);
- kflags = key->flags;
+skipped:
+ return ctx->skipped_ret;
+}
- /* ignore keys not of this type */
- if (key->type != type)
- continue;
+/*
+ * Search inside a keyring for a key. We can search by walking to it
+ * directly based on its index-key or we can iterate over the entire
+ * tree looking for it, based on the match function.
+ */
+static int search_keyring(struct key *keyring, struct keyring_search_context *ctx)
+{
+ if ((ctx->flags & KEYRING_SEARCH_LOOKUP_TYPE) ==
+ KEYRING_SEARCH_LOOKUP_DIRECT) {
+ const void *object;
+
+ object = assoc_array_find(&keyring->keys,
+ &keyring_assoc_array_ops,
+ &ctx->index_key);
+ return object ? ctx->iterator(object, ctx) : 0;
+ }
+ return assoc_array_iterate(&keyring->keys, ctx->iterator, ctx);
+}
- /* skip invalidated, revoked and expired keys */
- if (!no_state_check) {
- if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
- (1 << KEY_FLAG_REVOKED)))
- continue;
+/*
+ * Search a tree of keyrings that point to other keyrings up to the maximum
+ * depth.
+ */
+static bool search_nested_keyrings(struct key *keyring,
+ struct keyring_search_context *ctx)
+{
+ struct {
+ struct key *keyring;
+ struct assoc_array_node *node;
+ int slot;
+ } stack[KEYRING_SEARCH_MAX_DEPTH];
- if (key->expiry && now.tv_sec >= key->expiry)
- continue;
- }
+ struct assoc_array_shortcut *shortcut;
+ struct assoc_array_node *node;
+ struct assoc_array_ptr *ptr;
+ struct key *key;
+ int sp = 0, slot;
- /* keys that don't match */
- if (!match(key, description))
- continue;
+ kenter("{%d},{%s,%s}",
+ keyring->serial,
+ ctx->index_key.type->name,
+ ctx->index_key.description);
- /* key must have search permissions */
- if (key_task_permission(make_key_ref(key, possessed),
- cred, KEY_SEARCH) < 0)
- continue;
+ if (ctx->index_key.description)
+ ctx->index_key.desc_len = strlen(ctx->index_key.description);
- if (no_state_check)
+ /* Check to see if this top-level keyring is what we are looking for
+ * and whether it is valid or not.
+ */
+ if (ctx->flags & KEYRING_SEARCH_LOOKUP_ITERATE ||
+ keyring_compare_object(keyring, &ctx->index_key)) {
+ ctx->skipped_ret = 2;
+ ctx->flags |= KEYRING_SEARCH_DO_STATE_CHECK;
+ switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) {
+ case 1:
goto found;
-
- /* we set a different error code if we pass a negative key */
- if (kflags & (1 << KEY_FLAG_NEGATIVE)) {
- err = key->type_data.reject_error;
- continue;
+ case 2:
+ return false;
+ default:
+ break;
}
+ }
+
+ ctx->skipped_ret = 0;
+ if (ctx->flags & KEYRING_SEARCH_NO_STATE_CHECK)
+ ctx->flags &= ~KEYRING_SEARCH_DO_STATE_CHECK;
+ /* Start processing a new keyring */
+descend_to_keyring:
+ kdebug("descend to %d", keyring->serial);
+ if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED)))
+ goto not_this_keyring;
+
+ /* Search through the keys in this keyring before searching its
+ * subtrees.
+ */
+ if (search_keyring(keyring, ctx))
goto found;
- }
- /* search through the keyrings nested in this one */
- kix = 0;
-ascend:
- nkeys = keylist->nkeys;
- smp_rmb();
- for (; kix < nkeys; kix++) {
- key = rcu_dereference(keylist->keys[kix]);
- if (key->type != &key_type_keyring)
- continue;
+ /* Then manually iterate through the keyrings nested in this one.
+ *
+ * Start from the root node of the index tree. Because of the way the
+ * hash function has been set up, keyrings cluster on the leftmost
+ * branch of the root node (root slot 0) or in the root node itself.
+ * Non-keyrings avoid the leftmost branch of the root entirely (root
+ * slots 1-15).
+ */
+ ptr = ACCESS_ONCE(keyring->keys.root);
+ if (!ptr)
+ goto not_this_keyring;
- /* recursively search nested keyrings
- * - only search keyrings for which we have search permission
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ /* If the root is a shortcut, either the keyring only contains
+ * keyring pointers (everything clusters behind root slot 0) or
+ * doesn't contain any keyring pointers.
*/
- if (sp >= KEYRING_SEARCH_MAX_DEPTH)
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ smp_read_barrier_depends();
+ if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0)
+ goto not_this_keyring;
+
+ ptr = ACCESS_ONCE(shortcut->next_node);
+ node = assoc_array_ptr_to_node(ptr);
+ goto begin_node;
+ }
+
+ node = assoc_array_ptr_to_node(ptr);
+ smp_read_barrier_depends();
+
+ ptr = node->slots[0];
+ if (!assoc_array_ptr_is_meta(ptr))
+ goto begin_node;
+
+descend_to_node:
+ /* Descend to a more distal node in this keyring's content tree and go
+ * through that.
+ */
+ kdebug("descend");
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ smp_read_barrier_depends();
+ ptr = ACCESS_ONCE(shortcut->next_node);
+ BUG_ON(!assoc_array_ptr_is_node(ptr));
+ }
+ node = assoc_array_ptr_to_node(ptr);
+
+begin_node:
+ kdebug("begin_node");
+ smp_read_barrier_depends();
+ slot = 0;
+ascend_to_node:
+ /* Go through the slots in a node */
+ for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ ptr = ACCESS_ONCE(node->slots[slot]);
+
+ if (assoc_array_ptr_is_meta(ptr) && node->back_pointer)
+ goto descend_to_node;
+
+ if (!keyring_ptr_is_keyring(ptr))
continue;
- if (key_task_permission(make_key_ref(key, possessed),
- cred, KEY_SEARCH) < 0)
+ key = keyring_ptr_to_key(ptr);
+
+ if (sp >= KEYRING_SEARCH_MAX_DEPTH) {
+ if (ctx->flags & KEYRING_SEARCH_DETECT_TOO_DEEP) {
+ ctx->result = ERR_PTR(-ELOOP);
+ return false;
+ }
+ goto not_this_keyring;
+ }
+
+ /* Search a nested keyring */
+ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) &&
+ key_task_permission(make_key_ref(key, ctx->possessed),
+ ctx->cred, KEY_SEARCH) < 0)
continue;
/* stack the current position */
stack[sp].keyring = keyring;
- stack[sp].keylist = keylist;
- stack[sp].kix = kix;
+ stack[sp].node = node;
+ stack[sp].slot = slot;
sp++;
/* begin again with the new keyring */
keyring = key;
- goto descend;
+ goto descend_to_keyring;
}
- /* the keyring we're looking at was disqualified or didn't contain a
- * matching key */
+ /* We've dealt with all the slots in the current node, so now we need
+ * to ascend to the parent and continue processing there.
+ */
+ ptr = ACCESS_ONCE(node->back_pointer);
+ slot = node->parent_slot;
+
+ if (ptr && assoc_array_ptr_is_shortcut(ptr)) {
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ smp_read_barrier_depends();
+ ptr = ACCESS_ONCE(shortcut->back_pointer);
+ slot = shortcut->parent_slot;
+ }
+ if (!ptr)
+ goto not_this_keyring;
+ node = assoc_array_ptr_to_node(ptr);
+ smp_read_barrier_depends();
+ slot++;
+
+ /* If we've ascended to the root (zero backpointer), we must have just
+ * finished processing the leftmost branch rather than the root slots -
+ * so there can't be any more keyrings for us to find.
+ */
+ if (node->back_pointer) {
+ kdebug("ascend %d", slot);
+ goto ascend_to_node;
+ }
+
+ /* The keyring we're looking at was disqualified or didn't contain a
+ * matching key.
+ */
not_this_keyring:
- if (sp > 0) {
- /* resume the processing of a keyring higher up in the tree */
- sp--;
- keyring = stack[sp].keyring;
- keylist = stack[sp].keylist;
- kix = stack[sp].kix + 1;
- goto ascend;
+ kdebug("not_this_keyring %d", sp);
+ if (sp <= 0) {
+ kleave(" = false");
+ return false;
}
- key_ref = ERR_PTR(err);
- goto error_2;
+ /* Resume the processing of a keyring higher up in the tree */
+ sp--;
+ keyring = stack[sp].keyring;
+ node = stack[sp].node;
+ slot = stack[sp].slot + 1;
+ kdebug("ascend to %d [%d]", keyring->serial, slot);
+ goto ascend_to_node;
- /* we found a viable match */
+ /* We found a viable match */
found:
- atomic_inc(&key->usage);
- key->last_used_at = now.tv_sec;
- keyring->last_used_at = now.tv_sec;
- while (sp > 0)
- stack[--sp].keyring->last_used_at = now.tv_sec;
+ key = key_ref_to_ptr(ctx->result);
key_check(key);
- key_ref = make_key_ref(key, possessed);
-error_2:
+ if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) {
+ key->last_used_at = ctx->now.tv_sec;
+ keyring->last_used_at = ctx->now.tv_sec;
+ while (sp > 0)
+ stack[--sp].keyring->last_used_at = ctx->now.tv_sec;
+ }
+ kleave(" = true");
+ return true;
+}
+
+/**
+ * keyring_search_aux - Search a keyring tree for a key matching some criteria
+ * @keyring_ref: A pointer to the keyring with possession indicator.
+ * @ctx: The keyring search context.
+ *
+ * Search the supplied keyring tree for a key that matches the criteria given.
+ * The root keyring and any linked keyrings must grant Search permission to the
+ * caller to be searchable and keys can only be found if they too grant Search
+ * to the caller. The possession flag on the root keyring pointer controls use
+ * of the possessor bits in permissions checking of the entire tree. In
+ * addition, the LSM gets to forbid keyring searches and key matches.
+ *
+ * The search is performed as a breadth-then-depth search up to the prescribed
+ * limit (KEYRING_SEARCH_MAX_DEPTH).
+ *
+ * Keys are matched to the type provided and are then filtered by the match
+ * function, which is given the description to use in any way it sees fit. The
+ * match function may use any attributes of a key that it wishes to
+ * determine the match. Normally the match function from the key type would be
+ * used.
+ *
+ * RCU can be used to prevent the keyring key lists from disappearing without
+ * the need to take lots of locks.
+ *
+ * Returns a pointer to the found key and increments the key usage count if
+ * successful; -EAGAIN if no matching keys were found, or if expired or revoked
+ * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the
+ * specified keyring wasn't a keyring.
+ *
+ * In the case of a successful return, the possession attribute from
+ * @keyring_ref is propagated to the returned key reference.
+ */
+key_ref_t keyring_search_aux(key_ref_t keyring_ref,
+ struct keyring_search_context *ctx)
+{
+ struct key *keyring;
+ long err;
+
+ ctx->iterator = keyring_search_iterator;
+ ctx->possessed = is_key_possessed(keyring_ref);
+ ctx->result = ERR_PTR(-EAGAIN);
+
+ keyring = key_ref_to_ptr(keyring_ref);
+ key_check(keyring);
+
+ if (keyring->type != &key_type_keyring)
+ return ERR_PTR(-ENOTDIR);
+
+ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM)) {
+ err = key_task_permission(keyring_ref, ctx->cred, KEY_SEARCH);
+ if (err < 0)
+ return ERR_PTR(err);
+ }
+
+ rcu_read_lock();
+ ctx->now = current_kernel_time();
+ if (search_nested_keyrings(keyring, ctx))
+ __key_get(key_ref_to_ptr(ctx->result));
rcu_read_unlock();
-error:
- return key_ref;
+ return ctx->result;
}
/**
@@ -507,77 +863,73 @@ error:
* @description: The name of the keyring we want to find.
*
* As keyring_search_aux() above, but using the current task's credentials and
- * type's default matching function.
+ * type's default matching function and preferred search method.
*/
key_ref_t keyring_search(key_ref_t keyring,
struct key_type *type,
const char *description)
{
- if (!type->match)
+ struct keyring_search_context ctx = {
+ .index_key.type = type,
+ .index_key.description = description,
+ .cred = current_cred(),
+ .match = type->match,
+ .match_data = description,
+ .flags = (type->def_lookup_type |
+ KEYRING_SEARCH_DO_STATE_CHECK),
+ };
+
+ if (!ctx.match)
return ERR_PTR(-ENOKEY);
- return keyring_search_aux(keyring, current->cred,
- type, description, type->match, false);
+ return keyring_search_aux(keyring, &ctx);
}
EXPORT_SYMBOL(keyring_search);
/*
- * Search the given keyring only (no recursion).
+ * Search the given keyring for a key that might be updated.
*
* The caller must guarantee that the keyring is a keyring and that the
- * permission is granted to search the keyring as no check is made here.
- *
- * RCU is used to make it unnecessary to lock the keyring key list here.
+ * permission is granted to modify the keyring as no check is made here. The
+ * caller must also hold a lock on the keyring semaphore.
*
* Returns a pointer to the found key with usage count incremented if
- * successful and returns -ENOKEY if not found. Revoked keys and keys not
- * providing the requested permission are skipped over.
+ * successful and returns NULL if not found. Revoked and invalidated keys are
+ * skipped over.
*
* If successful, the possession indicator is propagated from the keyring ref
* to the returned key reference.
*/
-key_ref_t __keyring_search_one(key_ref_t keyring_ref,
- const struct key_type *ktype,
- const char *description,
- key_perm_t perm)
+key_ref_t find_key_to_update(key_ref_t keyring_ref,
+ const struct keyring_index_key *index_key)
{
- struct keyring_list *klist;
- unsigned long possessed;
struct key *keyring, *key;
- int nkeys, loop;
+ const void *object;
keyring = key_ref_to_ptr(keyring_ref);
- possessed = is_key_possessed(keyring_ref);
- rcu_read_lock();
+ kenter("{%d},{%s,%s}",
+ keyring->serial, index_key->type->name, index_key->description);
- klist = rcu_dereference(keyring->payload.subscriptions);
- if (klist) {
- nkeys = klist->nkeys;
- smp_rmb();
- for (loop = 0; loop < nkeys ; loop++) {
- key = rcu_dereference(klist->keys[loop]);
- if (key->type == ktype &&
- (!key->type->match ||
- key->type->match(key, description)) &&
- key_permission(make_key_ref(key, possessed),
- perm) == 0 &&
- !(key->flags & ((1 << KEY_FLAG_INVALIDATED) |
- (1 << KEY_FLAG_REVOKED)))
- )
- goto found;
- }
- }
+ object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops,
+ index_key);
- rcu_read_unlock();
- return ERR_PTR(-ENOKEY);
+ if (object)
+ goto found;
+
+ kleave(" = NULL");
+ return NULL;
found:
- atomic_inc(&key->usage);
- keyring->last_used_at = key->last_used_at =
- current_kernel_time().tv_sec;
- rcu_read_unlock();
- return make_key_ref(key, possessed);
+ key = keyring_ptr_to_key(object);
+ if (key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED))) {
+ kleave(" = NULL [x]");
+ return NULL;
+ }
+ __key_get(key);
+ kleave(" = {%d}", key->serial);
+ return make_key_ref(key, is_key_possessed(keyring_ref));
}
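A hedged sketch of a call into the new helper; the index key mirrors the
fields keyring_search() fills in above, desc is an assumed caller-supplied
string, and keyring->sem must already be held as the comment requires:

	struct keyring_index_key index_key = {
		.type        = &key_type_user,
		.description = desc,
		.desc_len    = strlen(desc),
	};
	key_ref_t ref = find_key_to_update(keyring_ref, &index_key);
	/* NULL means no live key to update: allocate and link a new one */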
/*
@@ -640,6 +992,19 @@ out:
return keyring;
}
+static int keyring_detect_cycle_iterator(const void *object,
+ void *iterator_data)
+{
+ struct keyring_search_context *ctx = iterator_data;
+ const struct key *key = keyring_ptr_to_key(object);
+
+ kenter("{%d}", key->serial);
+
+ BUG_ON(key != ctx->match_data);
+ ctx->result = ERR_PTR(-EDEADLK);
+ return 1;
+}
+
/*
 * See if a cycle will be created by inserting acyclic tree B in acyclic
* tree A at the topmost level (ie: as a direct child of A).
@@ -649,116 +1014,39 @@ out:
*/
static int keyring_detect_cycle(struct key *A, struct key *B)
{
- struct {
- struct keyring_list *keylist;
- int kix;
- } stack[KEYRING_SEARCH_MAX_DEPTH];
-
- struct keyring_list *keylist;
- struct key *subtree, *key;
- int sp, nkeys, kix, ret;
+ struct keyring_search_context ctx = {
+ .index_key = A->index_key,
+ .match_data = A,
+ .iterator = keyring_detect_cycle_iterator,
+ .flags = (KEYRING_SEARCH_LOOKUP_DIRECT |
+ KEYRING_SEARCH_NO_STATE_CHECK |
+ KEYRING_SEARCH_NO_UPDATE_TIME |
+ KEYRING_SEARCH_NO_CHECK_PERM |
+ KEYRING_SEARCH_DETECT_TOO_DEEP),
+ };
rcu_read_lock();
-
- ret = -EDEADLK;
- if (A == B)
- goto cycle_detected;
-
- subtree = B;
- sp = 0;
-
- /* start processing a new keyring */
-descend:
- if (test_bit(KEY_FLAG_REVOKED, &subtree->flags))
- goto not_this_keyring;
-
- keylist = rcu_dereference(subtree->payload.subscriptions);
- if (!keylist)
- goto not_this_keyring;
- kix = 0;
-
-ascend:
- /* iterate through the remaining keys in this keyring */
- nkeys = keylist->nkeys;
- smp_rmb();
- for (; kix < nkeys; kix++) {
- key = rcu_dereference(keylist->keys[kix]);
-
- if (key == A)
- goto cycle_detected;
-
- /* recursively check nested keyrings */
- if (key->type == &key_type_keyring) {
- if (sp >= KEYRING_SEARCH_MAX_DEPTH)
- goto too_deep;
-
- /* stack the current position */
- stack[sp].keylist = keylist;
- stack[sp].kix = kix;
- sp++;
-
- /* begin again with the new keyring */
- subtree = key;
- goto descend;
- }
- }
-
- /* the keyring we're looking at was disqualified or didn't contain a
- * matching key */
-not_this_keyring:
- if (sp > 0) {
- /* resume the checking of a keyring higher up in the tree */
- sp--;
- keylist = stack[sp].keylist;
- kix = stack[sp].kix + 1;
- goto ascend;
- }
-
- ret = 0; /* no cycles detected */
-
-error:
+ search_nested_keyrings(B, &ctx);
rcu_read_unlock();
- return ret;
-
-too_deep:
- ret = -ELOOP;
- goto error;
-
-cycle_detected:
- ret = -EDEADLK;
- goto error;
-}
-
-/*
- * Dispose of a keyring list after the RCU grace period, freeing the unlinked
- * key
- */
-static void keyring_unlink_rcu_disposal(struct rcu_head *rcu)
-{
- struct keyring_list *klist =
- container_of(rcu, struct keyring_list, rcu);
-
- if (klist->delkey != USHRT_MAX)
- key_put(rcu_access_pointer(klist->keys[klist->delkey]));
- kfree(klist);
+ return PTR_ERR(ctx.result) == -EAGAIN ? 0 : PTR_ERR(ctx.result);
}
/*
 * Preallocate memory so that a key can be linked into a keyring.
*/
-int __key_link_begin(struct key *keyring, const struct key_type *type,
- const char *description, unsigned long *_prealloc)
+int __key_link_begin(struct key *keyring,
+ const struct keyring_index_key *index_key,
+ struct assoc_array_edit **_edit)
__acquires(&keyring->sem)
__acquires(&keyring_serialise_link_sem)
{
- struct keyring_list *klist, *nklist;
- unsigned long prealloc;
- unsigned max;
- time_t lowest_lru;
- size_t size;
- int loop, lru, ret;
+ struct assoc_array_edit *edit;
+ int ret;
+
+ kenter("%d,%s,%s,",
+ keyring->serial, index_key->type->name, index_key->description);
- kenter("%d,%s,%s,", key_serial(keyring), type->name, description);
+ BUG_ON(index_key->desc_len == 0);
if (keyring->type != &key_type_keyring)
return -ENOTDIR;
@@ -771,100 +1059,39 @@ int __key_link_begin(struct key *keyring, const struct key_type *type,
/* serialise link/link calls to prevent parallel calls causing a cycle
 * when linking two keyrings in opposite orders */
- if (type == &key_type_keyring)
+ if (index_key->type == &key_type_keyring)
down_write(&keyring_serialise_link_sem);
- klist = rcu_dereference_locked_keyring(keyring);
-
- /* see if there's a matching key we can displace */
- lru = -1;
- if (klist && klist->nkeys > 0) {
- lowest_lru = TIME_T_MAX;
- for (loop = klist->nkeys - 1; loop >= 0; loop--) {
- struct key *key = rcu_deref_link_locked(klist, loop,
- keyring);
- if (key->type == type &&
- strcmp(key->description, description) == 0) {
- /* Found a match - we'll replace the link with
- * one to the new key. We record the slot
- * position.
- */
- klist->delkey = loop;
- prealloc = 0;
- goto done;
- }
- if (key->last_used_at < lowest_lru) {
- lowest_lru = key->last_used_at;
- lru = loop;
- }
- }
- }
-
- /* If the keyring is full then do an LRU discard */
- if (klist &&
- klist->nkeys == klist->maxkeys &&
- klist->maxkeys >= MAX_KEYRING_LINKS) {
- kdebug("LRU discard %d\n", lru);
- klist->delkey = lru;
- prealloc = 0;
- goto done;
- }
-
- /* check that we aren't going to overrun the user's quota */
- ret = key_payload_reserve(keyring,
- keyring->datalen + KEYQUOTA_LINK_BYTES);
- if (ret < 0)
+ /* Create an edit script that will insert/replace the key in the
+ * keyring tree.
+ */
+ edit = assoc_array_insert(&keyring->keys,
+ &keyring_assoc_array_ops,
+ index_key,
+ NULL);
+ if (IS_ERR(edit)) {
+ ret = PTR_ERR(edit);
goto error_sem;
+ }
- if (klist && klist->nkeys < klist->maxkeys) {
- /* there's sufficient slack space to append directly */
- klist->delkey = klist->nkeys;
- prealloc = KEY_LINK_FIXQUOTA;
- } else {
- /* grow the key list */
- max = 4;
- if (klist) {
- max += klist->maxkeys;
- if (max > MAX_KEYRING_LINKS)
- max = MAX_KEYRING_LINKS;
- BUG_ON(max <= klist->maxkeys);
- }
-
- size = sizeof(*klist) + sizeof(struct key *) * max;
-
- ret = -ENOMEM;
- nklist = kmalloc(size, GFP_KERNEL);
- if (!nklist)
- goto error_quota;
-
- nklist->maxkeys = max;
- if (klist) {
- memcpy(nklist->keys, klist->keys,
- sizeof(struct key *) * klist->nkeys);
- nklist->delkey = klist->nkeys;
- nklist->nkeys = klist->nkeys + 1;
- klist->delkey = USHRT_MAX;
- } else {
- nklist->nkeys = 1;
- nklist->delkey = 0;
- }
-
- /* add the key into the new space */
- RCU_INIT_POINTER(nklist->keys[nklist->delkey], NULL);
- prealloc = (unsigned long)nklist | KEY_LINK_FIXQUOTA;
+ /* If we're not replacing a link in-place then we're going to need some
+ * extra quota.
+ */
+ if (!edit->dead_leaf) {
+ ret = key_payload_reserve(keyring,
+ keyring->datalen + KEYQUOTA_LINK_BYTES);
+ if (ret < 0)
+ goto error_cancel;
}
-done:
- *_prealloc = prealloc;
+ *_edit = edit;
kleave(" = 0");
return 0;
-error_quota:
- /* undo the quota changes */
- key_payload_reserve(keyring,
- keyring->datalen - KEYQUOTA_LINK_BYTES);
+error_cancel:
+ assoc_array_cancel_edit(edit);
error_sem:
- if (type == &key_type_keyring)
+ if (index_key->type == &key_type_keyring)
up_write(&keyring_serialise_link_sem);
error_krsem:
up_write(&keyring->sem);
@@ -895,60 +1122,12 @@ int __key_link_check_live_key(struct key *keyring, struct key *key)
* holds at most one link to any given key of a particular type+description
* combination.
*/
-void __key_link(struct key *keyring, struct key *key,
- unsigned long *_prealloc)
+void __key_link(struct key *key, struct assoc_array_edit **_edit)
{
- struct keyring_list *klist, *nklist;
- struct key *discard;
-
- nklist = (struct keyring_list *)(*_prealloc & ~KEY_LINK_FIXQUOTA);
- *_prealloc = 0;
-
- kenter("%d,%d,%p", keyring->serial, key->serial, nklist);
-
- klist = rcu_dereference_locked_keyring(keyring);
-
- atomic_inc(&key->usage);
- keyring->last_used_at = key->last_used_at =
- current_kernel_time().tv_sec;
-
- /* there's a matching key we can displace or an empty slot in a newly
- * allocated list we can fill */
- if (nklist) {
- kdebug("reissue %hu/%hu/%hu",
- nklist->delkey, nklist->nkeys, nklist->maxkeys);
-
- RCU_INIT_POINTER(nklist->keys[nklist->delkey], key);
-
- rcu_assign_pointer(keyring->payload.subscriptions, nklist);
-
- /* dispose of the old keyring list and, if there was one, the
- * displaced key */
- if (klist) {
- kdebug("dispose %hu/%hu/%hu",
- klist->delkey, klist->nkeys, klist->maxkeys);
- call_rcu(&klist->rcu, keyring_unlink_rcu_disposal);
- }
- } else if (klist->delkey < klist->nkeys) {
- kdebug("replace %hu/%hu/%hu",
- klist->delkey, klist->nkeys, klist->maxkeys);
-
- discard = rcu_dereference_protected(
- klist->keys[klist->delkey],
- rwsem_is_locked(&keyring->sem));
- rcu_assign_pointer(klist->keys[klist->delkey], key);
- /* The garbage collector will take care of RCU
- * synchronisation */
- key_put(discard);
- } else {
- /* there's sufficient slack space to append directly */
- kdebug("append %hu/%hu/%hu",
- klist->delkey, klist->nkeys, klist->maxkeys);
-
- RCU_INIT_POINTER(klist->keys[klist->delkey], key);
- smp_wmb();
- klist->nkeys++;
- }
+ __key_get(key);
+ assoc_array_insert_set_object(*_edit, keyring_key_to_ptr(key));
+ assoc_array_apply_edit(*_edit);
+ *_edit = NULL;
}
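Spelling out the three-phase protocol these helpers now form; key_link()
below is the canonical in-tree user, and this sketch assumes the caller
holds references on both keys:

	struct assoc_array_edit *edit;
	int ret = __key_link_begin(keyring, &key->index_key, &edit);
	if (ret == 0) {
		ret = __key_link_check_live_key(keyring, key);
		if (ret == 0)
			__key_link(key, &edit);	/* applies and clears the edit */
		__key_link_end(keyring, &key->index_key, edit);
	}

Because __key_link() NULLs *_edit once the edit is applied, __key_link_end()
only sees a live edit, and thus cancels it and returns the quota, on the
failure path.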
/*
@@ -956,24 +1135,22 @@ void __key_link(struct key *keyring, struct key *key,
*
 * Must be called with __key_link_begin() having been called.
*/
-void __key_link_end(struct key *keyring, struct key_type *type,
- unsigned long prealloc)
+void __key_link_end(struct key *keyring,
+ const struct keyring_index_key *index_key,
+ struct assoc_array_edit *edit)
__releases(&keyring->sem)
__releases(&keyring_serialise_link_sem)
{
- BUG_ON(type == NULL);
- BUG_ON(type->name == NULL);
- kenter("%d,%s,%lx", keyring->serial, type->name, prealloc);
+ BUG_ON(index_key->type == NULL);
+ kenter("%d,%s,", keyring->serial, index_key->type->name);
- if (type == &key_type_keyring)
+ if (index_key->type == &key_type_keyring)
up_write(&keyring_serialise_link_sem);
- if (prealloc) {
- if (prealloc & KEY_LINK_FIXQUOTA)
- key_payload_reserve(keyring,
- keyring->datalen -
- KEYQUOTA_LINK_BYTES);
- kfree((struct keyring_list *)(prealloc & ~KEY_LINK_FIXQUOTA));
+ if (edit && !edit->dead_leaf) {
+ key_payload_reserve(keyring,
+ keyring->datalen - KEYQUOTA_LINK_BYTES);
+ assoc_array_cancel_edit(edit);
}
up_write(&keyring->sem);
}
@@ -1000,20 +1177,28 @@ void __key_link_end(struct key *keyring, struct key_type *type,
*/
int key_link(struct key *keyring, struct key *key)
{
- unsigned long prealloc;
+ struct assoc_array_edit *edit;
int ret;
+ kenter("{%d,%d}", keyring->serial, atomic_read(&keyring->usage));
+
key_check(keyring);
key_check(key);
- ret = __key_link_begin(keyring, key->type, key->description, &prealloc);
+ if (test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags) &&
+ !test_bit(KEY_FLAG_TRUSTED, &key->flags))
+ return -EPERM;
+
+ ret = __key_link_begin(keyring, &key->index_key, &edit);
if (ret == 0) {
+ kdebug("begun {%d,%d}", keyring->serial, atomic_read(&keyring->usage));
ret = __key_link_check_live_key(keyring, key);
if (ret == 0)
- __key_link(keyring, key, &prealloc);
- __key_link_end(keyring, key->type, prealloc);
+ __key_link(key, &edit);
+ __key_link_end(keyring, &key->index_key, edit);
}
+ kleave(" = %d {%d,%d}", ret, keyring->serial, atomic_read(&keyring->usage));
return ret;
}
EXPORT_SYMBOL(key_link);
@@ -1037,90 +1222,37 @@ EXPORT_SYMBOL(key_link);
*/
int key_unlink(struct key *keyring, struct key *key)
{
- struct keyring_list *klist, *nklist;
- int loop, ret;
+ struct assoc_array_edit *edit;
+ int ret;
key_check(keyring);
key_check(key);
- ret = -ENOTDIR;
if (keyring->type != &key_type_keyring)
- goto error;
+ return -ENOTDIR;
down_write(&keyring->sem);
- klist = rcu_dereference_locked_keyring(keyring);
- if (klist) {
- /* search the keyring for the key */
- for (loop = 0; loop < klist->nkeys; loop++)
- if (rcu_access_pointer(klist->keys[loop]) == key)
- goto key_is_present;
+ edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops,
+ &key->index_key);
+ if (IS_ERR(edit)) {
+ ret = PTR_ERR(edit);
+ goto error;
}
-
- up_write(&keyring->sem);
ret = -ENOENT;
- goto error;
-
-key_is_present:
- /* we need to copy the key list for RCU purposes */
- nklist = kmalloc(sizeof(*klist) +
- sizeof(struct key *) * klist->maxkeys,
- GFP_KERNEL);
- if (!nklist)
- goto nomem;
- nklist->maxkeys = klist->maxkeys;
- nklist->nkeys = klist->nkeys - 1;
-
- if (loop > 0)
- memcpy(&nklist->keys[0],
- &klist->keys[0],
- loop * sizeof(struct key *));
-
- if (loop < nklist->nkeys)
- memcpy(&nklist->keys[loop],
- &klist->keys[loop + 1],
- (nklist->nkeys - loop) * sizeof(struct key *));
-
- /* adjust the user's quota */
- key_payload_reserve(keyring,
- keyring->datalen - KEYQUOTA_LINK_BYTES);
-
- rcu_assign_pointer(keyring->payload.subscriptions, nklist);
-
- up_write(&keyring->sem);
-
- /* schedule for later cleanup */
- klist->delkey = loop;
- call_rcu(&klist->rcu, keyring_unlink_rcu_disposal);
+ if (edit == NULL)
+ goto error;
+ assoc_array_apply_edit(edit);
+ key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES);
ret = 0;
error:
- return ret;
-nomem:
- ret = -ENOMEM;
up_write(&keyring->sem);
- goto error;
+ return ret;
}
EXPORT_SYMBOL(key_unlink);
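The rewritten error paths are compact enough to enumerate; a short sketch of
what a caller (holding references on both keys) can expect:

	switch (key_unlink(keyring, key)) {
	case 0:		/* link removed and its quota returned */
		break;
	case -ENOENT:	/* no link to this key in the keyring */
		break;
	case -ENOTDIR:	/* the first argument is not a keyring */
		break;
	default:	/* e.g. -ENOMEM from building the edit script */
		break;
	}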
-/*
- * Dispose of a keyring list after the RCU grace period, releasing the keys it
- * links to.
- */
-static void keyring_clear_rcu_disposal(struct rcu_head *rcu)
-{
- struct keyring_list *klist;
- int loop;
-
- klist = container_of(rcu, struct keyring_list, rcu);
-
- for (loop = klist->nkeys - 1; loop >= 0; loop--)
- key_put(rcu_access_pointer(klist->keys[loop]));
-
- kfree(klist);
-}
-
/**
* keyring_clear - Clear a keyring
* @keyring: The keyring to clear.
@@ -1131,33 +1263,25 @@ static void keyring_clear_rcu_disposal(struct rcu_head *rcu)
*/
int keyring_clear(struct key *keyring)
{
- struct keyring_list *klist;
+ struct assoc_array_edit *edit;
int ret;
- ret = -ENOTDIR;
- if (keyring->type == &key_type_keyring) {
- /* detach the pointer block with the locks held */
- down_write(&keyring->sem);
-
- klist = rcu_dereference_locked_keyring(keyring);
- if (klist) {
- /* adjust the quota */
- key_payload_reserve(keyring,
- sizeof(struct keyring_list));
-
- rcu_assign_pointer(keyring->payload.subscriptions,
- NULL);
- }
-
- up_write(&keyring->sem);
+ if (keyring->type != &key_type_keyring)
+ return -ENOTDIR;
- /* free the keys after the locks have been dropped */
- if (klist)
- call_rcu(&klist->rcu, keyring_clear_rcu_disposal);
+ down_write(&keyring->sem);
+ edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
+ if (IS_ERR(edit)) {
+ ret = PTR_ERR(edit);
+ } else {
+ if (edit)
+ assoc_array_apply_edit(edit);
+ key_payload_reserve(keyring, 0);
ret = 0;
}
+ up_write(&keyring->sem);
return ret;
}
EXPORT_SYMBOL(keyring_clear);
@@ -1169,111 +1293,68 @@ EXPORT_SYMBOL(keyring_clear);
*/
static void keyring_revoke(struct key *keyring)
{
- struct keyring_list *klist;
+ struct assoc_array_edit *edit;
+
+ edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
+ if (!IS_ERR(edit)) {
+ if (edit)
+ assoc_array_apply_edit(edit);
+ key_payload_reserve(keyring, 0);
+ }
+}
+
+static bool keyring_gc_select_iterator(void *object, void *iterator_data)
+{
+ struct key *key = keyring_ptr_to_key(object);
+ time_t *limit = iterator_data;
- klist = rcu_dereference_locked_keyring(keyring);
+ if (key_is_dead(key, *limit))
+ return false;
+ key_get(key);
+ return true;
+}
- /* adjust the quota */
- key_payload_reserve(keyring, 0);
+static int keyring_gc_check_iterator(const void *object, void *iterator_data)
+{
+ const struct key *key = keyring_ptr_to_key(object);
+ time_t *limit = iterator_data;
- if (klist) {
- rcu_assign_pointer(keyring->payload.subscriptions, NULL);
- call_rcu(&klist->rcu, keyring_clear_rcu_disposal);
- }
+ key_check(key);
+ return key_is_dead(key, *limit);
}
/*
- * Collect garbage from the contents of a keyring, replacing the old list with
- * a new one with the pointers all shuffled down.
+ * Garbage collect pointers from a keyring.
*
- * Dead keys are classed as oned that are flagged as being dead or are revoked,
- * expired or negative keys that were revoked or expired before the specified
- * limit.
+ * Not called with any locks held. The keyring's key struct will not be
+ * deallocated under us as only our caller may deallocate it.
*/
void keyring_gc(struct key *keyring, time_t limit)
{
- struct keyring_list *klist, *new;
- struct key *key;
- int loop, keep, max;
-
- kenter("{%x,%s}", key_serial(keyring), keyring->description);
-
- down_write(&keyring->sem);
-
- klist = rcu_dereference_locked_keyring(keyring);
- if (!klist)
- goto no_klist;
-
- /* work out how many subscriptions we're keeping */
- keep = 0;
- for (loop = klist->nkeys - 1; loop >= 0; loop--)
- if (!key_is_dead(rcu_deref_link_locked(klist, loop, keyring),
- limit))
- keep++;
-
- if (keep == klist->nkeys)
- goto just_return;
-
- /* allocate a new keyring payload */
- max = roundup(keep, 4);
- new = kmalloc(sizeof(struct keyring_list) + max * sizeof(struct key *),
- GFP_KERNEL);
- if (!new)
- goto nomem;
- new->maxkeys = max;
- new->nkeys = 0;
- new->delkey = 0;
-
- /* install the live keys
- * - must take care as expired keys may be updated back to life
- */
- keep = 0;
- for (loop = klist->nkeys - 1; loop >= 0; loop--) {
- key = rcu_deref_link_locked(klist, loop, keyring);
- if (!key_is_dead(key, limit)) {
- if (keep >= max)
- goto discard_new;
- RCU_INIT_POINTER(new->keys[keep++], key_get(key));
- }
- }
- new->nkeys = keep;
-
- /* adjust the quota */
- key_payload_reserve(keyring,
- sizeof(struct keyring_list) +
- KEYQUOTA_LINK_BYTES * keep);
+ int result;
- if (keep == 0) {
- rcu_assign_pointer(keyring->payload.subscriptions, NULL);
- kfree(new);
- } else {
- rcu_assign_pointer(keyring->payload.subscriptions, new);
- }
+ kenter("%x{%s}", keyring->serial, keyring->description ?: "");
- up_write(&keyring->sem);
+ if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED)))
+ goto dont_gc;
- call_rcu(&klist->rcu, keyring_clear_rcu_disposal);
- kleave(" [yes]");
- return;
-
-discard_new:
- new->nkeys = keep;
- keyring_clear_rcu_disposal(&new->rcu);
- up_write(&keyring->sem);
- kleave(" [discard]");
- return;
-
-just_return:
- up_write(&keyring->sem);
- kleave(" [no dead]");
- return;
+ /* scan the keyring looking for dead keys */
+ rcu_read_lock();
+ result = assoc_array_iterate(&keyring->keys,
+ keyring_gc_check_iterator, &limit);
+ rcu_read_unlock();
+ if (result == true)
+ goto do_gc;
-no_klist:
- up_write(&keyring->sem);
- kleave(" [no_klist]");
+dont_gc:
+ kleave(" [no gc]");
return;
-nomem:
+do_gc:
+ down_write(&keyring->sem);
+ assoc_array_gc(&keyring->keys, &keyring_assoc_array_ops,
+ keyring_gc_select_iterator, &limit);
up_write(&keyring->sem);
- kleave(" [oom]");
+ kleave(" [gc]");
}
diff --git a/security/keys/persistent.c b/security/keys/persistent.c
new file mode 100644
index 000000000000..0ad3ee283781
--- /dev/null
+++ b/security/keys/persistent.c
@@ -0,0 +1,167 @@
+/* General persistent per-UID keyrings register
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/user_namespace.h>
+#include "internal.h"
+
+unsigned persistent_keyring_expiry = 3 * 24 * 3600; /* Expire after 3 days of non-use */
+
+/*
+ * Create the persistent keyring register for the current user namespace.
+ *
+ * Called with the namespace's sem locked for writing.
+ */
+static int key_create_persistent_register(struct user_namespace *ns)
+{
+ struct key *reg = keyring_alloc(".persistent_register",
+ KUIDT_INIT(0), KGIDT_INIT(0),
+ current_cred(),
+ ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ),
+ KEY_ALLOC_NOT_IN_QUOTA, NULL);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ ns->persistent_keyring_register = reg;
+ return 0;
+}
+
+/*
+ * Create the persistent keyring for the specified user.
+ *
+ * Called with the namespace's sem locked for writing.
+ */
+static key_ref_t key_create_persistent(struct user_namespace *ns, kuid_t uid,
+ struct keyring_index_key *index_key)
+{
+ struct key *persistent;
+ key_ref_t reg_ref, persistent_ref;
+
+ if (!ns->persistent_keyring_register) {
+ long err = key_create_persistent_register(ns);
+ if (err < 0)
+ return ERR_PTR(err);
+ } else {
+ reg_ref = make_key_ref(ns->persistent_keyring_register, true);
+ persistent_ref = find_key_to_update(reg_ref, index_key);
+ if (persistent_ref)
+ return persistent_ref;
+ }
+
+ persistent = keyring_alloc(index_key->description,
+ uid, INVALID_GID, current_cred(),
+ ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ),
+ KEY_ALLOC_NOT_IN_QUOTA,
+ ns->persistent_keyring_register);
+ if (IS_ERR(persistent))
+ return ERR_CAST(persistent);
+
+ return make_key_ref(persistent, true);
+}
+
+/*
+ * Get the persistent keyring for a specific UID and link it to the nominated
+ * keyring.
+ */
+static long key_get_persistent(struct user_namespace *ns, kuid_t uid,
+ key_ref_t dest_ref)
+{
+ struct keyring_index_key index_key;
+ struct key *persistent;
+ key_ref_t reg_ref, persistent_ref;
+ char buf[32];
+ long ret;
+
+ /* Look in the register if it exists */
+ index_key.type = &key_type_keyring;
+ index_key.description = buf;
+ index_key.desc_len = sprintf(buf, "_persistent.%u", from_kuid(ns, uid));
+
+ if (ns->persistent_keyring_register) {
+ reg_ref = make_key_ref(ns->persistent_keyring_register, true);
+ down_read(&ns->persistent_keyring_register_sem);
+ persistent_ref = find_key_to_update(reg_ref, &index_key);
+ up_read(&ns->persistent_keyring_register_sem);
+
+ if (persistent_ref)
+ goto found;
+ }
+
+ /* It wasn't in the register, so we'll need to create it. We might
+ * also need to create the register.
+ */
+ down_write(&ns->persistent_keyring_register_sem);
+ persistent_ref = key_create_persistent(ns, uid, &index_key);
+ up_write(&ns->persistent_keyring_register_sem);
+ if (!IS_ERR(persistent_ref))
+ goto found;
+
+ return PTR_ERR(persistent_ref);
+
+found:
+ ret = key_task_permission(persistent_ref, current_cred(), KEY_LINK);
+ if (ret == 0) {
+ persistent = key_ref_to_ptr(persistent_ref);
+ ret = key_link(key_ref_to_ptr(dest_ref), persistent);
+ if (ret == 0) {
+ key_set_timeout(persistent, persistent_keyring_expiry);
+ ret = persistent->serial;
+ }
+ }
+
+ key_ref_put(persistent_ref);
+ return ret;
+}
+
+/*
+ * Get the persistent keyring for a specific UID and link it to the nominated
+ * keyring.
+ */
+long keyctl_get_persistent(uid_t _uid, key_serial_t destid)
+{
+ struct user_namespace *ns = current_user_ns();
+ key_ref_t dest_ref;
+ kuid_t uid;
+ long ret;
+
+ /* -1 indicates the current user */
+ if (_uid == (uid_t)-1) {
+ uid = current_uid();
+ } else {
+ uid = make_kuid(ns, _uid);
+ if (!uid_valid(uid))
+ return -EINVAL;
+
+ /* You can only see your own persistent cache if you're not
+ * sufficiently privileged.
+ */
+ if (!uid_eq(uid, current_uid()) &&
+ !uid_eq(uid, current_euid()) &&
+ !ns_capable(ns, CAP_SETUID))
+ return -EPERM;
+ }
+
+ /* There must be a destination keyring */
+ dest_ref = lookup_user_key(destid, KEY_LOOKUP_CREATE, KEY_WRITE);
+ if (IS_ERR(dest_ref))
+ return PTR_ERR(dest_ref);
+ if (key_ref_to_ptr(dest_ref)->type != &key_type_keyring) {
+ ret = -ENOTDIR;
+ goto out_put_dest;
+ }
+
+ ret = key_get_persistent(ns, uid, dest_ref);
+
+out_put_dest:
+ key_ref_put(dest_ref);
+ return ret;
+}
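From userspace the new operation is reached through keyctl(2); a hedged
sketch using the libkeyutils wrapper of the same name, assumed to be the
wrapper shipped alongside this feature:

	#include <keyutils.h>

	/* Link the calling user's persistent keyring into the session
	 * keyring, creating it (and the register) on first use; -1
	 * selects the current UID as handled above.
	 */
	long ring = keyctl_get_persistent((uid_t)-1,
					  KEY_SPEC_SESSION_KEYRING);
	/* on success, ring is the keyring's serial number */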
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 217b6855e815..88e9a466940f 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -182,7 +182,6 @@ static void proc_keys_stop(struct seq_file *p, void *v)
static int proc_keys_show(struct seq_file *m, void *v)
{
- const struct cred *cred = current_cred();
struct rb_node *_p = v;
struct key *key = rb_entry(_p, struct key, serial_node);
struct timespec now;
@@ -191,15 +190,23 @@ static int proc_keys_show(struct seq_file *m, void *v)
char xbuf[12];
int rc;
+ struct keyring_search_context ctx = {
+ .index_key.type = key->type,
+ .index_key.description = key->description,
+ .cred = current_cred(),
+ .match = lookup_user_key_possessed,
+ .match_data = key,
+ .flags = (KEYRING_SEARCH_NO_STATE_CHECK |
+ KEYRING_SEARCH_LOOKUP_DIRECT),
+ };
+
key_ref = make_key_ref(key, 0);
/* determine if the key is possessed by this process (a test we can
* skip if the key does not indicate the possessor can view it
*/
if (key->perm & KEY_POS_VIEW) {
- skey_ref = search_my_process_keyrings(key->type, key,
- lookup_user_key_possessed,
- true, cred);
+ skey_ref = search_my_process_keyrings(&ctx);
if (!IS_ERR(skey_ref)) {
key_ref_put(skey_ref);
key_ref = make_key_ref(key, 1);
@@ -211,7 +218,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
* - the caller holds a spinlock, and thus the RCU read lock, making our
* access to __current_cred() safe
*/
- rc = key_task_permission(key_ref, cred, KEY_VIEW);
+ rc = key_task_permission(key_ref, ctx.cred, KEY_VIEW);
if (rc < 0)
return 0;
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 42defae1e161..0cf8a130a267 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -235,7 +235,7 @@ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
if (IS_ERR(keyring))
return PTR_ERR(keyring);
} else {
- atomic_inc(&keyring->usage);
+ __key_get(keyring);
}
/* install the keyring */
@@ -319,11 +319,7 @@ void key_fsgid_changed(struct task_struct *tsk)
* In the case of a successful return, the possession attribute is set on the
* returned key reference.
*/
-key_ref_t search_my_process_keyrings(struct key_type *type,
- const void *description,
- key_match_func_t match,
- bool no_state_check,
- const struct cred *cred)
+key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
{
key_ref_t key_ref, ret, err;
@@ -339,10 +335,9 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
err = ERR_PTR(-EAGAIN);
/* search the thread keyring first */
- if (cred->thread_keyring) {
+ if (ctx->cred->thread_keyring) {
key_ref = keyring_search_aux(
- make_key_ref(cred->thread_keyring, 1),
- cred, type, description, match, no_state_check);
+ make_key_ref(ctx->cred->thread_keyring, 1), ctx);
if (!IS_ERR(key_ref))
goto found;
@@ -358,10 +353,9 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
}
/* search the process keyring second */
- if (cred->process_keyring) {
+ if (ctx->cred->process_keyring) {
key_ref = keyring_search_aux(
- make_key_ref(cred->process_keyring, 1),
- cred, type, description, match, no_state_check);
+ make_key_ref(ctx->cred->process_keyring, 1), ctx);
if (!IS_ERR(key_ref))
goto found;
@@ -379,11 +373,11 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
}
/* search the session keyring */
- if (cred->session_keyring) {
+ if (ctx->cred->session_keyring) {
rcu_read_lock();
key_ref = keyring_search_aux(
- make_key_ref(rcu_dereference(cred->session_keyring), 1),
- cred, type, description, match, no_state_check);
+ make_key_ref(rcu_dereference(ctx->cred->session_keyring), 1),
+ ctx);
rcu_read_unlock();
if (!IS_ERR(key_ref))
@@ -402,10 +396,10 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
}
}
/* or search the user-session keyring */
- else if (cred->user->session_keyring) {
+ else if (ctx->cred->user->session_keyring) {
key_ref = keyring_search_aux(
- make_key_ref(cred->user->session_keyring, 1),
- cred, type, description, match, no_state_check);
+ make_key_ref(ctx->cred->user->session_keyring, 1),
+ ctx);
if (!IS_ERR(key_ref))
goto found;
@@ -437,18 +431,14 @@ found:
*
* Return same as search_my_process_keyrings().
*/
-key_ref_t search_process_keyrings(struct key_type *type,
- const void *description,
- key_match_func_t match,
- const struct cred *cred)
+key_ref_t search_process_keyrings(struct keyring_search_context *ctx)
{
struct request_key_auth *rka;
key_ref_t key_ref, ret = ERR_PTR(-EACCES), err;
might_sleep();
- key_ref = search_my_process_keyrings(type, description, match,
- false, cred);
+ key_ref = search_my_process_keyrings(ctx);
if (!IS_ERR(key_ref))
goto found;
err = key_ref;
@@ -457,18 +447,21 @@ key_ref_t search_process_keyrings(struct key_type *type,
* search the keyrings of the process mentioned there
* - we don't permit access to request_key auth keys via this method
*/
- if (cred->request_key_auth &&
- cred == current_cred() &&
- type != &key_type_request_key_auth
+ if (ctx->cred->request_key_auth &&
+ ctx->cred == current_cred() &&
+ ctx->index_key.type != &key_type_request_key_auth
) {
+ const struct cred *cred = ctx->cred;
+
/* defend against the auth key being revoked */
down_read(&cred->request_key_auth->sem);
- if (key_validate(cred->request_key_auth) == 0) {
- rka = cred->request_key_auth->payload.data;
+ if (key_validate(ctx->cred->request_key_auth) == 0) {
+ rka = ctx->cred->request_key_auth->payload.data;
- key_ref = search_process_keyrings(type, description,
- match, rka->cred);
+ ctx->cred = rka->cred;
+ key_ref = search_process_keyrings(ctx);
+ ctx->cred = cred;
up_read(&cred->request_key_auth->sem);
@@ -522,19 +515,23 @@ int lookup_user_key_possessed(const struct key *key, const void *target)
key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags,
key_perm_t perm)
{
+ struct keyring_search_context ctx = {
+ .match = lookup_user_key_possessed,
+ .flags = (KEYRING_SEARCH_NO_STATE_CHECK |
+ KEYRING_SEARCH_LOOKUP_DIRECT),
+ };
struct request_key_auth *rka;
- const struct cred *cred;
struct key *key;
key_ref_t key_ref, skey_ref;
int ret;
try_again:
- cred = get_current_cred();
+ ctx.cred = get_current_cred();
key_ref = ERR_PTR(-ENOKEY);
switch (id) {
case KEY_SPEC_THREAD_KEYRING:
- if (!cred->thread_keyring) {
+ if (!ctx.cred->thread_keyring) {
if (!(lflags & KEY_LOOKUP_CREATE))
goto error;
@@ -546,13 +543,13 @@ try_again:
goto reget_creds;
}
- key = cred->thread_keyring;
- atomic_inc(&key->usage);
+ key = ctx.cred->thread_keyring;
+ __key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_PROCESS_KEYRING:
- if (!cred->process_keyring) {
+ if (!ctx.cred->process_keyring) {
if (!(lflags & KEY_LOOKUP_CREATE))
goto error;
@@ -564,13 +561,13 @@ try_again:
goto reget_creds;
}
- key = cred->process_keyring;
- atomic_inc(&key->usage);
+ key = ctx.cred->process_keyring;
+ __key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_SESSION_KEYRING:
- if (!cred->session_keyring) {
+ if (!ctx.cred->session_keyring) {
/* always install a session keyring upon access if one
* doesn't exist yet */
ret = install_user_keyrings();
@@ -580,13 +577,13 @@ try_again:
ret = join_session_keyring(NULL);
else
ret = install_session_keyring(
- cred->user->session_keyring);
+ ctx.cred->user->session_keyring);
if (ret < 0)
goto error;
goto reget_creds;
- } else if (cred->session_keyring ==
- cred->user->session_keyring &&
+ } else if (ctx.cred->session_keyring ==
+ ctx.cred->user->session_keyring &&
lflags & KEY_LOOKUP_CREATE) {
ret = join_session_keyring(NULL);
if (ret < 0)
@@ -595,33 +592,33 @@ try_again:
}
rcu_read_lock();
- key = rcu_dereference(cred->session_keyring);
- atomic_inc(&key->usage);
+ key = rcu_dereference(ctx.cred->session_keyring);
+ __key_get(key);
rcu_read_unlock();
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_USER_KEYRING:
- if (!cred->user->uid_keyring) {
+ if (!ctx.cred->user->uid_keyring) {
ret = install_user_keyrings();
if (ret < 0)
goto error;
}
- key = cred->user->uid_keyring;
- atomic_inc(&key->usage);
+ key = ctx.cred->user->uid_keyring;
+ __key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_USER_SESSION_KEYRING:
- if (!cred->user->session_keyring) {
+ if (!ctx.cred->user->session_keyring) {
ret = install_user_keyrings();
if (ret < 0)
goto error;
}
- key = cred->user->session_keyring;
- atomic_inc(&key->usage);
+ key = ctx.cred->user->session_keyring;
+ __key_get(key);
key_ref = make_key_ref(key, 1);
break;
@@ -631,29 +628,29 @@ try_again:
goto error;
case KEY_SPEC_REQKEY_AUTH_KEY:
- key = cred->request_key_auth;
+ key = ctx.cred->request_key_auth;
if (!key)
goto error;
- atomic_inc(&key->usage);
+ __key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_REQUESTOR_KEYRING:
- if (!cred->request_key_auth)
+ if (!ctx.cred->request_key_auth)
goto error;
- down_read(&cred->request_key_auth->sem);
+ down_read(&ctx.cred->request_key_auth->sem);
if (test_bit(KEY_FLAG_REVOKED,
- &cred->request_key_auth->flags)) {
+ &ctx.cred->request_key_auth->flags)) {
key_ref = ERR_PTR(-EKEYREVOKED);
key = NULL;
} else {
- rka = cred->request_key_auth->payload.data;
+ rka = ctx.cred->request_key_auth->payload.data;
key = rka->dest_keyring;
- atomic_inc(&key->usage);
+ __key_get(key);
}
- up_read(&cred->request_key_auth->sem);
+ up_read(&ctx.cred->request_key_auth->sem);
if (!key)
goto error;
key_ref = make_key_ref(key, 1);
@@ -673,9 +670,13 @@ try_again:
key_ref = make_key_ref(key, 0);
/* check to see if we possess the key */
- skey_ref = search_process_keyrings(key->type, key,
- lookup_user_key_possessed,
- cred);
+ ctx.index_key.type = key->type;
+ ctx.index_key.description = key->description;
+ ctx.index_key.desc_len = strlen(key->description);
+ ctx.match_data = key;
+ kdebug("check possessed");
+ skey_ref = search_process_keyrings(&ctx);
+ kdebug("possessed=%p", skey_ref);
if (!IS_ERR(skey_ref)) {
key_put(key);
@@ -715,14 +716,14 @@ try_again:
goto invalid_key;
/* check the permissions */
- ret = key_task_permission(key_ref, cred, perm);
+ ret = key_task_permission(key_ref, ctx.cred, perm);
if (ret < 0)
goto invalid_key;
key->last_used_at = current_kernel_time().tv_sec;
error:
- put_cred(cred);
+ put_cred(ctx.cred);
return key_ref;
invalid_key:
@@ -733,7 +734,7 @@ invalid_key:
/* if we attempted to install a keyring, then it may have caused new
* creds to be installed */
reget_creds:
- put_cred(cred);
+ put_cred(ctx.cred);
goto try_again;
}
@@ -856,3 +857,13 @@ void key_change_session_keyring(struct callback_head *twork)
commit_creds(new);
}
+
+/*
+ * Make sure that root's user and user-session keyrings exist.
+ */
+static int __init init_root_keyring(void)
+{
+ return install_user_keyrings();
+}
+
+late_initcall(init_root_keyring);
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index c411f9bb156b..381411941cc1 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -345,33 +345,34 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
* May return a key that's already under construction instead if there was a
* race between two thread calling request_key().
*/
-static int construct_alloc_key(struct key_type *type,
- const char *description,
+static int construct_alloc_key(struct keyring_search_context *ctx,
struct key *dest_keyring,
unsigned long flags,
struct key_user *user,
struct key **_key)
{
- const struct cred *cred = current_cred();
- unsigned long prealloc;
+ struct assoc_array_edit *edit;
struct key *key;
key_perm_t perm;
key_ref_t key_ref;
int ret;
- kenter("%s,%s,,,", type->name, description);
+ kenter("%s,%s,,,",
+ ctx->index_key.type->name, ctx->index_key.description);
*_key = NULL;
mutex_lock(&user->cons_lock);
perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
perm |= KEY_USR_VIEW;
- if (type->read)
+ if (ctx->index_key.type->read)
perm |= KEY_POS_READ;
- if (type == &key_type_keyring || type->update)
+ if (ctx->index_key.type == &key_type_keyring ||
+ ctx->index_key.type->update)
perm |= KEY_POS_WRITE;
- key = key_alloc(type, description, cred->fsuid, cred->fsgid, cred,
+ key = key_alloc(ctx->index_key.type, ctx->index_key.description,
+ ctx->cred->fsuid, ctx->cred->fsgid, ctx->cred,
perm, flags);
if (IS_ERR(key))
goto alloc_failed;
@@ -379,8 +380,7 @@ static int construct_alloc_key(struct key_type *type,
set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags);
if (dest_keyring) {
- ret = __key_link_begin(dest_keyring, type, description,
- &prealloc);
+ ret = __key_link_begin(dest_keyring, &ctx->index_key, &edit);
if (ret < 0)
goto link_prealloc_failed;
}
@@ -390,16 +390,16 @@ static int construct_alloc_key(struct key_type *type,
* waited for locks */
mutex_lock(&key_construction_mutex);
- key_ref = search_process_keyrings(type, description, type->match, cred);
+ key_ref = search_process_keyrings(ctx);
if (!IS_ERR(key_ref))
goto key_already_present;
if (dest_keyring)
- __key_link(dest_keyring, key, &prealloc);
+ __key_link(key, &edit);
mutex_unlock(&key_construction_mutex);
if (dest_keyring)
- __key_link_end(dest_keyring, type, prealloc);
+ __key_link_end(dest_keyring, &ctx->index_key, edit);
mutex_unlock(&user->cons_lock);
*_key = key;
kleave(" = 0 [%d]", key_serial(key));
@@ -414,8 +414,8 @@ key_already_present:
if (dest_keyring) {
ret = __key_link_check_live_key(dest_keyring, key);
if (ret == 0)
- __key_link(dest_keyring, key, &prealloc);
- __key_link_end(dest_keyring, type, prealloc);
+ __key_link(key, &edit);
+ __key_link_end(dest_keyring, &ctx->index_key, edit);
if (ret < 0)
goto link_check_failed;
}
@@ -444,8 +444,7 @@ alloc_failed:
/*
* Commence key construction.
*/
-static struct key *construct_key_and_link(struct key_type *type,
- const char *description,
+static struct key *construct_key_and_link(struct keyring_search_context *ctx,
const char *callout_info,
size_t callout_len,
void *aux,
@@ -464,8 +463,7 @@ static struct key *construct_key_and_link(struct key_type *type,
construct_get_dest_keyring(&dest_keyring);
- ret = construct_alloc_key(type, description, dest_keyring, flags, user,
- &key);
+ ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key);
key_user_put(user);
if (ret == 0) {
@@ -529,17 +527,24 @@ struct key *request_key_and_link(struct key_type *type,
struct key *dest_keyring,
unsigned long flags)
{
- const struct cred *cred = current_cred();
+ struct keyring_search_context ctx = {
+ .index_key.type = type,
+ .index_key.description = description,
+ .cred = current_cred(),
+ .match = type->match,
+ .match_data = description,
+ .flags = KEYRING_SEARCH_LOOKUP_DIRECT,
+ };
struct key *key;
key_ref_t key_ref;
int ret;
kenter("%s,%s,%p,%zu,%p,%p,%lx",
- type->name, description, callout_info, callout_len, aux,
- dest_keyring, flags);
+ ctx.index_key.type->name, ctx.index_key.description,
+ callout_info, callout_len, aux, dest_keyring, flags);
/* search all the process keyrings for a key */
- key_ref = search_process_keyrings(type, description, type->match, cred);
+ key_ref = search_process_keyrings(&ctx);
if (!IS_ERR(key_ref)) {
key = key_ref_to_ptr(key_ref);
@@ -562,9 +567,8 @@ struct key *request_key_and_link(struct key_type *type,
if (!callout_info)
goto error;
- key = construct_key_and_link(type, description, callout_info,
- callout_len, aux, dest_keyring,
- flags);
+ key = construct_key_and_link(&ctx, callout_info, callout_len,
+ aux, dest_keyring, flags);
}
error:
@@ -592,8 +596,10 @@ int wait_for_key_construction(struct key *key, bool intr)
intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (ret < 0)
return ret;
- if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+ if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
+ smp_rmb();
return key->type_data.reject_error;
+ }
return key_validate(key);
}
EXPORT_SYMBOL(wait_for_key_construction);
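The smp_rmb() added above is one half of a barrier pair; it is assumed to
order these reads against the instantiation side, which in this tree stores
the rejection error before setting the flag, roughly:

	/* writer side, in key_reject_and_link() (sketch) */
	key->type_data.reject_error = -error;
	smp_wmb();
	set_bit(KEY_FLAG_NEGATIVE, &key->flags);

Without the read barrier, a waiter could observe KEY_FLAG_NEGATIVE yet still
read a stale reject_error.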
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 85730d5a5a59..7495a93b4b90 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <asm/uaccess.h>
#include "internal.h"
+#include <keys/user-type.h>
static int request_key_auth_instantiate(struct key *,
struct key_preparsed_payload *);
@@ -222,32 +223,26 @@ error_alloc:
}
/*
- * See if an authorisation key is associated with a particular key.
- */
-static int key_get_instantiation_authkey_match(const struct key *key,
- const void *_id)
-{
- struct request_key_auth *rka = key->payload.data;
- key_serial_t id = (key_serial_t)(unsigned long) _id;
-
- return rka->target_key->serial == id;
-}
-
-/*
* Search the current process's keyrings for the authorisation key for
* instantiation of a key.
*/
struct key *key_get_instantiation_authkey(key_serial_t target_id)
{
- const struct cred *cred = current_cred();
+ char description[16];
+ struct keyring_search_context ctx = {
+ .index_key.type = &key_type_request_key_auth,
+ .index_key.description = description,
+ .cred = current_cred(),
+ .match = user_match,
+ .match_data = description,
+ .flags = KEYRING_SEARCH_LOOKUP_DIRECT,
+ };
struct key *authkey;
key_ref_t authkey_ref;
- authkey_ref = search_process_keyrings(
- &key_type_request_key_auth,
- (void *) (unsigned long) target_id,
- key_get_instantiation_authkey_match,
- cred);
+ sprintf(description, "%x", target_id);
+
+ authkey_ref = search_process_keyrings(&ctx);
if (IS_ERR(authkey_ref)) {
authkey = ERR_CAST(authkey_ref);
diff --git a/security/keys/sysctl.c b/security/keys/sysctl.c
index ee32d181764a..8c0af08760c8 100644
--- a/security/keys/sysctl.c
+++ b/security/keys/sysctl.c
@@ -61,5 +61,16 @@ ctl_table key_sysctls[] = {
.extra1 = (void *) &zero,
.extra2 = (void *) &max,
},
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+ {
+ .procname = "persistent_keyring_expiry",
+ .data = &persistent_keyring_expiry,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = (void *) &zero,
+ .extra2 = (void *) &max,
+ },
+#endif
{ }
};
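Since key_sysctls registers under kernel/keys, the new knob surfaces as a
procfs file; a hedged userspace sketch for retuning the expiry (the path is
inferred from the sysctl directory, the value is in seconds):

	#include <fcntl.h>
	#include <unistd.h>

	int fd = open("/proc/sys/kernel/keys/persistent_keyring_expiry",
		      O_WRONLY);
	if (fd >= 0) {
		write(fd, "604800", 6);	/* one week instead of three days */
		close(fd);
	}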
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 55dc88939185..faa2caeb593f 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -25,14 +25,15 @@ static int logon_vet_description(const char *desc);
* arbitrary blob of data as the payload
*/
struct key_type key_type_user = {
- .name = "user",
- .instantiate = user_instantiate,
- .update = user_update,
- .match = user_match,
- .revoke = user_revoke,
- .destroy = user_destroy,
- .describe = user_describe,
- .read = user_read,
+ .name = "user",
+ .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .instantiate = user_instantiate,
+ .update = user_update,
+ .match = user_match,
+ .revoke = user_revoke,
+ .destroy = user_destroy,
+ .describe = user_describe,
+ .read = user_read,
};
EXPORT_SYMBOL_GPL(key_type_user);
@@ -45,6 +46,7 @@ EXPORT_SYMBOL_GPL(key_type_user);
*/
struct key_type key_type_logon = {
.name = "logon",
+ .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
.instantiate = user_instantiate,
.update = user_update,
.match = user_match,
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 234bc2ab450c..9a62045e6282 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -397,7 +397,8 @@ void common_lsm_audit(struct common_audit_data *a,
if (a == NULL)
return;
/* we use GFP_ATOMIC so we won't sleep */
- ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_AVC);
+ ab = audit_log_start(current->audit_context, GFP_ATOMIC | __GFP_NOWARN,
+ AUDIT_AVC);
if (ab == NULL)
return;
diff --git a/security/security.c b/security/security.c
index 4dc31f4f2700..15b6928592ef 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1340,22 +1340,17 @@ int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
return security_ops->xfrm_policy_delete_security(ctx);
}
-int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx)
+int security_xfrm_state_alloc(struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *sec_ctx)
{
- return security_ops->xfrm_state_alloc_security(x, sec_ctx, 0);
+ return security_ops->xfrm_state_alloc(x, sec_ctx);
}
EXPORT_SYMBOL(security_xfrm_state_alloc);
int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
struct xfrm_sec_ctx *polsec, u32 secid)
{
- if (!polsec)
- return 0;
- /*
- * We want the context to be taken from secid which is usually
- * from the sock.
- */
- return security_ops->xfrm_state_alloc_security(x, NULL, secid);
+ return security_ops->xfrm_state_alloc_acquire(x, polsec, secid);
}
int security_xfrm_state_delete(struct xfrm_state *x)
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index c540795fb3f2..6625699f497c 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -53,6 +53,7 @@
#include <net/ip.h> /* for local_port_range[] */
#include <net/sock.h>
#include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */
+#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/netlabel.h>
#include <linux/uaccess.h>
@@ -95,8 +96,6 @@
#include "audit.h"
#include "avc_ss.h"
-#define NUM_SEL_MNT_OPTS 5
-
extern struct security_operations *security_ops;
/* SECMARK reference count */
@@ -139,12 +138,28 @@ static struct kmem_cache *sel_inode_cache;
* This function checks the SECMARK reference counter to see if any SECMARK
* targets are currently configured, if the reference counter is greater than
* zero SECMARK is considered to be enabled. Returns true (1) if SECMARK is
- * enabled, false (0) if SECMARK is disabled.
+ * enabled, false (0) if SECMARK is disabled. If the always_check_network
+ * policy capability is enabled, SECMARK is always considered enabled.
*
*/
static int selinux_secmark_enabled(void)
{
- return (atomic_read(&selinux_secmark_refcount) > 0);
+ return (selinux_policycap_alwaysnetwork || atomic_read(&selinux_secmark_refcount));
+}
+
+/**
+ * selinux_peerlbl_enabled - Check to see if peer labeling is currently enabled
+ *
+ * Description:
+ * This function checks if NetLabel or labeled IPSEC is enabled. Returns true
+ * (1) if any are enabled or false (0) if neither are enabled. If the
+ * always_check_network policy capability is enabled, peer labeling
+ * is always considered enabled.
+ *
+ */
+static int selinux_peerlbl_enabled(void)
+{
+ return (selinux_policycap_alwaysnetwork || netlbl_enabled() || selinux_xfrm_enabled());
}
/*
@@ -309,8 +324,11 @@ enum {
Opt_defcontext = 3,
Opt_rootcontext = 4,
Opt_labelsupport = 5,
+ Opt_nextmntopt = 6,
};
+#define NUM_SEL_MNT_OPTS (Opt_nextmntopt - 1)
+
static const match_table_t tokens = {
{Opt_context, CONTEXT_STR "%s"},
{Opt_fscontext, FSCONTEXT_STR "%s"},
@@ -355,6 +373,29 @@ static int may_context_mount_inode_relabel(u32 sid,
return rc;
}
+static int selinux_is_sblabel_mnt(struct super_block *sb)
+{
+ struct superblock_security_struct *sbsec = sb->s_security;
+
+ if (sbsec->behavior == SECURITY_FS_USE_XATTR ||
+ sbsec->behavior == SECURITY_FS_USE_TRANS ||
+ sbsec->behavior == SECURITY_FS_USE_TASK)
+ return 1;
+
+ /* Special handling for sysfs. Is genfs but also has setxattr handler */
+ if (strncmp(sb->s_type->name, "sysfs", sizeof("sysfs")) == 0)
+ return 1;
+
+ /*
+ * Special handling for rootfs. Is genfs but supports
+ * setting SELinux context on in-core inodes.
+ */
+ if (strncmp(sb->s_type->name, "rootfs", sizeof("rootfs")) == 0)
+ return 1;
+
+ return 0;
+}
+
static int sb_finish_set_opts(struct super_block *sb)
{
struct superblock_security_struct *sbsec = sb->s_security;
@@ -388,8 +429,6 @@ static int sb_finish_set_opts(struct super_block *sb)
}
}
- sbsec->flags |= (SE_SBINITIALIZED | SE_SBLABELSUPP);
-
if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
printk(KERN_ERR "SELinux: initialized (dev %s, type %s), unknown behavior\n",
sb->s_id, sb->s_type->name);
@@ -398,15 +437,9 @@ static int sb_finish_set_opts(struct super_block *sb)
sb->s_id, sb->s_type->name,
labeling_behaviors[sbsec->behavior-1]);
- if (sbsec->behavior == SECURITY_FS_USE_GENFS ||
- sbsec->behavior == SECURITY_FS_USE_MNTPOINT ||
- sbsec->behavior == SECURITY_FS_USE_NONE ||
- sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
- sbsec->flags &= ~SE_SBLABELSUPP;
-
- /* Special handling for sysfs. Is genfs but also has setxattr handler*/
- if (strncmp(sb->s_type->name, "sysfs", sizeof("sysfs")) == 0)
- sbsec->flags |= SE_SBLABELSUPP;
+ sbsec->flags |= SE_SBINITIALIZED;
+ if (selinux_is_sblabel_mnt(sb))
+ sbsec->flags |= SBLABEL_MNT;
/* Initialize the root inode. */
rc = inode_doinit_with_dentry(root_inode, root);
@@ -460,15 +493,18 @@ static int selinux_get_mnt_opts(const struct super_block *sb,
if (!ss_initialized)
return -EINVAL;
+ /* make sure we always check enough bits to cover the mask */
+ BUILD_BUG_ON(SE_MNTMASK >= (1 << NUM_SEL_MNT_OPTS));
+
tmp = sbsec->flags & SE_MNTMASK;
/* count the number of mount options for this sb */
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < NUM_SEL_MNT_OPTS; i++) {
if (tmp & 0x01)
opts->num_mnt_opts++;
tmp >>= 1;
}
/* Check if the Label support flag is set */
- if (sbsec->flags & SE_SBLABELSUPP)
+ if (sbsec->flags & SBLABEL_MNT)
opts->num_mnt_opts++;
opts->mnt_opts = kcalloc(opts->num_mnt_opts, sizeof(char *), GFP_ATOMIC);
@@ -515,9 +551,9 @@ static int selinux_get_mnt_opts(const struct super_block *sb,
opts->mnt_opts[i] = context;
opts->mnt_opts_flags[i++] = ROOTCONTEXT_MNT;
}
- if (sbsec->flags & SE_SBLABELSUPP) {
+ if (sbsec->flags & SBLABEL_MNT) {
opts->mnt_opts[i] = NULL;
- opts->mnt_opts_flags[i++] = SE_SBLABELSUPP;
+ opts->mnt_opts_flags[i++] = SBLABEL_MNT;
}
BUG_ON(i != opts->num_mnt_opts);
@@ -614,7 +650,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
for (i = 0; i < num_opts; i++) {
u32 sid;
- if (flags[i] == SE_SBLABELSUPP)
+ if (flags[i] == SBLABEL_MNT)
continue;
rc = security_context_to_sid(mount_options[i],
strlen(mount_options[i]), &sid);
@@ -685,9 +721,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
* Determine the labeling behavior to use for this
* filesystem type.
*/
- rc = security_fs_use((sbsec->flags & SE_SBPROC) ?
- "proc" : sb->s_type->name,
- &sbsec->behavior, &sbsec->sid);
+ rc = security_fs_use(sb);
if (rc) {
printk(KERN_WARNING
"%s: security_fs_use(%s) returned %d\n",
@@ -1037,7 +1071,7 @@ static void selinux_write_opts(struct seq_file *m,
case DEFCONTEXT_MNT:
prefix = DEFCONTEXT_STR;
break;
- case SE_SBLABELSUPP:
+ case SBLABEL_MNT:
seq_putc(m, ',');
seq_puts(m, LABELSUPP_STR);
continue;
@@ -1649,7 +1683,7 @@ static int may_create(struct inode *dir,
if (rc)
return rc;
- if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
+ if (!newsid || !(sbsec->flags & SBLABEL_MNT)) {
rc = security_transition_sid(sid, dsec->sid, tclass,
&dentry->d_name, &newsid);
if (rc)
@@ -2437,7 +2471,7 @@ static int selinux_sb_remount(struct super_block *sb, void *data)
u32 sid;
size_t len;
- if (flags[i] == SE_SBLABELSUPP)
+ if (flags[i] == SBLABEL_MNT)
continue;
len = strlen(mount_options[i]);
rc = security_context_to_sid(mount_options[i], len, &sid);
@@ -2606,7 +2640,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
if ((sbsec->flags & SE_SBINITIALIZED) &&
(sbsec->behavior == SECURITY_FS_USE_MNTPOINT))
newsid = sbsec->mntpoint_sid;
- else if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
+ else if (!newsid || !(sbsec->flags & SBLABEL_MNT)) {
rc = security_transition_sid(sid, dsec->sid,
inode_mode_to_security_class(inode->i_mode),
qstr, &newsid);
@@ -2628,7 +2662,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
isec->initialized = 1;
}
- if (!ss_initialized || !(sbsec->flags & SE_SBLABELSUPP))
+ if (!ss_initialized || !(sbsec->flags & SBLABEL_MNT))
return -EOPNOTSUPP;
if (name)
@@ -2830,7 +2864,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
return selinux_inode_setotherxattr(dentry, name);
sbsec = inode->i_sb->s_security;
- if (!(sbsec->flags & SE_SBLABELSUPP))
+ if (!(sbsec->flags & SBLABEL_MNT))
return -EOPNOTSUPP;
if (!inode_owner_or_capable(inode))
@@ -3791,8 +3825,12 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
u32 nlbl_sid;
u32 nlbl_type;
- selinux_skb_xfrm_sid(skb, &xfrm_sid);
- selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid);
+ err = selinux_xfrm_skb_sid(skb, &xfrm_sid);
+ if (unlikely(err))
+ return -EACCES;
+ err = selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid);
+ if (unlikely(err))
+ return -EACCES;
err = security_net_peersid_resolve(nlbl_sid, nlbl_type, xfrm_sid, sid);
if (unlikely(err)) {
@@ -3805,6 +3843,30 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
return 0;
}
+/**
+ * selinux_conn_sid - Determine the child socket label for a connection
+ * @sk_sid: the parent socket's SID
+ * @skb_sid: the packet's SID
+ * @conn_sid: the resulting connection SID
+ *
+ * If @skb_sid is valid then the user:role:type information from @sk_sid is
+ * combined with the MLS information from @skb_sid in order to create
+ * @conn_sid. If @skb_sid is not valid then @conn_sid is simply a copy
+ * of @sk_sid. Returns zero on success, negative values on failure.
+ *
+ */
+static int selinux_conn_sid(u32 sk_sid, u32 skb_sid, u32 *conn_sid)
+{
+ int err = 0;
+
+ if (skb_sid != SECSID_NULL)
+ err = security_sid_mls_copy(sk_sid, skb_sid, conn_sid);
+ else
+ *conn_sid = sk_sid;
+
+ return err;
+}
+
/* socket security operations */
static int socket_sockcreate_sid(const struct task_security_struct *tsec,
@@ -4246,7 +4308,7 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
return selinux_sock_rcv_skb_compat(sk, skb, family);
secmark_active = selinux_secmark_enabled();
- peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled();
+ peerlbl_active = selinux_peerlbl_enabled();
if (!secmark_active && !peerlbl_active)
return 0;
@@ -4272,8 +4334,10 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
}
err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER,
PEER__RECV, &ad);
- if (err)
+ if (err) {
selinux_netlbl_err(skb, err, 0);
+ return err;
+ }
}
if (secmark_active) {
@@ -4411,7 +4475,7 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
struct sk_security_struct *sksec = sk->sk_security;
int err;
u16 family = sk->sk_family;
- u32 newsid;
+ u32 connsid;
u32 peersid;
/* handle mapped IPv4 packets arriving via IPv6 sockets */
@@ -4421,16 +4485,11 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
err = selinux_skb_peerlbl_sid(skb, family, &peersid);
if (err)
return err;
- if (peersid == SECSID_NULL) {
- req->secid = sksec->sid;
- req->peer_secid = SECSID_NULL;
- } else {
- err = security_sid_mls_copy(sksec->sid, peersid, &newsid);
- if (err)
- return err;
- req->secid = newsid;
- req->peer_secid = peersid;
- }
+ err = selinux_conn_sid(sksec->sid, peersid, &connsid);
+ if (err)
+ return err;
+ req->secid = connsid;
+ req->peer_secid = peersid;
return selinux_netlbl_inet_conn_request(req, family);
}
@@ -4628,7 +4687,7 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
secmark_active = selinux_secmark_enabled();
netlbl_active = netlbl_enabled();
- peerlbl_active = netlbl_active || selinux_xfrm_enabled();
+ peerlbl_active = selinux_peerlbl_enabled();
if (!secmark_active && !peerlbl_active)
return NF_ACCEPT;
@@ -4690,6 +4749,7 @@ static unsigned int selinux_ipv6_forward(const struct nf_hook_ops *ops,
static unsigned int selinux_ip_output(struct sk_buff *skb,
u16 family)
{
+ struct sock *sk;
u32 sid;
if (!netlbl_enabled())
@@ -4698,8 +4758,27 @@ static unsigned int selinux_ip_output(struct sk_buff *skb,
/* we do this in the LOCAL_OUT path and not the POST_ROUTING path
* because we want to make sure we apply the necessary labeling
* before IPsec is applied so we can leverage AH protection */
- if (skb->sk) {
- struct sk_security_struct *sksec = skb->sk->sk_security;
+ sk = skb->sk;
+ if (sk) {
+ struct sk_security_struct *sksec;
+
+ if (sk->sk_state == TCP_LISTEN)
+ /* if the socket is in the listening state then this
+ * packet is a SYN-ACK packet which means it needs to
+ * be labeled based on the connection/request_sock and
+ * not the parent socket. unfortunately, we can't
+ * lookup the request_sock yet as it isn't queued on
+ * the parent socket until after the SYN-ACK is sent.
+ * the "solution" is to simply pass the packet as-is
+ * as any IP option based labeling should be copied
+ * from the initial connection request (in the IP
+ * layer). it is far from ideal, but until we get a
+ * security label in the packet itself this is the
+ * best we can do. */
+ return NF_ACCEPT;
+
+ /* standard practice, label using the parent socket */
+ sksec = sk->sk_security;
sid = sksec->sid;
} else
sid = SECINITSID_KERNEL;
@@ -4769,27 +4848,36 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
* as fast and as clean as possible. */
if (!selinux_policycap_netpeer)
return selinux_ip_postroute_compat(skb, ifindex, family);
+
+ secmark_active = selinux_secmark_enabled();
+ peerlbl_active = selinux_peerlbl_enabled();
+ if (!secmark_active && !peerlbl_active)
+ return NF_ACCEPT;
+
+ sk = skb->sk;
+
#ifdef CONFIG_XFRM
/* If skb->dst->xfrm is non-NULL then the packet is undergoing an IPsec
* packet transformation so allow the packet to pass without any checks
* since we'll have another chance to perform access control checks
 * when the packet is on its final way out.
* NOTE: there appear to be some IPv6 multicast cases where skb->dst
- * is NULL, in this case go ahead and apply access control. */
- if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL)
+ * is NULL, in this case go ahead and apply access control.
+ * NOTE: if this is a local socket (skb->sk != NULL) that is in the
+ * TCP listening state we cannot wait until the XFRM processing
+ * is done as we will miss out on the SA label if we do;
+ * unfortunately, this means more work, but it is only once per
+ * connection. */
+ if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL &&
+ !(sk != NULL && sk->sk_state == TCP_LISTEN))
return NF_ACCEPT;
#endif
- secmark_active = selinux_secmark_enabled();
- peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled();
- if (!secmark_active && !peerlbl_active)
- return NF_ACCEPT;
- /* if the packet is being forwarded then get the peer label from the
- * packet itself; otherwise check to see if it is from a local
- * application or the kernel, if from an application get the peer label
- * from the sending socket, otherwise use the kernel's sid */
- sk = skb->sk;
if (sk == NULL) {
+ /* Without an associated socket the packet is either coming
+ * from the kernel or it is being forwarded; check the packet
+ * to determine which, and if the packet is being forwarded
+ * query the packet directly to determine the security label. */
if (skb->skb_iif) {
secmark_perm = PACKET__FORWARD_OUT;
if (selinux_skb_peerlbl_sid(skb, family, &peer_sid))
@@ -4798,7 +4886,45 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
secmark_perm = PACKET__SEND;
peer_sid = SECINITSID_KERNEL;
}
+ } else if (sk->sk_state == TCP_LISTEN) {
+ /* Locally generated packet but the associated socket is in the
+ * listening state which means this is a SYN-ACK packet. In
+ * this particular case the correct security label is assigned
+ * to the connection/request_sock but unfortunately we can't
+ * query the request_sock as it isn't queued on the parent
+ * socket until after the SYN-ACK packet is sent; the only
+ * viable choice is to regenerate the label like we do in
+ * selinux_inet_conn_request(). See also selinux_ip_output()
+ * for similar problems. */
+ u32 skb_sid;
+ struct sk_security_struct *sksec = sk->sk_security;
+ if (selinux_skb_peerlbl_sid(skb, family, &skb_sid))
+ return NF_DROP;
+ /* At this point, if the returned skb peerlbl is SECSID_NULL
+ * and the packet has been through at least one XFRM
+ * transformation then we must be dealing with the "final"
+ * form of labeled IPsec packet; since we've already applied
+ * all of our access controls on this packet we can safely
+ * pass the packet. */
+ if (skb_sid == SECSID_NULL) {
+ switch (family) {
+ case PF_INET:
+ if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
+ return NF_ACCEPT;
+ break;
+ case PF_INET6:
+ if (IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
+ return NF_ACCEPT;
+ break;
+ default:
+ return NF_DROP_ERR(-ECONNREFUSED);
+ }
+ }
+ if (selinux_conn_sid(sksec->sid, skb_sid, &peer_sid))
+ return NF_DROP;
+ secmark_perm = PACKET__SEND;
} else {
+ /* Locally generated packet, fetch the security label from the
+ * associated socket. */
struct sk_security_struct *sksec = sk->sk_security;
peer_sid = sksec->sid;
secmark_perm = PACKET__SEND;
@@ -5462,11 +5588,11 @@ static int selinux_setprocattr(struct task_struct *p,
/* Check for ptracing, and update the task SID if ok.
Otherwise, leave SID unchanged and fail. */
ptsid = 0;
- task_lock(p);
+ rcu_read_lock();
tracer = ptrace_parent(p);
if (tracer)
ptsid = task_sid(tracer);
- task_unlock(p);
+ rcu_read_unlock();
if (tracer) {
error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS,
@@ -5784,7 +5910,8 @@ static struct security_operations selinux_ops = {
.xfrm_policy_clone_security = selinux_xfrm_policy_clone,
.xfrm_policy_free_security = selinux_xfrm_policy_free,
.xfrm_policy_delete_security = selinux_xfrm_policy_delete,
- .xfrm_state_alloc_security = selinux_xfrm_state_alloc,
+ .xfrm_state_alloc = selinux_xfrm_state_alloc,
+ .xfrm_state_alloc_acquire = selinux_xfrm_state_alloc_acquire,
.xfrm_state_free_security = selinux_xfrm_state_free,
.xfrm_state_delete_security = selinux_xfrm_state_delete,
.xfrm_policy_lookup = selinux_xfrm_policy_lookup,
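The selinux_conn_sid() helper introduced above centralizes the child-socket labeling rule that both the connection-request and postroute paths now share: keep the parent socket's user:role:type and take the MLS component from the packet whenever the packet carries a label. A minimal user-space sketch of that decision, with security_sid_mls_copy() replaced by a toy stand-in (the bit masking is illustrative only, not the kernel's SID encoding):

#include <stdio.h>

#define SECSID_NULL 0u

/* toy stand-in: the kernel asks the security server for a context
 * built from sk_sid's user:role:type and skb_sid's MLS range */
static int security_sid_mls_copy(unsigned sk_sid, unsigned skb_sid,
				 unsigned *out)
{
	*out = (sk_sid & 0xffff0000) | (skb_sid & 0x0000ffff);
	return 0;
}

static int derive_conn_sid(unsigned sk_sid, unsigned skb_sid,
			   unsigned *conn_sid)
{
	if (skb_sid != SECSID_NULL)
		return security_sid_mls_copy(sk_sid, skb_sid, conn_sid);
	*conn_sid = sk_sid;	/* unlabeled packet: inherit the parent */
	return 0;
}

int main(void)
{
	unsigned sid;

	derive_conn_sid(0x00010002, SECSID_NULL, &sid);	/* sid = 0x10002 */
	derive_conn_sid(0x00010002, 0x00000007, &sid);	/* sid = 0x10007 */
	printf("%#x\n", sid);
	return 0;
}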
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index aa47bcabb5f6..b1dfe1049450 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -58,8 +58,8 @@ struct superblock_security_struct {
u32 sid; /* SID of file system superblock */
u32 def_sid; /* default SID for labeling */
u32 mntpoint_sid; /* SECURITY_FS_USE_MNTPOINT context for files */
- unsigned int behavior; /* labeling behavior */
- unsigned char flags; /* which mount options were specified */
+ unsigned short behavior; /* labeling behavior */
+ unsigned short flags; /* which mount options were specified */
struct mutex lock;
struct list_head isec_head;
spinlock_t isec_lock;
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 8fd8e18ea340..fe341ae37004 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -45,14 +45,15 @@
/* Mask for just the mount related flags */
#define SE_MNTMASK 0x0f
/* Super block security struct flags for mount options */
+/* BE CAREFUL, these need to be the low order bits for selinux_get_mnt_opts */
#define CONTEXT_MNT 0x01
#define FSCONTEXT_MNT 0x02
#define ROOTCONTEXT_MNT 0x04
#define DEFCONTEXT_MNT 0x08
+#define SBLABEL_MNT 0x10
/* Non-mount related flags */
-#define SE_SBINITIALIZED 0x10
-#define SE_SBPROC 0x20
-#define SE_SBLABELSUPP 0x40
+#define SE_SBINITIALIZED 0x0100
+#define SE_SBPROC 0x0200
#define CONTEXT_STR "context="
#define FSCONTEXT_STR "fscontext="
@@ -68,12 +69,15 @@ extern int selinux_enabled;
enum {
POLICYDB_CAPABILITY_NETPEER,
POLICYDB_CAPABILITY_OPENPERM,
+ POLICYDB_CAPABILITY_REDHAT1,
+ POLICYDB_CAPABILITY_ALWAYSNETWORK,
__POLICYDB_CAPABILITY_MAX
};
#define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1)
extern int selinux_policycap_netpeer;
extern int selinux_policycap_openperm;
+extern int selinux_policycap_alwaysnetwork;
/*
* type_datum properties
@@ -172,8 +176,7 @@ int security_get_allow_unknown(void);
#define SECURITY_FS_USE_NATIVE 7 /* use native label support */
#define SECURITY_FS_USE_MAX 7 /* Highest SECURITY_FS_USE_XXX */
-int security_fs_use(const char *fstype, unsigned int *behavior,
- u32 *sid);
+int security_fs_use(struct super_block *sb);
int security_genfs_sid(const char *fstype, char *name, u16 sclass,
u32 *sid);
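The objsec.h and security.h hunks above work together: the superblock flags field widens from unsigned char to unsigned short, the mount-option flags (including the new SBLABEL_MNT) stay in the low-order bits as the new comment demands, and the superblock state bits move up a byte. A quick self-contained check of the layout, with the values copied from the hunk:

#include <stdio.h>

#define CONTEXT_MNT	0x01
#define FSCONTEXT_MNT	0x02
#define ROOTCONTEXT_MNT	0x04
#define DEFCONTEXT_MNT	0x08
#define SBLABEL_MNT	0x10
#define SE_SBINITIALIZED 0x0100	/* non-mount state, moved up a byte */
#define SE_SBPROC	0x0200

int main(void)
{
	unsigned short flags = CONTEXT_MNT | SBLABEL_MNT | SE_SBINITIALIZED;

	/* mount options now all sit in the low byte, so they can be
	 * tested or walked by bit position without colliding with the
	 * superblock state bits */
	printf("mount bits: %#x, state bits: %#x\n",
	       flags & 0x00ff, flags & 0xff00);
	return 0;
}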
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
index 6713f04e30ba..48c3cc94c168 100644
--- a/security/selinux/include/xfrm.h
+++ b/security/selinux/include/xfrm.h
@@ -10,29 +10,21 @@
#include <net/flow.h>
int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *sec_ctx);
+ struct xfrm_user_sec_ctx *uctx);
int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
struct xfrm_sec_ctx **new_ctxp);
void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx);
int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx);
int selinux_xfrm_state_alloc(struct xfrm_state *x,
- struct xfrm_user_sec_ctx *sec_ctx, u32 secid);
+ struct xfrm_user_sec_ctx *uctx);
+int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec, u32 secid);
void selinux_xfrm_state_free(struct xfrm_state *x);
int selinux_xfrm_state_delete(struct xfrm_state *x);
int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
- struct xfrm_policy *xp, const struct flowi *fl);
-
-/*
- * Extract the security blob from the sock (it's actually on the socket)
- */
-static inline struct inode_security_struct *get_sock_isec(struct sock *sk)
-{
- if (!sk->sk_socket)
- return NULL;
-
- return SOCK_INODE(sk->sk_socket)->i_security;
-}
+ struct xfrm_policy *xp,
+ const struct flowi *fl);
#ifdef CONFIG_SECURITY_NETWORK_XFRM
extern atomic_t selinux_xfrm_refcount;
@@ -42,11 +34,12 @@ static inline int selinux_xfrm_enabled(void)
return (atomic_read(&selinux_xfrm_refcount) > 0);
}
-int selinux_xfrm_sock_rcv_skb(u32 sid, struct sk_buff *skb,
- struct common_audit_data *ad);
-int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
- struct common_audit_data *ad, u8 proto);
+int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad);
+int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad, u8 proto);
int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
+int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid);
static inline void selinux_xfrm_notify_policyload(void)
{
@@ -64,19 +57,21 @@ static inline int selinux_xfrm_enabled(void)
return 0;
}
-static inline int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
- struct common_audit_data *ad)
+static inline int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad)
{
return 0;
}
-static inline int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
- struct common_audit_data *ad, u8 proto)
+static inline int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad,
+ u8 proto)
{
return 0;
}
-static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
+static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid,
+ int ckall)
{
*sid = SECSID_NULL;
return 0;
@@ -85,12 +80,12 @@ static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int
static inline void selinux_xfrm_notify_policyload(void)
{
}
-#endif
-static inline void selinux_skb_xfrm_sid(struct sk_buff *skb, u32 *sid)
+static inline int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
{
- int err = selinux_xfrm_decode_session(skb, sid, 0);
- BUG_ON(err);
+ *sid = SECSID_NULL;
+ return 0;
}
+#endif
#endif /* _SELINUX_XFRM_H_ */
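The header above follows the usual config-stub idiom for the new selinux_xfrm_skb_sid(): a real declaration under CONFIG_SECURITY_NETWORK_XFRM and a static inline that reports SECSID_NULL otherwise, so callers in hooks.c never need an #ifdef of their own. The same pattern in isolation (build with or without -DCONFIG_SECURITY_NETWORK_XFRM; the value 42 is a stand-in for a real SA lookup):

#include <stdio.h>

#define SECSID_NULL 0u

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static unsigned xfrm_skb_sid(void)
{
	return 42;	/* stand-in for the real sec_path lookup */
}
#else
static inline unsigned xfrm_skb_sid(void)
{
	return SECSID_NULL;	/* no XFRM support: packets carry no label */
}
#endif

int main(void)
{
	printf("skb sid = %u\n", xfrm_skb_sid());
	return 0;
}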
diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
index da4b8b233280..6235d052338b 100644
--- a/security/selinux/netlabel.c
+++ b/security/selinux/netlabel.c
@@ -442,8 +442,7 @@ int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr)
sksec->nlbl_state != NLBL_CONNLABELED)
return 0;
- local_bh_disable();
- bh_lock_sock_nested(sk);
+ lock_sock(sk);
/* connected sockets are allowed to disconnect when the address family
* is set to AF_UNSPEC, if that is what is happening we want to reset
@@ -464,7 +463,6 @@ int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr)
sksec->nlbl_state = NLBL_CONNLABELED;
socket_connect_return:
- bh_unlock_sock(sk);
- local_bh_enable();
+ release_sock(sk);
return rc;
}
diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c
index c5454c0477c3..03a72c32afd7 100644
--- a/security/selinux/netnode.c
+++ b/security/selinux/netnode.c
@@ -166,6 +166,7 @@ static void sel_netnode_insert(struct sel_netnode *node)
break;
default:
BUG();
+ return;
}
/* we need to impose a limit on the growth of the hash table so check
@@ -225,6 +226,7 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
break;
default:
BUG();
+ ret = -EINVAL;
}
if (ret != 0)
goto out;
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 855e464e92ef..332ac8a80cf5 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -116,6 +116,8 @@ static struct nlmsg_perm nlmsg_audit_perms[] =
{ AUDIT_MAKE_EQUIV, NETLINK_AUDIT_SOCKET__NLMSG_WRITE },
{ AUDIT_TTY_GET, NETLINK_AUDIT_SOCKET__NLMSG_READ },
{ AUDIT_TTY_SET, NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT },
+ { AUDIT_GET_FEATURE, NETLINK_AUDIT_SOCKET__NLMSG_READ },
+ { AUDIT_SET_FEATURE, NETLINK_AUDIT_SOCKET__NLMSG_WRITE },
};
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index ff427733c290..5122affe06a8 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -44,7 +44,9 @@
/* Policy capability filenames */
static char *policycap_names[] = {
"network_peer_controls",
- "open_perms"
+ "open_perms",
+ "redhat1",
+ "always_check_network"
};
unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;
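The selinuxfs filenames are looked up by the POLICYDB_CAPABILITY_* enum index from security.h, so the two lists must stay in step whenever an entry such as always_check_network is added; a minimal alignment demo with the enum and strings copied from the hunks:

#include <stdio.h>

enum {
	POLICYDB_CAPABILITY_NETPEER,
	POLICYDB_CAPABILITY_OPENPERM,
	POLICYDB_CAPABILITY_REDHAT1,
	POLICYDB_CAPABILITY_ALWAYSNETWORK,
	__POLICYDB_CAPABILITY_MAX
};

/* must stay index-aligned with the enum above */
static const char *policycap_names[] = {
	"network_peer_controls",
	"open_perms",
	"redhat1",
	"always_check_network",
};

int main(void)
{
	printf("%s\n", policycap_names[POLICYDB_CAPABILITY_ALWAYSNETWORK]);
	return 0;
}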
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index 30f119b1d1ec..820313a04d49 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -213,7 +213,12 @@ netlbl_import_failure:
}
#endif /* CONFIG_NETLABEL */
-int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2)
+/*
+ * Check to see if all the bits set in e2 are also set in e1. Optionally,
+ * if last_e2bit is non-zero, the highest set bit in e2 cannot exceed
+ * last_e2bit.
+ */
+int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit)
{
struct ebitmap_node *n1, *n2;
int i;
@@ -223,14 +228,25 @@ int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2)
n1 = e1->node;
n2 = e2->node;
+
while (n1 && n2 && (n1->startbit <= n2->startbit)) {
if (n1->startbit < n2->startbit) {
n1 = n1->next;
continue;
}
- for (i = 0; i < EBITMAP_UNIT_NUMS; i++) {
+ for (i = EBITMAP_UNIT_NUMS - 1; (i >= 0) && !n2->maps[i]; )
+ i--; /* Skip trailing NULL map entries */
+ if (last_e2bit && (i >= 0)) {
+ u32 lastsetbit = n2->startbit + i * EBITMAP_UNIT_SIZE +
+ __fls(n2->maps[i]);
+ if (lastsetbit > last_e2bit)
+ return 0;
+ }
+
+ while (i >= 0) {
if ((n1->maps[i] & n2->maps[i]) != n2->maps[i])
return 0;
+ i--;
}
n1 = n1->next;
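ebitmap_contains() now also enforces an upper bound on the highest bit set in e2 when last_e2bit is non-zero. The same check on a flat bitmap, as a minimal sketch (kernel ebitmaps are linked nodes of unsigned long maps, collapsed here to one word for clarity):

#include <stdio.h>

/* Are all bits set in e2 also set in e1, and, when last_e2bit is
 * non-zero, does e2's highest set bit stay at or below last_e2bit? */
static int contains(unsigned long e1, unsigned long e2,
		    unsigned last_e2bit)
{
	if (last_e2bit && e2) {
		unsigned highest = 8 * sizeof(e2) - 1;

		while (!(e2 & (1UL << highest)))
			highest--;
		if (highest > last_e2bit)
			return 0;
	}
	return (e1 & e2) == e2;
}

int main(void)
{
	printf("%d\n", contains(0x0f, 0x05, 0));	/* 1: subset          */
	printf("%d\n", contains(0x0f, 0x15, 0));	/* 0: bit 4 missing   */
	printf("%d\n", contains(0xff, 0x80, 3));	/* 0: bit 7 above cap */
	return 0;
}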
diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h
index 922f8afa89dd..712c8a7b8e8b 100644
--- a/security/selinux/ss/ebitmap.h
+++ b/security/selinux/ss/ebitmap.h
@@ -16,7 +16,13 @@
#include <net/netlabel.h>
-#define EBITMAP_UNIT_NUMS ((32 - sizeof(void *) - sizeof(u32)) \
+#ifdef CONFIG_64BIT
+#define EBITMAP_NODE_SIZE 64
+#else
+#define EBITMAP_NODE_SIZE 32
+#endif
+
+#define EBITMAP_UNIT_NUMS ((EBITMAP_NODE_SIZE-sizeof(void *)-sizeof(u32))\
/ sizeof(unsigned long))
#define EBITMAP_UNIT_SIZE BITS_PER_LONG
#define EBITMAP_SIZE (EBITMAP_UNIT_NUMS * EBITMAP_UNIT_SIZE)
@@ -117,7 +123,7 @@ static inline void ebitmap_node_clr_bit(struct ebitmap_node *n,
int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2);
int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src);
-int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2);
+int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit);
int ebitmap_get_bit(struct ebitmap *e, unsigned long bit);
int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value);
void ebitmap_destroy(struct ebitmap *e);
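Pinning EBITMAP_NODE_SIZE at 64 bytes on 64-bit builds makes each node roughly one typical cache line, and the derived constants follow from the structure layout. A compile-and-run check of the arithmetic, assuming 8-byte pointers and longs and with unsigned standing in for u32:

#include <stdio.h>

#define EBITMAP_NODE_SIZE 64	/* 64-bit kernel */
#define EBITMAP_UNIT_NUMS ((EBITMAP_NODE_SIZE - sizeof(void *) \
			    - sizeof(unsigned)) / sizeof(unsigned long))
#define EBITMAP_UNIT_SIZE (8 * sizeof(unsigned long))
#define EBITMAP_SIZE (EBITMAP_UNIT_NUMS * EBITMAP_UNIT_SIZE)

int main(void)
{
	/* (64 - 8 - 4) / 8 = 6 maps of 64 bits = 384 bits per node */
	printf("units=%zu bits/node=%zu\n",
	       EBITMAP_UNIT_NUMS, EBITMAP_SIZE);
	return 0;
}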
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index 40de8d3f208e..c85bc1ec040c 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -160,8 +160,6 @@ void mls_sid_to_context(struct context *context,
int mls_level_isvalid(struct policydb *p, struct mls_level *l)
{
struct level_datum *levdatum;
- struct ebitmap_node *node;
- int i;
if (!l->sens || l->sens > p->p_levels.nprim)
return 0;
@@ -170,19 +168,13 @@ int mls_level_isvalid(struct policydb *p, struct mls_level *l)
if (!levdatum)
return 0;
- ebitmap_for_each_positive_bit(&l->cat, node, i) {
- if (i > p->p_cats.nprim)
- return 0;
- if (!ebitmap_get_bit(&levdatum->level->cat, i)) {
- /*
- * Category may not be associated with
- * sensitivity.
- */
- return 0;
- }
- }
-
- return 1;
+ /*
+ * Return 1 iff all the bits set in l->cat are also set in
+ * levdatum->level->cat and no bit in l->cat is larger than
+ * p->p_cats.nprim.
+ */
+ return ebitmap_contains(&levdatum->level->cat, &l->cat,
+ p->p_cats.nprim);
}
int mls_range_isvalid(struct policydb *p, struct mls_range *r)
diff --git a/security/selinux/ss/mls_types.h b/security/selinux/ss/mls_types.h
index 03bed52a8052..e93648774137 100644
--- a/security/selinux/ss/mls_types.h
+++ b/security/selinux/ss/mls_types.h
@@ -35,7 +35,7 @@ static inline int mls_level_eq(struct mls_level *l1, struct mls_level *l2)
static inline int mls_level_dom(struct mls_level *l1, struct mls_level *l2)
{
return ((l1->sens >= l2->sens) &&
- ebitmap_contains(&l1->cat, &l2->cat));
+ ebitmap_contains(&l1->cat, &l2->cat, 0));
}
#define mls_level_incomp(l1, l2) \
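mls_level_dom() keeps its meaning under the new ebitmap_contains() signature: l1 dominates l2 when l1's sensitivity is at least l2's and l2's category set is a subset of l1's (passing 0 as last_e2bit disables the new bound check). On flat bitmaps the relation reduces to the following sketch, where struct level stands in for struct mls_level:

#include <stdio.h>

struct level {
	unsigned sens;
	unsigned long cat;	/* flat category bitmap for the sketch */
};

static int level_dom(const struct level *l1, const struct level *l2)
{
	return l1->sens >= l2->sens &&
	       (l1->cat & l2->cat) == l2->cat; /* l2 cats subset of l1 */
}

int main(void)
{
	struct level secret = { 2, 0x07 }, confidential = { 1, 0x05 };

	printf("%d\n", level_dom(&secret, &confidential));	/* 1 */
	printf("%d\n", level_dom(&confidential, &secret));	/* 0 */
	return 0;
}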
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index c8adde3aff8f..f6195ebde3c9 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -3203,9 +3203,8 @@ static int range_write_helper(void *key, void *data, void *ptr)
static int range_write(struct policydb *p, void *fp)
{
- size_t nel;
__le32 buf[1];
- int rc;
+ int rc, nel;
struct policy_data pd;
pd.p = p;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index b4feecc3fe01..d106733ad987 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -72,6 +72,7 @@
int selinux_policycap_netpeer;
int selinux_policycap_openperm;
+int selinux_policycap_alwaysnetwork;
static DEFINE_RWLOCK(policy_rwlock);
@@ -1812,6 +1813,8 @@ static void security_load_policycaps(void)
POLICYDB_CAPABILITY_NETPEER);
selinux_policycap_openperm = ebitmap_get_bit(&policydb.policycaps,
POLICYDB_CAPABILITY_OPENPERM);
+ selinux_policycap_alwaysnetwork = ebitmap_get_bit(&policydb.policycaps,
+ POLICYDB_CAPABILITY_ALWAYSNETWORK);
}
static int security_preserve_bools(struct policydb *p);
@@ -2323,17 +2326,14 @@ out:
/**
* security_fs_use - Determine how to handle labeling for a filesystem.
- * @fstype: filesystem type
- * @behavior: labeling behavior
- * @sid: SID for filesystem (superblock)
+ * @sb: superblock in question
*/
-int security_fs_use(
- const char *fstype,
- unsigned int *behavior,
- u32 *sid)
+int security_fs_use(struct super_block *sb)
{
int rc = 0;
struct ocontext *c;
+ struct superblock_security_struct *sbsec = sb->s_security;
+ const char *fstype = sb->s_type->name;
read_lock(&policy_rwlock);
@@ -2345,21 +2345,21 @@ int security_fs_use(
}
if (c) {
- *behavior = c->v.behavior;
+ sbsec->behavior = c->v.behavior;
if (!c->sid[0]) {
rc = sidtab_context_to_sid(&sidtab, &c->context[0],
&c->sid[0]);
if (rc)
goto out;
}
- *sid = c->sid[0];
+ sbsec->sid = c->sid[0];
} else {
- rc = security_genfs_sid(fstype, "/", SECCLASS_DIR, sid);
+ rc = security_genfs_sid(fstype, "/", SECCLASS_DIR, &sbsec->sid);
if (rc) {
- *behavior = SECURITY_FS_USE_NONE;
+ sbsec->behavior = SECURITY_FS_USE_NONE;
rc = 0;
} else {
- *behavior = SECURITY_FS_USE_GENFS;
+ sbsec->behavior = SECURITY_FS_USE_GENFS;
}
}
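security_fs_use() now writes straight into the superblock's security struct, but the fallback chain is unchanged: an explicit fs_use ocontext wins, otherwise the genfs root label is used, otherwise labeling is disabled. The control flow reduced to a runnable sketch, where the behavior values, SIDs, and both lookups are stand-ins for the policy database:

#include <stdio.h>
#include <string.h>

enum { FS_USE_NONE, FS_USE_XATTR, FS_USE_GENFS };	/* stand-ins */

struct sbsec { int behavior; unsigned sid; };

static int fs_use_lookup(const char *fstype, unsigned *sid)
{
	if (!strcmp(fstype, "ext4")) { *sid = 100; return 1; }
	return 0;
}

static int genfs_root_sid(const char *fstype, unsigned *sid)
{
	if (!strcmp(fstype, "proc")) { *sid = 200; return 0; }
	return -1;
}

static void fs_use(const char *fstype, struct sbsec *sbsec)
{
	if (fs_use_lookup(fstype, &sbsec->sid)) {
		sbsec->behavior = FS_USE_XATTR;
	} else if (genfs_root_sid(fstype, &sbsec->sid) == 0) {
		sbsec->behavior = FS_USE_GENFS;
	} else {
		sbsec->behavior = FS_USE_NONE;	/* unknown fs: no labels */
		sbsec->sid = 0;
	}
}

int main(void)
{
	struct sbsec s;

	fs_use("proc", &s);
	printf("behavior=%d sid=%u\n", s.behavior, s.sid);
	return 0;
}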
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index d03081886214..0462cb3ff0a7 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -56,7 +56,7 @@
atomic_t selinux_xfrm_refcount = ATOMIC_INIT(0);
/*
- * Returns true if an LSM/SELinux context
+ * Returns true if the context is an LSM/SELinux context.
*/
static inline int selinux_authorizable_ctx(struct xfrm_sec_ctx *ctx)
{
@@ -66,7 +66,7 @@ static inline int selinux_authorizable_ctx(struct xfrm_sec_ctx *ctx)
}
/*
- * Returns true if the xfrm contains a security blob for SELinux
+ * Returns true if the xfrm contains a security blob for SELinux.
*/
static inline int selinux_authorizable_xfrm(struct xfrm_state *x)
{
@@ -74,48 +74,111 @@ static inline int selinux_authorizable_xfrm(struct xfrm_state *x)
}
/*
- * LSM hook implementation that authorizes that a flow can use
- * a xfrm policy rule.
+ * Allocates an xfrm_sec_ctx and populates it using the supplied security
+ * xfrm_user_sec_ctx context.
*/
-int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
+static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *uctx)
{
int rc;
- u32 sel_sid;
+ const struct task_security_struct *tsec = current_security();
+ struct xfrm_sec_ctx *ctx = NULL;
+ u32 str_len;
- /* Context sid is either set to label or ANY_ASSOC */
- if (ctx) {
- if (!selinux_authorizable_ctx(ctx))
- return -EINVAL;
-
- sel_sid = ctx->ctx_sid;
- } else
- /*
- * All flows should be treated as polmatch'ing an
- * otherwise applicable "non-labeled" policy. This
- * would prevent inadvertent "leaks".
- */
- return 0;
+ if (ctxp == NULL || uctx == NULL ||
+ uctx->ctx_doi != XFRM_SC_DOI_LSM ||
+ uctx->ctx_alg != XFRM_SC_ALG_SELINUX)
+ return -EINVAL;
+
+ str_len = uctx->ctx_len;
+ if (str_len >= PAGE_SIZE)
+ return -ENOMEM;
+
+ ctx = kmalloc(sizeof(*ctx) + str_len + 1, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
- rc = avc_has_perm(fl_secid, sel_sid, SECCLASS_ASSOCIATION,
- ASSOCIATION__POLMATCH,
- NULL);
+ ctx->ctx_doi = XFRM_SC_DOI_LSM;
+ ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
+ ctx->ctx_len = str_len;
+ memcpy(ctx->ctx_str, &uctx[1], str_len);
+ ctx->ctx_str[str_len] = '\0';
+ rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid);
+ if (rc)
+ goto err;
- if (rc == -EACCES)
- return -ESRCH;
+ rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT, NULL);
+ if (rc)
+ goto err;
+ *ctxp = ctx;
+ atomic_inc(&selinux_xfrm_refcount);
+ return 0;
+
+err:
+ kfree(ctx);
return rc;
}
/*
+ * Free the xfrm_sec_ctx structure.
+ */
+static void selinux_xfrm_free(struct xfrm_sec_ctx *ctx)
+{
+ if (!ctx)
+ return;
+
+ atomic_dec(&selinux_xfrm_refcount);
+ kfree(ctx);
+}
+
+/*
+ * Authorize the deletion of a labeled SA or policy rule.
+ */
+static int selinux_xfrm_delete(struct xfrm_sec_ctx *ctx)
+{
+ const struct task_security_struct *tsec = current_security();
+
+ if (!ctx)
+ return 0;
+
+ return avc_has_perm(tsec->sid, ctx->ctx_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
+ NULL);
+}
+
+/*
+ * LSM hook implementation that authorizes a flow to use an xfrm policy
+ * rule.
+ */
+int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
+{
+ int rc;
+
+ /* All flows should be treated as polmatch'ing an otherwise applicable
+ * "non-labeled" policy. This would prevent inadvertent "leaks". */
+ if (!ctx)
+ return 0;
+
+ /* Context sid is either set to label or ANY_ASSOC */
+ if (!selinux_authorizable_ctx(ctx))
+ return -EINVAL;
+
+ rc = avc_has_perm(fl_secid, ctx->ctx_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__POLMATCH, NULL);
+ return (rc == -EACCES ? -ESRCH : rc);
+}
+
+/*
* LSM hook implementation that authorizes that a state matches
* the given policy, flow combo.
*/
-
-int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp,
- const struct flowi *fl)
+int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
+ struct xfrm_policy *xp,
+ const struct flowi *fl)
{
u32 state_sid;
- int rc;
if (!xp->security)
if (x->security)
@@ -138,187 +201,111 @@ int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *
if (fl->flowi_secid != state_sid)
return 0;
- rc = avc_has_perm(fl->flowi_secid, state_sid, SECCLASS_ASSOCIATION,
- ASSOCIATION__SENDTO,
- NULL)? 0:1;
-
- /*
- * We don't need a separate SA Vs. policy polmatch check
- * since the SA is now of the same label as the flow and
- * a flow Vs. policy polmatch check had already happened
- * in selinux_xfrm_policy_lookup() above.
- */
-
- return rc;
+ /* We don't need a separate SA Vs. policy polmatch check since the SA
+ * is now of the same label as the flow and a flow Vs. policy polmatch
+ * check had already happened in selinux_xfrm_policy_lookup() above. */
+ return (avc_has_perm(fl->flowi_secid, state_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO,
+ NULL) ? 0 : 1);
}
-/*
- * LSM hook implementation that checks and/or returns the xfrm sid for the
- * incoming packet.
- */
-
-int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
+static u32 selinux_xfrm_skb_sid_egress(struct sk_buff *skb)
{
- struct sec_path *sp;
+ struct dst_entry *dst = skb_dst(skb);
+ struct xfrm_state *x;
- *sid = SECSID_NULL;
+ if (dst == NULL)
+ return SECSID_NULL;
+ x = dst->xfrm;
+ if (x == NULL || !selinux_authorizable_xfrm(x))
+ return SECSID_NULL;
- if (skb == NULL)
- return 0;
+ return x->security->ctx_sid;
+}
+
+static int selinux_xfrm_skb_sid_ingress(struct sk_buff *skb,
+ u32 *sid, int ckall)
+{
+ u32 sid_session = SECSID_NULL;
+ struct sec_path *sp = skb->sp;
- sp = skb->sp;
if (sp) {
- int i, sid_set = 0;
+ int i;
- for (i = sp->len-1; i >= 0; i--) {
+ for (i = sp->len - 1; i >= 0; i--) {
struct xfrm_state *x = sp->xvec[i];
if (selinux_authorizable_xfrm(x)) {
struct xfrm_sec_ctx *ctx = x->security;
- if (!sid_set) {
- *sid = ctx->ctx_sid;
- sid_set = 1;
-
+ if (sid_session == SECSID_NULL) {
+ sid_session = ctx->ctx_sid;
if (!ckall)
- break;
- } else if (*sid != ctx->ctx_sid)
+ goto out;
+ } else if (sid_session != ctx->ctx_sid) {
+ *sid = SECSID_NULL;
return -EINVAL;
+ }
}
}
}
+out:
+ *sid = sid_session;
return 0;
}
/*
- * Security blob allocation for xfrm_policy and xfrm_state
- * CTX does not have a meaningful value on input
+ * LSM hook implementation that checks and/or returns the xfrm sid for the
+ * incoming packet.
*/
-static int selinux_xfrm_sec_ctx_alloc(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *uctx, u32 sid)
+int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
{
- int rc = 0;
- const struct task_security_struct *tsec = current_security();
- struct xfrm_sec_ctx *ctx = NULL;
- char *ctx_str = NULL;
- u32 str_len;
-
- BUG_ON(uctx && sid);
-
- if (!uctx)
- goto not_from_user;
-
- if (uctx->ctx_alg != XFRM_SC_ALG_SELINUX)
- return -EINVAL;
-
- str_len = uctx->ctx_len;
- if (str_len >= PAGE_SIZE)
- return -ENOMEM;
-
- *ctxp = ctx = kmalloc(sizeof(*ctx) +
- str_len + 1,
- GFP_KERNEL);
-
- if (!ctx)
- return -ENOMEM;
-
- ctx->ctx_doi = uctx->ctx_doi;
- ctx->ctx_len = str_len;
- ctx->ctx_alg = uctx->ctx_alg;
-
- memcpy(ctx->ctx_str,
- uctx+1,
- str_len);
- ctx->ctx_str[str_len] = 0;
- rc = security_context_to_sid(ctx->ctx_str,
- str_len,
- &ctx->ctx_sid);
-
- if (rc)
- goto out;
-
- /*
- * Does the subject have permission to set security context?
- */
- rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
- SECCLASS_ASSOCIATION,
- ASSOCIATION__SETCONTEXT, NULL);
- if (rc)
- goto out;
-
- return rc;
-
-not_from_user:
- rc = security_sid_to_context(sid, &ctx_str, &str_len);
- if (rc)
- goto out;
-
- *ctxp = ctx = kmalloc(sizeof(*ctx) +
- str_len,
- GFP_ATOMIC);
-
- if (!ctx) {
- rc = -ENOMEM;
- goto out;
+ if (skb == NULL) {
+ *sid = SECSID_NULL;
+ return 0;
}
+ return selinux_xfrm_skb_sid_ingress(skb, sid, ckall);
+}
- ctx->ctx_doi = XFRM_SC_DOI_LSM;
- ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
- ctx->ctx_sid = sid;
- ctx->ctx_len = str_len;
- memcpy(ctx->ctx_str,
- ctx_str,
- str_len);
+int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
+{
+ int rc;
- goto out2;
+ rc = selinux_xfrm_skb_sid_ingress(skb, sid, 0);
+ if (rc == 0 && *sid == SECSID_NULL)
+ *sid = selinux_xfrm_skb_sid_egress(skb);
-out:
- *ctxp = NULL;
- kfree(ctx);
-out2:
- kfree(ctx_str);
return rc;
}
/*
- * LSM hook implementation that allocs and transfers uctx spec to
- * xfrm_policy.
+ * LSM hook implementation that allocs and transfers uctx spec to xfrm_policy.
*/
int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
struct xfrm_user_sec_ctx *uctx)
{
- int err;
-
- BUG_ON(!uctx);
-
- err = selinux_xfrm_sec_ctx_alloc(ctxp, uctx, 0);
- if (err == 0)
- atomic_inc(&selinux_xfrm_refcount);
-
- return err;
+ return selinux_xfrm_alloc_user(ctxp, uctx);
}
-
/*
- * LSM hook implementation that copies security data structure from old to
- * new for policy cloning.
+ * LSM hook implementation that copies security data structure from old to new
+ * for policy cloning.
*/
int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
struct xfrm_sec_ctx **new_ctxp)
{
struct xfrm_sec_ctx *new_ctx;
- if (old_ctx) {
- new_ctx = kmalloc(sizeof(*old_ctx) + old_ctx->ctx_len,
- GFP_ATOMIC);
- if (!new_ctx)
- return -ENOMEM;
+ if (!old_ctx)
+ return 0;
+
+ new_ctx = kmemdup(old_ctx, sizeof(*old_ctx) + old_ctx->ctx_len,
+ GFP_ATOMIC);
+ if (!new_ctx)
+ return -ENOMEM;
+ atomic_inc(&selinux_xfrm_refcount);
+ *new_ctxp = new_ctx;
- memcpy(new_ctx, old_ctx, sizeof(*new_ctx));
- memcpy(new_ctx->ctx_str, old_ctx->ctx_str, new_ctx->ctx_len);
- atomic_inc(&selinux_xfrm_refcount);
- *new_ctxp = new_ctx;
- }
return 0;
}
@@ -327,8 +314,7 @@ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
*/
void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
{
- atomic_dec(&selinux_xfrm_refcount);
- kfree(ctx);
+ selinux_xfrm_free(ctx);
}
/*
@@ -336,31 +322,58 @@ void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
*/
int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
{
- const struct task_security_struct *tsec = current_security();
-
- if (!ctx)
- return 0;
+ return selinux_xfrm_delete(ctx);
+}
- return avc_has_perm(tsec->sid, ctx->ctx_sid,
- SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
- NULL);
+/*
+ * LSM hook implementation that allocates an xfrm_sec_ctx, populates it using
+ * the supplied security context, and assigns it to the xfrm_state.
+ */
+int selinux_xfrm_state_alloc(struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *uctx)
+{
+ return selinux_xfrm_alloc_user(&x->security, uctx);
}
/*
- * LSM hook implementation that allocs and transfers sec_ctx spec to
- * xfrm_state.
+ * LSM hook implementation that allocates an xfrm_sec_ctx and populates it based
+ * on a secid.
*/
-int selinux_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *uctx,
- u32 secid)
+int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec, u32 secid)
{
- int err;
+ int rc;
+ struct xfrm_sec_ctx *ctx;
+ char *ctx_str = NULL;
+ int str_len;
+
+ if (!polsec)
+ return 0;
+
+ if (secid == 0)
+ return -EINVAL;
+
+ rc = security_sid_to_context(secid, &ctx_str, &str_len);
+ if (rc)
+ return rc;
- BUG_ON(!x);
+ ctx = kmalloc(sizeof(*ctx) + str_len, GFP_ATOMIC);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto out;
+ }
- err = selinux_xfrm_sec_ctx_alloc(&x->security, uctx, secid);
- if (err == 0)
- atomic_inc(&selinux_xfrm_refcount);
- return err;
+ ctx->ctx_doi = XFRM_SC_DOI_LSM;
+ ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
+ ctx->ctx_sid = secid;
+ ctx->ctx_len = str_len;
+ memcpy(ctx->ctx_str, ctx_str, str_len);
+
+ x->security = ctx;
+ atomic_inc(&selinux_xfrm_refcount);
+out:
+ kfree(ctx_str);
+ return rc;
}
/*
@@ -368,24 +381,15 @@ int selinux_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *uct
*/
void selinux_xfrm_state_free(struct xfrm_state *x)
{
- atomic_dec(&selinux_xfrm_refcount);
- kfree(x->security);
+ selinux_xfrm_free(x->security);
}
- /*
- * LSM hook implementation that authorizes deletion of labeled SAs.
- */
+/*
+ * LSM hook implementation that authorizes deletion of labeled SAs.
+ */
int selinux_xfrm_state_delete(struct xfrm_state *x)
{
- const struct task_security_struct *tsec = current_security();
- struct xfrm_sec_ctx *ctx = x->security;
-
- if (!ctx)
- return 0;
-
- return avc_has_perm(tsec->sid, ctx->ctx_sid,
- SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
- NULL);
+ return selinux_xfrm_delete(x->security);
}
/*
@@ -395,14 +399,12 @@ int selinux_xfrm_state_delete(struct xfrm_state *x)
* we need to check for unlabelled access since this may not have
* gone thru the IPSec process.
*/
-int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
- struct common_audit_data *ad)
+int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad)
{
- int i, rc = 0;
- struct sec_path *sp;
- u32 sel_sid = SECINITSID_UNLABELED;
-
- sp = skb->sp;
+ int i;
+ struct sec_path *sp = skb->sp;
+ u32 peer_sid = SECINITSID_UNLABELED;
if (sp) {
for (i = 0; i < sp->len; i++) {
@@ -410,23 +412,17 @@ int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
if (x && selinux_authorizable_xfrm(x)) {
struct xfrm_sec_ctx *ctx = x->security;
- sel_sid = ctx->ctx_sid;
+ peer_sid = ctx->ctx_sid;
break;
}
}
}
- /*
- * This check even when there's no association involved is
- * intended, according to Trent Jaeger, to make sure a
- * process can't engage in non-ipsec communication unless
- * explicitly allowed by policy.
- */
-
- rc = avc_has_perm(isec_sid, sel_sid, SECCLASS_ASSOCIATION,
- ASSOCIATION__RECVFROM, ad);
-
- return rc;
+ /* This check even when there's no association involved is intended,
+ * according to Trent Jaeger, to make sure a process can't engage in
+ * non-IPsec communication unless explicitly allowed by policy. */
+ return avc_has_perm(sk_sid, peer_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__RECVFROM, ad);
}
/*
@@ -436,49 +432,38 @@ int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
 * If we do have an authorizable security association, then it has already been
* checked in the selinux_xfrm_state_pol_flow_match hook above.
*/
-int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
- struct common_audit_data *ad, u8 proto)
+int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad, u8 proto)
{
struct dst_entry *dst;
- int rc = 0;
-
- dst = skb_dst(skb);
-
- if (dst) {
- struct dst_entry *dst_test;
-
- for (dst_test = dst; dst_test != NULL;
- dst_test = dst_test->child) {
- struct xfrm_state *x = dst_test->xfrm;
-
- if (x && selinux_authorizable_xfrm(x))
- goto out;
- }
- }
switch (proto) {
case IPPROTO_AH:
case IPPROTO_ESP:
case IPPROTO_COMP:
- /*
- * We should have already seen this packet once before
- * it underwent xfrm(s). No need to subject it to the
- * unlabeled check.
- */
- goto out;
+ /* We should have already seen this packet once before it
+ * underwent xfrm(s). No need to subject it to the unlabeled
+ * check. */
+ return 0;
default:
break;
}
- /*
- * This check even when there's no association involved is
- * intended, according to Trent Jaeger, to make sure a
- * process can't engage in non-ipsec communication unless
- * explicitly allowed by policy.
- */
+ dst = skb_dst(skb);
+ if (dst) {
+ struct dst_entry *iter;
- rc = avc_has_perm(isec_sid, SECINITSID_UNLABELED, SECCLASS_ASSOCIATION,
- ASSOCIATION__SENDTO, ad);
-out:
- return rc;
+ for (iter = dst; iter != NULL; iter = iter->child) {
+ struct xfrm_state *x = iter->xfrm;
+
+ if (x && selinux_authorizable_xfrm(x))
+ return 0;
+ }
+ }
+
+ /* This check even when there's no association involved is intended,
+ * according to Trent Jaeger, to make sure a process can't engage in
+ * non-IPsec communication unless explicitly allowed by policy. */
+ return avc_has_perm(sk_sid, SECINITSID_UNLABELED,
+ SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, ad);
}
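selinux_xfrm_skb_sid_ingress() above walks the packet's sec_path from the most recent transform backwards, adopts the first labeled SA as the session SID, and with ckall set rejects packets whose SAs carry conflicting labels. The same walk over a plain array, with SIDs as bare integers and 0 meaning an unlabeled SA (everything here is a stand-in for the kernel structures):

#include <stdio.h>

#define SECSID_NULL 0u

static int skb_sid_ingress(const unsigned *xvec, int len,
			   unsigned *sid, int ckall)
{
	unsigned sid_session = SECSID_NULL;
	int i;

	for (i = len - 1; i >= 0; i--) {	/* newest transform first */
		if (xvec[i] == SECSID_NULL)
			continue;		/* unlabeled SA, skip */
		if (sid_session == SECSID_NULL) {
			sid_session = xvec[i];
			if (!ckall)
				break;
		} else if (sid_session != xvec[i]) {
			*sid = SECSID_NULL;
			return -1;		/* conflicting SA labels */
		}
	}
	*sid = sid_session;
	return 0;
}

int main(void)
{
	unsigned path[] = { 7, 0, 7 };
	unsigned sid;
	int rc = skb_sid_ingress(path, 3, &sid, 1);

	printf("rc=%d sid=%u\n", rc, sid);	/* rc=0 sid=7 */
	return 0;
}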
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 076b8e8a51ab..364cc64fce71 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -177,9 +177,13 @@ struct smk_port_label {
#define SMACK_CIPSO_MAXCATNUM 184 /* 23 * 8 */
/*
- * Flag for transmute access
+ * Flags for untraditional access modes.
+ * It shouldn't be necessary to avoid conflicts with definitions
+ * in fs.h, but do so anyway.
*/
-#define MAY_TRANSMUTE 64
+#define MAY_TRANSMUTE 0x00001000 /* Controls directory labeling */
+#define MAY_LOCK 0x00002000 /* Locks should be writes, but ... */
+
/*
* Just to make the common cases easier to deal with
*/
@@ -188,9 +192,9 @@ struct smk_port_label {
#define MAY_NOT 0
/*
- * Number of access types used by Smack (rwxat)
+ * Number of access types used by Smack (rwxatl)
*/
-#define SMK_NUM_ACCESS_TYPE 5
+#define SMK_NUM_ACCESS_TYPE 6
/* SMACK data */
struct smack_audit_data {
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index b3b59b1e93d6..14293cd9b1e5 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -84,6 +84,8 @@ int log_policy = SMACK_AUDIT_DENIED;
*
* Do the object check first because that is more
* likely to differ.
+ *
+ * Allowing write access implies allowing locking.
*/
int smk_access_entry(char *subject_label, char *object_label,
struct list_head *rule_list)
@@ -99,6 +101,11 @@ int smk_access_entry(char *subject_label, char *object_label,
}
}
+ /*
+ * MAY_WRITE implies MAY_LOCK.
+ */
+ if ((may & MAY_WRITE) == MAY_WRITE)
+ may |= MAY_LOCK;
return may;
}
@@ -245,6 +252,7 @@ out_audit:
static inline void smack_str_from_perm(char *string, int access)
{
int i = 0;
+
if (access & MAY_READ)
string[i++] = 'r';
if (access & MAY_WRITE)
@@ -255,6 +263,8 @@ static inline void smack_str_from_perm(char *string, int access)
string[i++] = 'a';
if (access & MAY_TRANSMUTE)
string[i++] = 't';
+ if (access & MAY_LOCK)
+ string[i++] = 'l';
string[i] = '\0';
}
/**
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 8825375cc031..b0be893ad44d 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -185,7 +185,7 @@ static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode)
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
smk_ad_setfield_u_tsk(&ad, ctp);
- rc = smk_curacc(skp->smk_known, MAY_READWRITE, &ad);
+ rc = smk_curacc(skp->smk_known, mode, &ad);
return rc;
}
@@ -1146,7 +1146,7 @@ static int smack_file_ioctl(struct file *file, unsigned int cmd,
* @file: the object
* @cmd: unused
*
- * Returns 0 if current has write access, error code otherwise
+ * Returns 0 if current has lock access, error code otherwise
*/
static int smack_file_lock(struct file *file, unsigned int cmd)
{
@@ -1154,7 +1154,7 @@ static int smack_file_lock(struct file *file, unsigned int cmd)
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
smk_ad_setfield_u_fs_path(&ad, file->f_path);
- return smk_curacc(file->f_security, MAY_WRITE, &ad);
+ return smk_curacc(file->f_security, MAY_LOCK, &ad);
}
/**
@@ -1178,8 +1178,13 @@ static int smack_file_fcntl(struct file *file, unsigned int cmd,
switch (cmd) {
case F_GETLK:
+ break;
case F_SETLK:
case F_SETLKW:
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+ smk_ad_setfield_u_fs_path(&ad, file->f_path);
+ rc = smk_curacc(file->f_security, MAY_LOCK, &ad);
+ break;
case F_SETOWN:
case F_SETSIG:
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 80f4b4a45725..160aa08e3cd5 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -139,7 +139,7 @@ const char *smack_cipso_option = SMACK_CIPSO_OPTION;
* SMK_LOADLEN: Smack rule length
*/
#define SMK_OACCESS "rwxa"
-#define SMK_ACCESS "rwxat"
+#define SMK_ACCESS "rwxatl"
#define SMK_OACCESSLEN (sizeof(SMK_OACCESS) - 1)
#define SMK_ACCESSLEN (sizeof(SMK_ACCESS) - 1)
#define SMK_OLOADLEN (SMK_LABELLEN + SMK_LABELLEN + SMK_OACCESSLEN)
@@ -282,6 +282,10 @@ static int smk_perm_from_str(const char *string)
case 'T':
perm |= MAY_TRANSMUTE;
break;
+ case 'l':
+ case 'L':
+ perm |= MAY_LOCK;
+ break;
default:
return perm;
}
@@ -452,7 +456,7 @@ static ssize_t smk_write_rules_list(struct file *file, const char __user *buf,
/*
* Minor hack for backward compatibility
*/
- if (count != SMK_OLOADLEN && count != SMK_LOADLEN)
+ if (count < SMK_OLOADLEN || count > SMK_LOADLEN)
return -EINVAL;
} else {
if (count >= PAGE_SIZE) {
@@ -592,6 +596,8 @@ static void smk_rule_show(struct seq_file *s, struct smack_rule *srp, int max)
seq_putc(s, 'a');
if (srp->smk_access & MAY_TRANSMUTE)
seq_putc(s, 't');
+ if (srp->smk_access & MAY_LOCK)
+ seq_putc(s, 'l');
seq_putc(s, '\n');
}
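Taken together, the Smack hunks extend the access string to rwxatl and make write imply lock when a rule is looked up; a user-space rendering of the parse plus the implication (the read/write/exec/append values follow linux/fs.h, transmute and lock follow the smack.h hunk):

#include <stdio.h>

#define MAY_EXEC	0x00000001	/* as in linux/fs.h */
#define MAY_WRITE	0x00000002
#define MAY_READ	0x00000004
#define MAY_APPEND	0x00000008
#define MAY_TRANSMUTE	0x00001000	/* as in the smack.h hunk */
#define MAY_LOCK	0x00002000

static int perm_from_str(const char *s)
{
	int perm = 0;

	for (; *s; s++)
		switch (*s) {
		case 'r': case 'R': perm |= MAY_READ;      break;
		case 'w': case 'W': perm |= MAY_WRITE;     break;
		case 'x': case 'X': perm |= MAY_EXEC;      break;
		case 'a': case 'A': perm |= MAY_APPEND;    break;
		case 't': case 'T': perm |= MAY_TRANSMUTE; break;
		case 'l': case 'L': perm |= MAY_LOCK;      break;
		default: return perm;
		}
	return perm;
}

int main(void)
{
	int may = perm_from_str("rw");

	if ((may & MAY_WRITE) == MAY_WRITE)
		may |= MAY_LOCK;	/* write access implies locking */
	printf("%#x\n", may);		/* 0x2006 */
	return 0;
}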
diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c
index 872d59e35ee2..721d8fd45685 100644
--- a/sound/atmel/abdac.c
+++ b/sound/atmel/abdac.c
@@ -357,7 +357,8 @@ static int set_sample_rates(struct atmel_abdac *dac)
if (new_rate < 0)
break;
/* make sure we are below the ABDAC clock */
- if (new_rate <= clk_get_rate(dac->pclk)) {
+ if (index < MAX_NUM_RATES &&
+ new_rate <= clk_get_rate(dac->pclk)) {
dac->rates[index] = new_rate / 256;
index++;
}
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 6e03b465e44e..a2104671f51d 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1937,6 +1937,8 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
case SNDRV_PCM_STATE_DISCONNECTED:
err = -EBADFD;
goto _endloop;
+ case SNDRV_PCM_STATE_PAUSED:
+ continue;
}
if (!tout) {
snd_printd("%s write error (DMA or IRQ trouble?)\n",
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
index d3226892ad6b..9048777228e2 100644
--- a/sound/firewire/amdtp.c
+++ b/sound/firewire/amdtp.c
@@ -434,17 +434,14 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
return;
index = s->packet_index;
+ /* this module generate empty packet for 'no data' */
syt = calculate_syt(s, cycle);
- if (!(s->flags & CIP_BLOCKING)) {
+ if (!(s->flags & CIP_BLOCKING))
data_blocks = calculate_data_blocks(s);
- } else {
- if (syt != 0xffff) {
- data_blocks = s->syt_interval;
- } else {
- data_blocks = 0;
- syt = 0xffffff;
- }
- }
+ else if (syt != 0xffff)
+ data_blocks = s->syt_interval;
+ else
+ data_blocks = 0;
buffer = s->buffer.packets[index].buffer;
buffer[0] = cpu_to_be32(ACCESS_ONCE(s->source_node_id_field) |
diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
index 839ebf812d79..2746ecd291af 100644
--- a/sound/firewire/amdtp.h
+++ b/sound/firewire/amdtp.h
@@ -4,6 +4,7 @@
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
+#include <sound/asound.h>
#include "packets-buffer.h"
/**
diff --git a/sound/firewire/dice.c b/sound/firewire/dice.c
index 57bcd31fcc12..c0aa64941cee 100644
--- a/sound/firewire/dice.c
+++ b/sound/firewire/dice.c
@@ -1019,7 +1019,7 @@ static void dice_proc_read(struct snd_info_entry *entry,
if (dice_proc_read_mem(dice, &tx_rx_header, sections[2], 2) < 0)
return;
- quadlets = min_t(u32, tx_rx_header.size, sizeof(buf.tx));
+ quadlets = min_t(u32, tx_rx_header.size, sizeof(buf.tx) / 4);
for (stream = 0; stream < tx_rx_header.number; ++stream) {
if (dice_proc_read_mem(dice, &buf.tx, sections[2] + 2 +
stream * tx_rx_header.size,
@@ -1045,7 +1045,7 @@ static void dice_proc_read(struct snd_info_entry *entry,
if (dice_proc_read_mem(dice, &tx_rx_header, sections[4], 2) < 0)
return;
- quadlets = min_t(u32, tx_rx_header.size, sizeof(buf.rx));
+ quadlets = min_t(u32, tx_rx_header.size, sizeof(buf.rx) / 4);
for (stream = 0; stream < tx_rx_header.number; ++stream) {
if (dice_proc_read_mem(dice, &buf.rx, sections[4] + 2 +
stream * tx_rx_header.size,
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index 8de66ccd7279..4cdd9ded4563 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -209,8 +209,9 @@ config SND_HDA_CODEC_CA0132
config SND_HDA_CODEC_CA0132_DSP
bool "Support new DSP code for CA0132 codec"
- depends on SND_HDA_CODEC_CA0132 && FW_LOADER
+ depends on SND_HDA_CODEC_CA0132
select SND_HDA_DSP_LOADER
+ select FW_LOADER
help
Say Y here to enable the DSP for Creative CA0132 for extended
features like equalizer or echo cancellation.
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index afb90f48867f..69178c4f4113 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -4000,6 +4000,10 @@ static void hda_call_codec_resume(struct hda_codec *codec)
* in the resume / power-save sequence
*/
hda_keep_power_on(codec);
+ if (codec->pm_down_notified) {
+ codec->pm_down_notified = 0;
+ hda_call_pm_notify(codec->bus, true);
+ }
hda_set_power_state(codec, AC_PWRST_D0);
restore_shutup_pins(codec);
hda_exec_init_verbs(codec);
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 77db69480c19..7aa9870040c1 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -698,7 +698,6 @@ struct hda_bus {
unsigned int in_reset:1; /* during reset operation */
unsigned int power_keep_link_on:1; /* don't power off HDA link */
unsigned int no_response_fallback:1; /* don't fallback at RIRB error */
- unsigned int avoid_link_reset:1; /* don't reset link at runtime PM */
int primary_dig_out_type; /* primary digital out PCM type */
};
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 3067ed4fe3b2..c7f6d1cab606 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -474,6 +474,20 @@ static void invalidate_nid_path(struct hda_codec *codec, int idx)
memset(path, 0, sizeof(*path));
}
+/* return a DAC if paired to the given pin by codec driver */
+static hda_nid_t get_preferred_dac(struct hda_codec *codec, hda_nid_t pin)
+{
+ struct hda_gen_spec *spec = codec->spec;
+ const hda_nid_t *list = spec->preferred_dacs;
+
+ if (!list)
+ return 0;
+ for (; *list; list += 2)
+ if (*list == pin)
+ return list[1];
+ return 0;
+}
+
/* look for an empty DAC slot */
static hda_nid_t look_for_dac(struct hda_codec *codec, hda_nid_t pin,
bool is_digital)
@@ -1192,7 +1206,14 @@ static int try_assign_dacs(struct hda_codec *codec, int num_outs,
continue;
}
- dacs[i] = look_for_dac(codec, pin, false);
+ dacs[i] = get_preferred_dac(codec, pin);
+ if (dacs[i]) {
+ if (is_dac_already_used(codec, dacs[i]))
+ badness += bad->shared_primary;
+ }
+
+ if (!dacs[i])
+ dacs[i] = look_for_dac(codec, pin, false);
if (!dacs[i] && !i) {
/* try to steal the DAC of surrounds for the front */
for (j = 1; j < num_outs; j++) {
@@ -2506,12 +2527,8 @@ static int create_out_jack_modes(struct hda_codec *codec, int num_pins,
for (i = 0; i < num_pins; i++) {
hda_nid_t pin = pins[i];
- if (pin == spec->hp_mic_pin) {
- int ret = create_hp_mic_jack_mode(codec, pin);
- if (ret < 0)
- return ret;
+ if (pin == spec->hp_mic_pin)
continue;
- }
if (get_out_jack_num_items(codec, pin) > 1) {
struct snd_kcontrol_new *knew;
char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
@@ -2764,7 +2781,7 @@ static int hp_mic_jack_mode_put(struct snd_kcontrol *kcontrol,
val &= ~(AC_PINCTL_VREFEN | PIN_HP);
val |= get_vref_idx(vref_caps, idx) | PIN_IN;
} else
- val = snd_hda_get_default_vref(codec, nid);
+ val = snd_hda_get_default_vref(codec, nid) | PIN_IN;
}
snd_hda_set_pin_ctl_cache(codec, nid, val);
call_hp_automute(codec, NULL);
@@ -2784,9 +2801,6 @@ static int create_hp_mic_jack_mode(struct hda_codec *codec, hda_nid_t pin)
struct hda_gen_spec *spec = codec->spec;
struct snd_kcontrol_new *knew;
- if (get_out_jack_num_items(codec, pin) <= 1 &&
- get_in_jack_num_items(codec, pin) <= 1)
- return 0; /* no need */
knew = snd_hda_gen_add_kctl(spec, "Headphone Mic Jack Mode",
&hp_mic_jack_mode_enum);
if (!knew)
@@ -2815,6 +2829,42 @@ static int add_loopback_list(struct hda_gen_spec *spec, hda_nid_t mix, int idx)
return 0;
}
+/* return true if either a volume or a mute amp is found for the given
+ * aamix path; the amp has to be either in the mixer node or its direct leaf
+ */
+static bool look_for_mix_leaf_ctls(struct hda_codec *codec, hda_nid_t mix_nid,
+ hda_nid_t pin, unsigned int *mix_val,
+ unsigned int *mute_val)
+{
+ int idx, num_conns;
+ const hda_nid_t *list;
+ hda_nid_t nid;
+
+ idx = snd_hda_get_conn_index(codec, mix_nid, pin, true);
+ if (idx < 0)
+ return false;
+
+ *mix_val = *mute_val = 0;
+ if (nid_has_volume(codec, mix_nid, HDA_INPUT))
+ *mix_val = HDA_COMPOSE_AMP_VAL(mix_nid, 3, idx, HDA_INPUT);
+ if (nid_has_mute(codec, mix_nid, HDA_INPUT))
+ *mute_val = HDA_COMPOSE_AMP_VAL(mix_nid, 3, idx, HDA_INPUT);
+ if (*mix_val && *mute_val)
+ return true;
+
+ /* check leaf node */
+ num_conns = snd_hda_get_conn_list(codec, mix_nid, &list);
+ if (num_conns < idx)
+ return false;
+ nid = list[idx];
+ if (!*mix_val && nid_has_volume(codec, nid, HDA_OUTPUT))
+ *mix_val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
+ if (!*mute_val && nid_has_mute(codec, nid, HDA_OUTPUT))
+ *mute_val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
+
+ return *mix_val || *mute_val;
+}
+
/* create input playback/capture controls for the given pin */
static int new_analog_input(struct hda_codec *codec, int input_idx,
hda_nid_t pin, const char *ctlname, int ctlidx,
@@ -2822,12 +2872,11 @@ static int new_analog_input(struct hda_codec *codec, int input_idx,
{
struct hda_gen_spec *spec = codec->spec;
struct nid_path *path;
- unsigned int val;
+ unsigned int mix_val, mute_val;
int err, idx;
- if (!nid_has_volume(codec, mix_nid, HDA_INPUT) &&
- !nid_has_mute(codec, mix_nid, HDA_INPUT))
- return 0; /* no need for analog loopback */
+ if (!look_for_mix_leaf_ctls(codec, mix_nid, pin, &mix_val, &mute_val))
+ return 0;
path = snd_hda_add_new_path(codec, pin, mix_nid, 0);
if (!path)
@@ -2836,20 +2885,18 @@ static int new_analog_input(struct hda_codec *codec, int input_idx,
spec->loopback_paths[input_idx] = snd_hda_get_path_idx(codec, path);
idx = path->idx[path->depth - 1];
- if (nid_has_volume(codec, mix_nid, HDA_INPUT)) {
- val = HDA_COMPOSE_AMP_VAL(mix_nid, 3, idx, HDA_INPUT);
- err = __add_pb_vol_ctrl(spec, HDA_CTL_WIDGET_VOL, ctlname, ctlidx, val);
+ if (mix_val) {
+ err = __add_pb_vol_ctrl(spec, HDA_CTL_WIDGET_VOL, ctlname, ctlidx, mix_val);
if (err < 0)
return err;
- path->ctls[NID_PATH_VOL_CTL] = val;
+ path->ctls[NID_PATH_VOL_CTL] = mix_val;
}
- if (nid_has_mute(codec, mix_nid, HDA_INPUT)) {
- val = HDA_COMPOSE_AMP_VAL(mix_nid, 3, idx, HDA_INPUT);
- err = __add_pb_sw_ctrl(spec, HDA_CTL_WIDGET_MUTE, ctlname, ctlidx, val);
+ if (mute_val) {
+ err = __add_pb_sw_ctrl(spec, HDA_CTL_WIDGET_MUTE, ctlname, ctlidx, mute_val);
if (err < 0)
return err;
- path->ctls[NID_PATH_MUTE_CTL] = val;
+ path->ctls[NID_PATH_MUTE_CTL] = mute_val;
}
path->active = true;
@@ -4271,6 +4318,26 @@ static unsigned int snd_hda_gen_path_power_filter(struct hda_codec *codec,
return AC_PWRST_D3;
}
+/* mute all aamix inputs initially; parse up to the first leaves */
+static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix)
+{
+ int i, nums;
+ const hda_nid_t *conn;
+ bool has_amp;
+
+ nums = snd_hda_get_conn_list(codec, mix, &conn);
+ has_amp = nid_has_mute(codec, mix, HDA_INPUT);
+ for (i = 0; i < nums; i++) {
+ if (has_amp)
+ snd_hda_codec_amp_stereo(codec, mix,
+ HDA_INPUT, i,
+ 0xff, HDA_AMP_MUTE);
+ else if (nid_has_volume(codec, conn[i], HDA_OUTPUT))
+ snd_hda_codec_amp_stereo(codec, conn[i],
+ HDA_OUTPUT, 0,
+ 0xff, HDA_AMP_MUTE);
+ }
+}
/*
* Parse the given BIOS configuration and set up the hda_gen_spec
@@ -4383,6 +4450,17 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
if (err < 0)
return err;
+ /* create "Headphone Mic Jack Mode" if no input selection is
+ * available (or user specifies add_jack_modes hint)
+ */
+ if (spec->hp_mic_pin &&
+ (spec->auto_mic || spec->input_mux.num_items == 1 ||
+ spec->add_jack_modes)) {
+ err = create_hp_mic_jack_mode(codec, spec->hp_mic_pin);
+ if (err < 0)
+ return err;
+ }
+
if (spec->add_jack_modes) {
if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
err = create_out_jack_modes(codec, cfg->line_outs,
@@ -4398,6 +4476,10 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
}
}
+ /* mute all aamix input initially */
+ if (spec->mixer_nid)
+ mute_all_mixer_nid(codec, spec->mixer_nid);
+
dig_only:
parse_digital(codec);
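The new spec->preferred_dacs table consulted by try_assign_dacs() is a flat, zero-terminated array of (pin, DAC) NID pairs; get_preferred_dac() simply scans it two entries at a time. The lookup in isolation, using the AD1986A pairs from the patch_analog.c hunk further down (hda_nid_t reduced to unsigned short for the sketch):

#include <stdio.h>

typedef unsigned short nid_t;	/* stand-in for hda_nid_t */

static nid_t get_preferred_dac(const nid_t *list, nid_t pin)
{
	if (!list)
		return 0;
	for (; *list; list += 2)	/* (pin, dac) pairs, 0-terminated */
		if (*list == pin)
			return list[1];
	return 0;
}

int main(void)
{
	static const nid_t pairs[] = {
		0x1a, 0x03, 0x1b, 0x03, 0x1c, 0x04,
		0x1d, 0x05, 0x1e, 0x03, 0
	};

	printf("pin 0x1c -> dac %#x\n",
	       (unsigned)get_preferred_dac(pairs, 0x1c));
	return 0;
}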
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 7e45cb44d151..0929a06df812 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -249,6 +249,9 @@ struct hda_gen_spec {
const struct badness_table *main_out_badness;
const struct badness_table *extra_out_badness;
+ /* preferred pin/DAC pairs; an array of paired NIDs */
+ const hda_nid_t *preferred_dacs;
+
/* loopback mixing mode */
bool aamix_mode;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 7a09404579a7..956871d8b3d2 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2994,8 +2994,7 @@ static int azx_runtime_suspend(struct device *dev)
STATESTS_INT_MASK);
azx_stop_chip(chip);
- if (!chip->bus->avoid_link_reset)
- azx_enter_link_reset(chip);
+ azx_enter_link_reset(chip);
azx_clear_irq_pending(chip);
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
hda_display_power(false);
@@ -3434,6 +3433,10 @@ static void check_probe_mask(struct azx *chip, int dev)
* white/black-list for enable_msi
*/
static struct snd_pci_quirk msi_black_list[] = {
+ SND_PCI_QUIRK(0x103c, 0x2191, "HP", 0), /* AMD Hudson */
+ SND_PCI_QUIRK(0x103c, 0x2192, "HP", 0), /* AMD Hudson */
+ SND_PCI_QUIRK(0x103c, 0x21f7, "HP", 0), /* AMD Hudson */
+ SND_PCI_QUIRK(0x103c, 0x21fa, "HP", 0), /* AMD Hudson */
SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
@@ -3877,7 +3880,8 @@ static int azx_probe(struct pci_dev *pci,
}
dev++;
- complete_all(&chip->probe_wait);
+ if (chip->disabled)
+ complete_all(&chip->probe_wait);
return 0;
out_free:
@@ -3954,10 +3958,10 @@ static int azx_probe_continue(struct azx *chip)
if ((chip->driver_caps & AZX_DCAPS_PM_RUNTIME) || chip->use_vga_switcheroo)
pm_runtime_put_noidle(&pci->dev);
- return 0;
-
out_free:
- chip->init_failed = 1;
+ if (err < 0)
+ chip->init_failed = 1;
+ complete_all(&chip->probe_wait);
return err;
}
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 1a83559f4cbd..699262a3e07a 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -147,6 +147,8 @@ static void ad_vmaster_eapd_hook(void *private_data, int enabled)
if (!spec->eapd_nid)
return;
+ if (codec->inv_eapd)
+ enabled = !enabled;
snd_hda_codec_update_cache(codec, spec->eapd_nid, 0,
AC_VERB_SET_EAPD_BTLENABLE,
enabled ? 0x02 : 0x00);
@@ -338,6 +340,14 @@ static int patch_ad1986a(struct hda_codec *codec)
{
int err;
struct ad198x_spec *spec;
+ static hda_nid_t preferred_pairs[] = {
+ 0x1a, 0x03,
+ 0x1b, 0x03,
+ 0x1c, 0x04,
+ 0x1d, 0x05,
+ 0x1e, 0x03,
+ 0
+ };
err = alloc_ad_spec(codec);
if (err < 0)
@@ -358,6 +368,11 @@ static int patch_ad1986a(struct hda_codec *codec)
* So, let's disable the shared stream.
*/
spec->gen.multiout.no_share_stream = 1;
+ /* give fixed DAC/pin pairs */
+ spec->gen.preferred_dacs = preferred_pairs;
+
+ /* AD1986A can't manage the dynamic pin on/off smoothly */
+ spec->gen.auto_mute_via_amp = 1;
snd_hda_pick_fixup(codec, ad1986a_fixup_models, ad1986a_fixup_tbl,
ad1986a_fixups);
@@ -962,6 +977,7 @@ static void ad1884_fixup_hp_eapd(struct hda_codec *codec,
switch (action) {
case HDA_FIXUP_ACT_PRE_PROBE:
spec->gen.vmaster_mute.hook = ad1884_vmaster_hp_gpio_hook;
+ spec->gen.own_eapd_ctl = 1;
snd_hda_sequence_write_cache(codec, gpio_init_verbs);
break;
case HDA_FIXUP_ACT_PROBE:
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index c205bb1747fd..3fbf2883e06e 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -2936,7 +2936,6 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
- SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS),
SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS),
@@ -3244,9 +3243,29 @@ enum {
#if IS_ENABLED(CONFIG_THINKPAD_ACPI)
#include <linux/thinkpad_acpi.h>
+#include <acpi/acpi.h>
static int (*led_set_func)(int, bool);
+static acpi_status acpi_check_cb(acpi_handle handle, u32 lvl, void *context,
+ void **rv)
+{
+ bool *found = context;
+ *found = true;
+ return AE_OK;
+}
+
+static bool is_thinkpad(struct hda_codec *codec)
+{
+ bool found = false;
+ if (codec->subsystem_id >> 16 != 0x17aa)
+ return false;
+ if (ACPI_SUCCESS(acpi_get_devices("LEN0068", acpi_check_cb, &found, NULL)) && found)
+ return true;
+ found = false;
+ return ACPI_SUCCESS(acpi_get_devices("IBM0068", acpi_check_cb, &found, NULL)) && found;
+}
+
static void update_tpacpi_mute_led(void *private_data, int enabled)
{
struct hda_codec *codec = private_data;
@@ -3279,6 +3298,8 @@ static void cxt_fixup_thinkpad_acpi(struct hda_codec *codec,
bool removefunc = false;
if (action == HDA_FIXUP_ACT_PROBE) {
+ if (!is_thinkpad(codec))
+ return;
if (!led_set_func)
led_set_func = symbol_request(tpacpi_led_set);
if (!led_set_func) {
@@ -3494,6 +3515,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004),
SND_PCI_QUIRK(0x1c06, 0x2012, "Lemote A1205", CXT_PINCFG_LEMOTE_A1205),
{}
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 08407bed093e..f281c8068557 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1142,32 +1142,34 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll);
-static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
+static void jack_callback(struct hda_codec *codec, struct hda_jack_tbl *jack)
{
struct hdmi_spec *spec = codec->spec;
+ int pin_idx = pin_nid_to_pin_index(spec, jack->nid);
+ if (pin_idx < 0)
+ return;
+
+ if (hdmi_present_sense(get_pin(spec, pin_idx), 1))
+ snd_hda_jack_report_sync(codec);
+}
+
+static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
+{
int tag = res >> AC_UNSOL_RES_TAG_SHIFT;
- int pin_nid;
- int pin_idx;
struct hda_jack_tbl *jack;
int dev_entry = (res & AC_UNSOL_RES_DE) >> AC_UNSOL_RES_DE_SHIFT;
jack = snd_hda_jack_tbl_get_from_tag(codec, tag);
if (!jack)
return;
- pin_nid = jack->nid;
jack->jack_dirty = 1;
_snd_printd(SND_PR_VERBOSE,
"HDMI hot plug event: Codec=%d Pin=%d Device=%d Inactive=%d Presence_Detect=%d ELD_Valid=%d\n",
- codec->addr, pin_nid, dev_entry, !!(res & AC_UNSOL_RES_IA),
+ codec->addr, jack->nid, dev_entry, !!(res & AC_UNSOL_RES_IA),
!!(res & AC_UNSOL_RES_PD), !!(res & AC_UNSOL_RES_ELDV));
- pin_idx = pin_nid_to_pin_index(spec, pin_nid);
- if (pin_idx < 0)
- return;
-
- if (hdmi_present_sense(get_pin(spec, pin_idx), 1))
- snd_hda_jack_report_sync(codec);
+ jack_callback(codec, jack);
}
static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
@@ -2095,7 +2097,8 @@ static int generic_hdmi_init(struct hda_codec *codec)
hda_nid_t pin_nid = per_pin->pin_nid;
hdmi_init_pin(codec, pin_nid);
- snd_hda_jack_detect_enable(codec, pin_nid, pin_nid);
+ snd_hda_jack_detect_enable_callback(codec, pin_nid, pin_nid,
+ codec->jackpoll_interval > 0 ? jack_callback : NULL);
}
return 0;
}
@@ -2334,8 +2337,9 @@ static int simple_playback_build_controls(struct hda_codec *codec)
int err;
per_cvt = get_cvt(spec, 0);
- err = snd_hda_create_spdif_out_ctls(codec, per_cvt->cvt_nid,
- per_cvt->cvt_nid);
+ err = snd_hda_create_dig_out_ctls(codec, per_cvt->cvt_nid,
+ per_cvt->cvt_nid,
+ HDA_PCM_TYPE_HDMI);
if (err < 0)
return err;
return simple_hdmi_build_jack(codec, 0);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 04d1e6be600e..c5646941539a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1512,6 +1512,7 @@ enum {
ALC260_FIXUP_KN1,
ALC260_FIXUP_FSC_S7020,
ALC260_FIXUP_FSC_S7020_JWSE,
+ ALC260_FIXUP_VAIO_PINS,
};
static void alc260_gpio1_automute(struct hda_codec *codec)
@@ -1652,6 +1653,24 @@ static const struct hda_fixup alc260_fixups[] = {
.chained = true,
.chain_id = ALC260_FIXUP_FSC_S7020,
},
+ [ALC260_FIXUP_VAIO_PINS] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ /* Pin configs are missing completely on some VAIOs */
+ { 0x0f, 0x01211020 },
+ { 0x10, 0x0001003f },
+ { 0x11, 0x411111f0 },
+ { 0x12, 0x01a15930 },
+ { 0x13, 0x411111f0 },
+ { 0x14, 0x411111f0 },
+ { 0x15, 0x411111f0 },
+ { 0x16, 0x411111f0 },
+ { 0x17, 0x411111f0 },
+ { 0x18, 0x411111f0 },
+ { 0x19, 0x411111f0 },
+ { }
+ }
+ },
};
static const struct snd_pci_quirk alc260_fixup_tbl[] = {
@@ -1660,6 +1679,8 @@ static const struct snd_pci_quirk alc260_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x008f, "Acer", ALC260_FIXUP_GPIO1),
SND_PCI_QUIRK(0x103c, 0x280a, "HP dc5750", ALC260_FIXUP_HP_DC5750),
SND_PCI_QUIRK(0x103c, 0x30ba, "HP Presario B1900", ALC260_FIXUP_HP_B1900),
+ SND_PCI_QUIRK(0x104d, 0x81bb, "Sony VAIO", ALC260_FIXUP_VAIO_PINS),
+ SND_PCI_QUIRK(0x104d, 0x81e2, "Sony VAIO TX", ALC260_FIXUP_HP_PIN_0F),
SND_PCI_QUIRK(0x10cf, 0x1326, "FSC LifeBook S7020", ALC260_FIXUP_FSC_S7020),
SND_PCI_QUIRK(0x1509, 0x4540, "Favorit 100XS", ALC260_FIXUP_GPIO1),
SND_PCI_QUIRK(0x152d, 0x0729, "Quanta KN1", ALC260_FIXUP_KN1),
@@ -1759,8 +1780,11 @@ enum {
ALC889_FIXUP_DAC_ROUTE,
ALC889_FIXUP_MBP_VREF,
ALC889_FIXUP_IMAC91_VREF,
+ ALC889_FIXUP_MBA21_VREF,
ALC882_FIXUP_INV_DMIC,
ALC882_FIXUP_NO_PRIMARY_HP,
+ ALC887_FIXUP_ASUS_BASS,
+ ALC887_FIXUP_BASS_CHMAP,
};
static void alc889_fixup_coef(struct hda_codec *codec,
@@ -1861,17 +1885,13 @@ static void alc889_fixup_mbp_vref(struct hda_codec *codec,
}
}
-/* Set VREF on speaker pins on imac91 */
-static void alc889_fixup_imac91_vref(struct hda_codec *codec,
- const struct hda_fixup *fix, int action)
+static void alc889_fixup_mac_pins(struct hda_codec *codec,
+ const hda_nid_t *nids, int num_nids)
{
struct alc_spec *spec = codec->spec;
- static hda_nid_t nids[2] = { 0x18, 0x1a };
int i;
- if (action != HDA_FIXUP_ACT_INIT)
- return;
- for (i = 0; i < ARRAY_SIZE(nids); i++) {
+ for (i = 0; i < num_nids; i++) {
unsigned int val;
val = snd_hda_codec_get_pin_target(codec, nids[i]);
val |= AC_PINCTL_VREF_50;
@@ -1880,6 +1900,26 @@ static void alc889_fixup_imac91_vref(struct hda_codec *codec,
spec->gen.keep_vref_in_automute = 1;
}
+/* Set VREF on speaker pins on imac91 */
+static void alc889_fixup_imac91_vref(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ static hda_nid_t nids[2] = { 0x18, 0x1a };
+
+ if (action == HDA_FIXUP_ACT_INIT)
+ alc889_fixup_mac_pins(codec, nids, ARRAY_SIZE(nids));
+}
+
+/* Set VREF on speaker pins on mba21 */
+static void alc889_fixup_mba21_vref(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ static hda_nid_t nids[2] = { 0x18, 0x19 };
+
+ if (action == HDA_FIXUP_ACT_INIT)
+ alc889_fixup_mac_pins(codec, nids, ARRAY_SIZE(nids));
+}
+
/* Don't take HP output as primary
* Strangely, the speaker output doesn't work on Vaio Z and some Vaio
* all-in-one desktop PCs (for example VGC-LN51JGB) through DAC 0x05
@@ -1894,6 +1934,9 @@ static void alc882_fixup_no_primary_hp(struct hda_codec *codec,
}
}
+static void alc_fixup_bass_chmap(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action);
+
static const struct hda_fixup alc882_fixups[] = {
[ALC882_FIXUP_ABIT_AW9D_MAX] = {
.type = HDA_FIXUP_PINS,
@@ -2076,6 +2119,12 @@ static const struct hda_fixup alc882_fixups[] = {
.chained = true,
.chain_id = ALC882_FIXUP_GPIO1,
},
+ [ALC889_FIXUP_MBA21_VREF] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc889_fixup_mba21_vref,
+ .chained = true,
+ .chain_id = ALC889_FIXUP_MBP_VREF,
+ },
[ALC882_FIXUP_INV_DMIC] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_inv_dmic_0x12,
@@ -2084,6 +2133,19 @@ static const struct hda_fixup alc882_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc882_fixup_no_primary_hp,
},
+ [ALC887_FIXUP_ASUS_BASS] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ {0x16, 0x99130130}, /* bass speaker */
+ {}
+ },
+ .chained = true,
+ .chain_id = ALC887_FIXUP_BASS_CHMAP,
+ },
+ [ALC887_FIXUP_BASS_CHMAP] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_bass_chmap,
+ },
};
static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2117,6 +2179,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V),
SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC),
SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
+ SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
@@ -2132,7 +2195,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x106b, 0x3000, "iMac", ALC889_FIXUP_MBP_VREF),
SND_PCI_QUIRK(0x106b, 0x3200, "iMac 7,1 Aluminum", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK(0x106b, 0x3400, "MacBookAir 1,1", ALC889_FIXUP_MBP_VREF),
- SND_PCI_QUIRK(0x106b, 0x3500, "MacBookAir 2,1", ALC889_FIXUP_MBP_VREF),
+ SND_PCI_QUIRK(0x106b, 0x3500, "MacBookAir 2,1", ALC889_FIXUP_MBA21_VREF),
SND_PCI_QUIRK(0x106b, 0x3600, "Macbook 3,1", ALC889_FIXUP_MBP_VREF),
SND_PCI_QUIRK(0x106b, 0x3800, "MacbookPro 4,1", ALC889_FIXUP_MBP_VREF),
SND_PCI_QUIRK(0x106b, 0x3e00, "iMac 24 Aluminum", ALC885_FIXUP_MACPRO_GPIO),
@@ -3247,6 +3310,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
alc_write_coef_idx(codec, 0x18, 0x7388);
break;
case 0x10ec0668:
+ alc_write_coef_idx(codec, 0x11, 0x0001);
alc_write_coef_idx(codec, 0x15, 0x0d60);
alc_write_coef_idx(codec, 0xc3, 0x0000);
break;
@@ -3275,6 +3339,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
alc_write_coef_idx(codec, 0x18, 0x7388);
break;
case 0x10ec0668:
+ alc_write_coef_idx(codec, 0x11, 0x0001);
alc_write_coef_idx(codec, 0x15, 0x0d50);
alc_write_coef_idx(codec, 0xc3, 0x0000);
break;
@@ -3393,7 +3458,7 @@ static void alc_update_headset_mode_hook(struct hda_codec *codec,
static void alc_update_headset_jack_cb(struct hda_codec *codec, struct hda_jack_tbl *jack)
{
struct alc_spec *spec = codec->spec;
- spec->current_headset_type = ALC_HEADSET_MODE_UNKNOWN;
+ spec->current_headset_type = ALC_HEADSET_TYPE_UNKNOWN;
snd_hda_gen_hp_automute(codec, jack);
}
@@ -3560,11 +3625,6 @@ static void alc283_hp_automute_hook(struct hda_codec *codec,
vref);
}
-static void alc283_chromebook_caps(struct hda_codec *codec)
-{
- snd_hda_override_wcaps(codec, 0x03, 0);
-}
-
static void alc283_fixup_chromebook(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -3573,9 +3633,26 @@ static void alc283_fixup_chromebook(struct hda_codec *codec,
switch (action) {
case HDA_FIXUP_ACT_PRE_PROBE:
- alc283_chromebook_caps(codec);
+ snd_hda_override_wcaps(codec, 0x03, 0);
/* Disable AA-loopback as it causes white noise */
spec->gen.mixer_nid = 0;
+ break;
+ case HDA_FIXUP_ACT_INIT:
+ /* Enable Line1 input control by verb */
+ val = alc_read_coef_idx(codec, 0x1a);
+ alc_write_coef_idx(codec, 0x1a, val | (1 << 4));
+ break;
+ }
+}
+
+static void alc283_fixup_sense_combo_jack(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct alc_spec *spec = codec->spec;
+ int val;
+
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
spec->gen.hp_automute_hook = alc283_hp_automute_hook;
break;
case HDA_FIXUP_ACT_INIT:
@@ -3583,9 +3660,6 @@ static void alc283_fixup_chromebook(struct hda_codec *codec,
/* Set to manual mode */
val = alc_read_coef_idx(codec, 0x06);
alc_write_coef_idx(codec, 0x06, val & ~0x000c);
- /* Enable Line1 input control by verb */
- val = alc_read_coef_idx(codec, 0x1a);
- alc_write_coef_idx(codec, 0x1a, val | (1 << 4));
break;
}
}
@@ -3652,9 +3726,29 @@ static void alc290_fixup_mono_speakers(struct hda_codec *codec,
#if IS_ENABLED(CONFIG_THINKPAD_ACPI)
#include <linux/thinkpad_acpi.h>
+#include <acpi/acpi.h>
static int (*led_set_func)(int, bool);
+static acpi_status acpi_check_cb(acpi_handle handle, u32 lvl, void *context,
+ void **rv)
+{
+ bool *found = context;
+ *found = true;
+ return AE_OK;
+}
+
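+/* Lenovo/IBM ThinkPads expose a LEN0068 or IBM0068 ACPI LED device */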
+static bool is_thinkpad(struct hda_codec *codec)
+{
+ bool found = false;
+ if (codec->subsystem_id >> 16 != 0x17aa)
+ return false;
+ if (ACPI_SUCCESS(acpi_get_devices("LEN0068", acpi_check_cb, &found, NULL)) && found)
+ return true;
+ found = false;
+ return ACPI_SUCCESS(acpi_get_devices("IBM0068", acpi_check_cb, &found, NULL)) && found;
+}
+
static void update_tpacpi_mute_led(void *private_data, int enabled)
{
if (led_set_func)
@@ -3680,6 +3774,8 @@ static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
bool removefunc = false;
if (action == HDA_FIXUP_ACT_PROBE) {
+ if (!is_thinkpad(codec))
+ return;
if (!led_set_func)
led_set_func = symbol_request(tpacpi_led_set);
if (!led_set_func) {
@@ -3753,11 +3849,14 @@ enum {
ALC269_FIXUP_ASUS_X101,
ALC271_FIXUP_AMIC_MIC2,
ALC271_FIXUP_HP_GATE_MIC_JACK,
+ ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572,
ALC269_FIXUP_ACER_AC700,
ALC269_FIXUP_LIMIT_INT_MIC_BOOST,
+ ALC269VB_FIXUP_ASUS_ZENBOOK,
ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED,
ALC269VB_FIXUP_ORDISSIMO_EVE2,
ALC283_FIXUP_CHROME_BOOK,
+ ALC283_FIXUP_SENSE_COMBO_JACK,
ALC282_FIXUP_ASUS_TX300,
ALC283_FIXUP_INT_MIC,
ALC290_FIXUP_MONO_SPEAKERS,
@@ -3923,6 +4022,8 @@ static const struct hda_fixup alc269_fixups[] = {
[ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc269_fixup_pincfg_no_hp_to_lineout,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
},
[ALC269_FIXUP_DELL1_MIC_NO_PRESENCE] = {
.type = HDA_FIXUP_PINS,
@@ -4011,6 +4112,12 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC271_FIXUP_AMIC_MIC2,
},
+ [ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc269_fixup_limit_int_mic_boost,
+ .chained = true,
+ .chain_id = ALC271_FIXUP_HP_GATE_MIC_JACK,
+ },
[ALC269_FIXUP_ACER_AC700] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
@@ -4027,6 +4134,14 @@ static const struct hda_fixup alc269_fixups[] = {
[ALC269_FIXUP_LIMIT_INT_MIC_BOOST] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc269_fixup_limit_int_mic_boost,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
+ },
+ [ALC269VB_FIXUP_ASUS_ZENBOOK] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc269_fixup_limit_int_mic_boost,
+ .chained = true,
+ .chain_id = ALC269VB_FIXUP_DMIC,
},
[ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED] = {
.type = HDA_FIXUP_FUNC,
@@ -4047,6 +4162,12 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc283_fixup_chromebook,
},
+ [ALC283_FIXUP_SENSE_COMBO_JACK] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc283_fixup_sense_combo_jack,
+ .chained = true,
+ .chain_id = ALC283_FIXUP_CHROME_BOOK,
+ },
[ALC282_FIXUP_ASUS_TX300] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc282_fixup_asus_tx300,
@@ -4070,8 +4191,6 @@ static const struct hda_fixup alc269_fixups[] = {
[ALC269_FIXUP_THINKPAD_ACPI] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_thinkpad_acpi,
- .chained = true,
- .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
},
[ALC255_FIXUP_DELL1_MIC_NO_PRESENCE] = {
.type = HDA_FIXUP_PINS,
@@ -4096,6 +4215,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
SND_PCI_QUIRK(0x1028, 0x05bd, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05be, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
@@ -4127,10 +4247,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0610, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0614, "Dell Inspiron 3135", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS),
SND_PCI_QUIRK(0x1028, 0x061f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0629, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS),
+ SND_PCI_QUIRK(0x1028, 0x063e, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x063f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0640, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -4138,13 +4264,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x1973, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x1983, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
- SND_PCI_QUIRK(0x103c, 0x21ed, "HP Falco Chromebook", ALC283_FIXUP_CHROME_BOOK),
SND_PCI_QUIRK_VENDOR(0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
- SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_DMIC),
- SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
+ SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK),
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
@@ -4173,7 +4298,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
- SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_THINKPAD_ACPI),
+ SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
@@ -4181,6 +4306,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+ SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", ALC269_FIXUP_THINKPAD_ACPI),
SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
#if 0
@@ -4245,6 +4371,8 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
{.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
{.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"},
+ {.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-chrome"},
+ {.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"},
{}
};
@@ -4420,6 +4548,7 @@ enum {
ALC861_FIXUP_AMP_VREF_0F,
ALC861_FIXUP_NO_JACK_DETECT,
ALC861_FIXUP_ASUS_A6RP,
+ ALC660_FIXUP_ASUS_W7J,
};
/* On some laptops, VREF of pin 0x0f is abused for controlling the main amp */
@@ -4469,10 +4598,22 @@ static const struct hda_fixup alc861_fixups[] = {
.v.func = alc861_fixup_asus_amp_vref_0f,
.chained = true,
.chain_id = ALC861_FIXUP_NO_JACK_DETECT,
+ },
+ [ALC660_FIXUP_ASUS_W7J] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ /* ASUS W7J needs a magic pin setup on unused NID 0x10
+ * for enabling outputs
+ */
+ {0x10, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24},
+ { }
+ },
}
};
static const struct snd_pci_quirk alc861_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x1253, "ASUS W7J", ALC660_FIXUP_ASUS_W7J),
+ SND_PCI_QUIRK(0x1043, 0x1263, "ASUS Z35HL", ALC660_FIXUP_ASUS_W7J),
SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", ALC861_FIXUP_ASUS_A6RP),
SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", ALC861_FIXUP_AMP_VREF_0F),
SND_PCI_QUIRK(0x1462, 0x7254, "HP DX2200", ALC861_FIXUP_NO_JACK_DETECT),
@@ -4668,7 +4809,7 @@ static const struct snd_pcm_chmap_elem asus_pcm_2_1_chmaps[] = {
};
/* override the 2.1 chmap */
-static void alc662_fixup_bass_chmap(struct hda_codec *codec,
+static void alc_fixup_bass_chmap(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
if (action == HDA_FIXUP_ACT_BUILD) {
@@ -4698,6 +4839,8 @@ enum {
ALC668_FIXUP_DELL_MIC_NO_PRESENCE,
ALC668_FIXUP_HEADSET_MODE,
ALC662_FIXUP_BASS_CHMAP,
+ ALC662_FIXUP_BASS_1A,
+ ALC662_FIXUP_BASS_1A_CHMAP,
};
static const struct hda_fixup alc662_fixups[] = {
@@ -4874,10 +5017,23 @@ static const struct hda_fixup alc662_fixups[] = {
},
[ALC662_FIXUP_BASS_CHMAP] = {
.type = HDA_FIXUP_FUNC,
- .v.func = alc662_fixup_bass_chmap,
+ .v.func = alc_fixup_bass_chmap,
.chained = true,
.chain_id = ALC662_FIXUP_ASUS_MODE4
},
+ [ALC662_FIXUP_BASS_1A] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ {0x1a, 0x80106111}, /* bass speaker */
+ {}
+ },
+ },
+ [ALC662_FIXUP_BASS_1A_CHMAP] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_bass_chmap,
+ .chained = true,
+ .chain_id = ALC662_FIXUP_BASS_1A,
+ },
};
static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -4890,8 +5046,13 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0623, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0624, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0628, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
+ SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A_CHMAP),
SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP),
SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_CHMAP),
SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
@@ -5054,6 +5215,7 @@ static int patch_alc662(struct hda_codec *codec)
case 0x10ec0272:
case 0x10ec0663:
case 0x10ec0665:
+ case 0x10ec0668:
set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
break;
case 0x10ec0273:
@@ -5111,6 +5273,7 @@ static int patch_alc680(struct hda_codec *codec)
*/
static const struct hda_codec_preset snd_hda_preset_realtek[] = {
{ .id = 0x10ec0221, .name = "ALC221", .patch = patch_alc269 },
+ { .id = 0x10ec0231, .name = "ALC231", .patch = patch_alc269 },
{ .id = 0x10ec0233, .name = "ALC233", .patch = patch_alc269 },
{ .id = 0x10ec0255, .name = "ALC255", .patch = patch_alc269 },
{ .id = 0x10ec0260, .name = "ALC260", .patch = patch_alc260 },
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index d2cc0041d9d3..088a5afbd1b9 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -2094,7 +2094,8 @@ static void stac92hd83xxx_fixup_hp_mic_led(struct hda_codec *codec,
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
spec->mic_mute_led_gpio = 0x08; /* GPIO3 */
- codec->bus->avoid_link_reset = 1;
+ /* resetting the controller clears the GPIO, so keep the link power on */
+ codec->bus->power_keep_link_on = 1;
}
}
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index 8697cedccd21..1ead3c977a51 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -648,7 +648,7 @@ static int atmel_ssc_prepare(struct snd_pcm_substream *substream,
dma_params = ssc_p->dma_params[dir];
- ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable);
+ ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable);
ssc_writel(ssc_p->ssc->regs, IDR, dma_params->mask->ssc_error);
pr_debug("%s enabled SSC_SR=0x%08x\n",
@@ -657,6 +657,33 @@ static int atmel_ssc_prepare(struct snd_pcm_substream *substream,
return 0;
}
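+/* enable/disable the SSC around stream start/stop */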
+static int atmel_ssc_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ struct atmel_ssc_info *ssc_p = &ssc_info[dai->id];
+ struct atmel_pcm_dma_params *dma_params;
+ int dir;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ dir = 0;
+ else
+ dir = 1;
+
+ dma_params = ssc_p->dma_params[dir];
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable);
+ break;
+ default:
+ ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable);
+ break;
+ }
+
+ return 0;
+}
#ifdef CONFIG_PM
static int atmel_ssc_suspend(struct snd_soc_dai *cpu_dai)
@@ -731,6 +758,7 @@ static const struct snd_soc_dai_ops atmel_ssc_dai_ops = {
.startup = atmel_ssc_startup,
.shutdown = atmel_ssc_shutdown,
.prepare = atmel_ssc_prepare,
+ .trigger = atmel_ssc_trigger,
.hw_params = atmel_ssc_hw_params,
.set_fmt = atmel_ssc_set_dai_fmt,
.set_clkdiv = atmel_ssc_set_dai_clkdiv,
diff --git a/sound/soc/atmel/sam9x5_wm8731.c b/sound/soc/atmel/sam9x5_wm8731.c
index 992ae38d5a15..7d6a9055874b 100644
--- a/sound/soc/atmel/sam9x5_wm8731.c
+++ b/sound/soc/atmel/sam9x5_wm8731.c
@@ -97,6 +97,8 @@ static int sam9x5_wm8731_driver_probe(struct platform_device *pdev)
goto out;
}
+ snd_soc_card_set_drvdata(card, priv);
+
card->dev = &pdev->dev;
card->owner = THIS_MODULE;
card->dai_link = dai;
@@ -107,7 +109,7 @@ static int sam9x5_wm8731_driver_probe(struct platform_device *pdev)
dai->stream_name = "WM8731 PCM";
dai->codec_dai_name = "wm8731-hifi";
dai->init = sam9x5_wm8731_init;
- dai->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+ dai->dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBM_CFM;
ret = snd_soc_of_parse_card_name(card, "atmel,model");
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
index 21ae8d4fdbfb..1ad92cbf0b24 100644
--- a/sound/soc/codecs/ab8500-codec.c
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -126,8 +126,6 @@ struct ab8500_codec_drvdata_dbg {
/* Private data for AB8500 device-driver */
struct ab8500_codec_drvdata {
- struct regmap *regmap;
-
/* Sidetone */
long *sid_fir_values;
enum sid_state sid_status;
@@ -168,34 +166,48 @@ static inline const char *amic_type_str(enum amic_type type)
*/
/* Read a register from the audio-bank of AB8500 */
-static int ab8500_codec_read_reg(void *context, unsigned int reg,
- unsigned int *value)
+static unsigned int ab8500_codec_read_reg(struct snd_soc_codec *codec,
+ unsigned int reg)
{
- struct device *dev = context;
int status;
+ unsigned int value = 0;
u8 value8;
- status = abx500_get_register_interruptible(dev, AB8500_AUDIO,
- reg, &value8);
- *value = (unsigned int)value8;
+ status = abx500_get_register_interruptible(codec->dev, AB8500_AUDIO,
+ reg, &value8);
+ if (status < 0) {
+ dev_err(codec->dev,
+ "%s: ERROR: Register (0x%02x:0x%02x) read failed (%d).\n",
+ __func__, (u8)AB8500_AUDIO, (u8)reg, status);
+ } else {
+ dev_dbg(codec->dev,
+ "%s: Read 0x%02x from register 0x%02x:0x%02x\n",
+ __func__, value8, (u8)AB8500_AUDIO, (u8)reg);
+ value = (unsigned int)value8;
+ }
- return status;
+ return value;
}
/* Write to a register in the audio-bank of AB8500 */
-static int ab8500_codec_write_reg(void *context, unsigned int reg,
- unsigned int value)
+static int ab8500_codec_write_reg(struct snd_soc_codec *codec,
+ unsigned int reg, unsigned int value)
{
- struct device *dev = context;
+ int status;
- return abx500_set_register_interruptible(dev, AB8500_AUDIO,
- reg, value);
-}
+ status = abx500_set_register_interruptible(codec->dev, AB8500_AUDIO,
+ reg, value);
+ if (status < 0)
+ dev_err(codec->dev,
+ "%s: ERROR: Register (%02x:%02x) write failed (%d).\n",
+ __func__, (u8)AB8500_AUDIO, (u8)reg, status);
+ else
+ dev_dbg(codec->dev,
+ "%s: Wrote 0x%02x into register %02x:%02x\n",
+ __func__, (u8)value, (u8)AB8500_AUDIO, (u8)reg);
-static const struct regmap_config ab8500_codec_regmap = {
- .reg_read = ab8500_codec_read_reg,
- .reg_write = ab8500_codec_write_reg,
-};
+ return status;
+}
/*
* Controls - DAPM
@@ -2473,13 +2485,9 @@ static int ab8500_codec_probe(struct snd_soc_codec *codec)
dev_dbg(dev, "%s: Enter.\n", __func__);
- snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);
-
/* Setup AB8500 according to board-settings */
pdata = dev_get_platdata(dev->parent);
- codec->control_data = drvdata->regmap;
-
if (np) {
if (!pdata)
pdata = devm_kzalloc(dev,
@@ -2557,6 +2565,9 @@ static int ab8500_codec_probe(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver ab8500_codec_driver = {
.probe = ab8500_codec_probe,
+ .read = ab8500_codec_read_reg,
+ .write = ab8500_codec_write_reg,
+ .reg_word_size = sizeof(u8),
.controls = ab8500_ctrls,
.num_controls = ARRAY_SIZE(ab8500_ctrls),
.dapm_widgets = ab8500_dapm_widgets,
@@ -2581,15 +2592,6 @@ static int ab8500_codec_driver_probe(struct platform_device *pdev)
drvdata->anc_status = ANC_UNCONFIGURED;
dev_set_drvdata(&pdev->dev, drvdata);
- drvdata->regmap = devm_regmap_init(&pdev->dev, NULL, &pdev->dev,
- &ab8500_codec_regmap);
- if (IS_ERR(drvdata->regmap)) {
- status = PTR_ERR(drvdata->regmap);
- dev_err(&pdev->dev, "%s: Failed to allocate regmap: %d\n",
- __func__, status);
- return status;
- }
-
dev_dbg(&pdev->dev, "%s: Register codec.\n", __func__);
status = snd_soc_register_codec(&pdev->dev, &ab8500_codec_driver,
ab8500_codec_dai,
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 6f05b17d1965..fea991031be1 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -1529,6 +1529,8 @@ static void arizona_enable_fll(struct arizona_fll *fll,
try_wait_for_completion(&fll->ok);
regmap_update_bits(arizona->regmap, fll->base + 1,
+ ARIZONA_FLL1_FREERUN, 0);
+ regmap_update_bits(arizona->regmap, fll->base + 1,
ARIZONA_FLL1_ENA, ARIZONA_FLL1_ENA);
if (use_sync)
regmap_update_bits(arizona->regmap, fll->base + 0x11,
@@ -1546,6 +1548,8 @@ static void arizona_disable_fll(struct arizona_fll *fll)
struct arizona *arizona = fll->arizona;
bool change;
+ regmap_update_bits(arizona->regmap, fll->base + 1,
+ ARIZONA_FLL1_FREERUN, ARIZONA_FLL1_FREERUN);
regmap_update_bits_check(arizona->regmap, fll->base + 1,
ARIZONA_FLL1_ENA, 0, &change);
regmap_update_bits(arizona->regmap, fll->base + 0x11,
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index f2d1094424b9..0ab2dc296474 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -37,6 +37,47 @@ struct wm5110_priv {
struct arizona_fll fll[2];
};
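+/* register patch applied whenever SYSCLK comes up on rev D (rev == 3) parts */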
+static const struct reg_default wm5110_sysclk_revd_patch[] = {
+ { 0x3093, 0x1001 },
+ { 0x30E3, 0x1301 },
+ { 0x3133, 0x1201 },
+ { 0x3183, 0x1501 },
+ { 0x31D3, 0x1401 },
+};
+
+static int wm5110_sysclk_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
+ struct regmap *regmap = codec->control_data;
+ const struct reg_default *patch = NULL;
+ int i, patch_size;
+
+ switch (arizona->rev) {
+ case 3:
+ patch = wm5110_sysclk_revd_patch;
+ patch_size = ARRAY_SIZE(wm5110_sysclk_revd_patch);
+ break;
+ default:
+ return 0;
+ }
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ if (patch)
+ for (i = 0; i < patch_size; i++)
+ regmap_write(regmap, patch[i].reg,
+ patch[i].def);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
@@ -207,19 +248,6 @@ ARIZONA_MIXER_CONTROLS("SPKDAT1R", ARIZONA_OUT5RMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("SPKDAT2L", ARIZONA_OUT6LMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("SPKDAT2R", ARIZONA_OUT6RMIX_INPUT_1_SOURCE),
-SOC_SINGLE("HPOUT1 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_1L,
- ARIZONA_OUT1_OSR_SHIFT, 1, 0),
-SOC_SINGLE("HPOUT2 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_2L,
- ARIZONA_OUT2_OSR_SHIFT, 1, 0),
-SOC_SINGLE("HPOUT3 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_3L,
- ARIZONA_OUT3_OSR_SHIFT, 1, 0),
-SOC_SINGLE("Speaker High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_4L,
- ARIZONA_OUT4_OSR_SHIFT, 1, 0),
-SOC_SINGLE("SPKDAT1 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_5L,
- ARIZONA_OUT5_OSR_SHIFT, 1, 0),
-SOC_SINGLE("SPKDAT2 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_6L,
- ARIZONA_OUT6_OSR_SHIFT, 1, 0),
-
SOC_DOUBLE_R("HPOUT1 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_1L,
ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_OUT1L_MUTE_SHIFT, 1, 1),
SOC_DOUBLE_R("HPOUT2 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_2L,
@@ -252,18 +280,6 @@ SOC_DOUBLE_R_TLV("SPKDAT2 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_6L,
ARIZONA_DAC_DIGITAL_VOLUME_6R, ARIZONA_OUT6L_VOL_SHIFT,
0xbf, 0, digital_tlv),
-SOC_DOUBLE_R_RANGE_TLV("HPOUT1 Volume", ARIZONA_OUTPUT_PATH_CONFIG_1L,
- ARIZONA_OUTPUT_PATH_CONFIG_1R,
- ARIZONA_OUT1L_PGA_VOL_SHIFT,
- 0x34, 0x40, 0, ana_tlv),
-SOC_DOUBLE_R_RANGE_TLV("HPOUT2 Volume", ARIZONA_OUTPUT_PATH_CONFIG_2L,
- ARIZONA_OUTPUT_PATH_CONFIG_2R,
- ARIZONA_OUT2L_PGA_VOL_SHIFT,
- 0x34, 0x40, 0, ana_tlv),
-SOC_DOUBLE_R_RANGE_TLV("HPOUT3 Volume", ARIZONA_OUTPUT_PATH_CONFIG_3L,
- ARIZONA_OUTPUT_PATH_CONFIG_3R,
- ARIZONA_OUT3L_PGA_VOL_SHIFT, 0x34, 0x40, 0, ana_tlv),
-
SOC_DOUBLE("SPKDAT1 Switch", ARIZONA_PDM_SPK1_CTRL_1, ARIZONA_SPK1L_MUTE_SHIFT,
ARIZONA_SPK1R_MUTE_SHIFT, 1, 1),
SOC_DOUBLE("SPKDAT2 Switch", ARIZONA_PDM_SPK2_CTRL_1, ARIZONA_SPK2L_MUTE_SHIFT,
@@ -400,7 +416,7 @@ static const struct snd_kcontrol_new wm5110_aec_loopback_mux =
static const struct snd_soc_dapm_widget wm5110_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1, ARIZONA_SYSCLK_ENA_SHIFT,
- 0, NULL, 0),
+ 0, wm5110_sysclk_ev, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1,
ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK,
@@ -996,7 +1012,7 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
{ "AEC Loopback", "HPOUT3L", "OUT3L" },
{ "AEC Loopback", "HPOUT3R", "OUT3R" },
{ "HPOUT3L", NULL, "OUT3L" },
- { "HPOUT3R", NULL, "OUT3L" },
+ { "HPOUT3R", NULL, "OUT3R" },
{ "AEC Loopback", "SPKOUTL", "OUT4L" },
{ "SPKOUTLN", NULL, "OUT4L" },
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 456bb8c6d759..bc7472c968e3 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -447,10 +447,10 @@ static int wm8731_set_dai_fmt(struct snd_soc_dai *codec_dai,
iface |= 0x0001;
break;
case SND_SOC_DAIFMT_DSP_A:
- iface |= 0x0003;
+ iface |= 0x0013;
break;
case SND_SOC_DAIFMT_DSP_B:
- iface |= 0x0013;
+ iface |= 0x0003;
break;
default:
return -EINVAL;
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 3938fb1c203e..53bbfac6a83a 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -1444,7 +1444,7 @@ static int wm8904_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_B:
- aif1 |= WM8904_AIF_LRCLK_INV;
+ aif1 |= 0x3 | WM8904_AIF_LRCLK_INV;
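+ /* fall through */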
case SND_SOC_DAIFMT_DSP_A:
aif1 |= 0x3;
break;
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 543c5c2631b6..0f17ed3e29f4 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -2439,7 +2439,20 @@ static void wm8962_configure_bclk(struct snd_soc_codec *codec)
snd_soc_update_bits(codec, WM8962_CLOCKING_4,
WM8962_SYSCLK_RATE_MASK, clocking4);
+ /* DSPCLK_DIV is only generated correctly after SYSCLK has been enabled,
+ * so provisionally enable SYSCLK here and disable it again afterwards
+ * if the current bias_level hasn't reached SND_SOC_BIAS_ON.
+ */
+ if (codec->dapm.bias_level != SND_SOC_BIAS_ON)
+ snd_soc_update_bits(codec, WM8962_CLOCKING2,
+ WM8962_SYSCLK_ENA_MASK, WM8962_SYSCLK_ENA);
+
dspclk = snd_soc_read(codec, WM8962_CLOCKING1);
+
+ if (codec->dapm.bias_level != SND_SOC_BIAS_ON)
+ snd_soc_update_bits(codec, WM8962_CLOCKING2,
+ WM8962_SYSCLK_ENA_MASK, 0);
+
if (dspclk < 0) {
dev_err(codec->dev, "Failed to read DSPCLK: %d\n", dspclk);
return;
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index 253c88bb7a4c..4f05fb88bddf 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -1259,6 +1259,8 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec,
/* disable POBCTRL, SOFT_ST and BUFDCOPEN */
snd_soc_write(codec, WM8990_ANTIPOP2, 0x0);
+
+ codec->cache_sync = 1;
break;
}
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 46ec0e9744d4..4fbcab63e61f 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -1474,13 +1474,17 @@ static int wm_adsp2_ena(struct wm_adsp *dsp)
return ret;
/* Wait for the RAM to start, should be near instantaneous */
- count = 0;
- do {
+ for (count = 0; count < 10; ++count) {
ret = regmap_read(dsp->regmap, dsp->base + ADSP2_STATUS1,
&val);
if (ret != 0)
return ret;
- } while (!(val & ADSP2_RAM_RDY) && ++count < 10);
+
+ if (val & ADSP2_RAM_RDY)
+ break;
+
+ msleep(1);
+ }
if (!(val & ADSP2_RAM_RDY)) {
adsp_err(dsp, "Failed to start DSP RAM\n");
diff --git a/sound/soc/davinci/davinci-pcm.c b/sound/soc/davinci/davinci-pcm.c
index fa64cd85204f..fb5d107f5603 100644
--- a/sound/soc/davinci/davinci-pcm.c
+++ b/sound/soc/davinci/davinci-pcm.c
@@ -238,7 +238,7 @@ static void davinci_pcm_dma_irq(unsigned link, u16 ch_status, void *data)
print_buf_info(prtd->ram_channel, "i ram_channel");
pr_debug("davinci_pcm: link=%d, status=0x%x\n", link, ch_status);
- if (unlikely(ch_status != DMA_COMPLETE))
+ if (unlikely(ch_status != EDMA_DMA_COMPLETE))
return;
if (snd_pcm_running(substream)) {
diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c
index 61e48852b9e8..3fd76bc391de 100644
--- a/sound/soc/fsl/imx-wm8962.c
+++ b/sound/soc/fsl/imx-wm8962.c
@@ -130,8 +130,6 @@ static int imx_wm8962_set_bias_level(struct snd_soc_card *card,
break;
}
- dapm->bias_level = level;
-
return 0;
}
diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c
index eb4373840bb6..3665f612819d 100644
--- a/sound/soc/fsl/pcm030-audio-fabric.c
+++ b/sound/soc/fsl/pcm030-audio-fabric.c
@@ -69,7 +69,6 @@ static int pcm030_fabric_probe(struct platform_device *op)
return -ENOMEM;
card->dev = &op->dev;
- platform_set_drvdata(op, pdata);
pdata->card = card;
@@ -98,6 +97,8 @@ static int pcm030_fabric_probe(struct platform_device *op)
if (ret)
dev_err(&op->dev, "snd_soc_register_card() failed: %d\n", ret);
+ platform_set_drvdata(op, pdata);
+
return ret;
}
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index d34d91743e3f..3920a5e8125f 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -33,6 +33,10 @@
SNDRV_PCM_FMTBIT_S24_LE | \
SNDRV_PCM_FMTBIT_S32_LE)
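+/* S/PDIF carries at most 24 audio bits per subframe, so no S32 here */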
+#define KIRKWOOD_SPDIF_FORMATS \
+ (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
+
static int kirkwood_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
unsigned int fmt)
{
@@ -244,15 +248,15 @@ static int kirkwood_i2s_play_trigger(struct snd_pcm_substream *substream,
ctl);
}
- if (dai->id == 0)
- ctl &= ~KIRKWOOD_PLAYCTL_SPDIF_EN; /* i2s */
- else
- ctl &= ~KIRKWOOD_PLAYCTL_I2S_EN; /* spdif */
-
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
/* configure */
ctl = priv->ctl_play;
+ if (dai->id == 0)
+ ctl &= ~KIRKWOOD_PLAYCTL_SPDIF_EN; /* i2s */
+ else
+ ctl &= ~KIRKWOOD_PLAYCTL_I2S_EN; /* spdif */
+
value = ctl & ~KIRKWOOD_PLAYCTL_ENABLE_MASK;
writel(value, priv->io + KIRKWOOD_PLAYCTL);
@@ -449,14 +453,14 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai[2] = {
.channels_max = 2,
.rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |
SNDRV_PCM_RATE_96000,
- .formats = KIRKWOOD_I2S_FORMATS,
+ .formats = KIRKWOOD_SPDIF_FORMATS,
},
.capture = {
.channels_min = 1,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |
SNDRV_PCM_RATE_96000,
- .formats = KIRKWOOD_I2S_FORMATS,
+ .formats = KIRKWOOD_SPDIF_FORMATS,
},
.ops = &kirkwood_i2s_dai_ops,
},
@@ -469,17 +473,17 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk[2] = {
.playback = {
.channels_min = 1,
.channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_192000 |
- SNDRV_PCM_RATE_CONTINUOUS |
- SNDRV_PCM_RATE_KNOT,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 5512,
+ .rate_max = 192000,
.formats = KIRKWOOD_I2S_FORMATS,
},
.capture = {
.channels_min = 1,
.channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_192000 |
- SNDRV_PCM_RATE_CONTINUOUS |
- SNDRV_PCM_RATE_KNOT,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 5512,
+ .rate_max = 192000,
.formats = KIRKWOOD_I2S_FORMATS,
},
.ops = &kirkwood_i2s_dai_ops,
@@ -490,18 +494,18 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk[2] = {
.playback = {
.channels_min = 1,
.channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_192000 |
- SNDRV_PCM_RATE_CONTINUOUS |
- SNDRV_PCM_RATE_KNOT,
- .formats = KIRKWOOD_I2S_FORMATS,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 5512,
+ .rate_max = 192000,
+ .formats = KIRKWOOD_SPDIF_FORMATS,
},
.capture = {
.channels_min = 1,
.channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_192000 |
- SNDRV_PCM_RATE_CONTINUOUS |
- SNDRV_PCM_RATE_KNOT,
- .formats = KIRKWOOD_I2S_FORMATS,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 5512,
+ .rate_max = 192000,
+ .formats = KIRKWOOD_SPDIF_FORMATS,
},
.ops = &kirkwood_i2s_dai_ops,
},
diff --git a/sound/soc/omap/n810.c b/sound/soc/omap/n810.c
index 6d216cb6c19b..3fde9e402710 100644
--- a/sound/soc/omap/n810.c
+++ b/sound/soc/omap/n810.c
@@ -100,12 +100,12 @@ static int n810_startup(struct snd_pcm_substream *substream)
SNDRV_PCM_HW_PARAM_CHANNELS, 2, 2);
n810_ext_control(&codec->dapm);
- return clk_enable(sys_clkout2);
+ return clk_prepare_enable(sys_clkout2);
}
static void n810_shutdown(struct snd_pcm_substream *substream)
{
- clk_disable(sys_clkout2);
+ clk_disable_unprepare(sys_clkout2);
}
static int n810_hw_params(struct snd_pcm_substream *substream,
diff --git a/sound/soc/sh/Kconfig b/sound/soc/sh/Kconfig
index 14011d90d70a..ff60e11ecb56 100644
--- a/sound/soc/sh/Kconfig
+++ b/sound/soc/sh/Kconfig
@@ -37,6 +37,7 @@ config SND_SOC_SH4_SIU
config SND_SOC_RCAR
tristate "R-Car series SRU/SCU/SSIU/SSI support"
select SND_SIMPLE_CARD
+ select REGMAP
help
This option enables R-Car SRU/SCU/SSIU/SSI sound support
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 78c35b44fc04..b3653d37f75f 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -200,9 +200,8 @@ static void rsnd_dma_do_work(struct work_struct *work)
return;
}
+ dma_async_issue_pending(dma->chan);
}
-
- dma_async_issue_pending(dma->chan);
}
int rsnd_dma_available(struct rsnd_dma *dma)
@@ -288,15 +287,13 @@ int rsnd_dai_connect(struct rsnd_dai *rdai,
struct rsnd_mod *mod,
struct rsnd_dai_stream *io)
{
- struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
- struct device *dev = rsnd_priv_to_dev(priv);
-
- if (!mod) {
- dev_err(dev, "NULL mod\n");
+ if (!mod)
return -EIO;
- }
if (!list_empty(&mod->list)) {
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct device *dev = rsnd_priv_to_dev(priv);
+
dev_err(dev, "%s%d is not empty\n",
rsnd_mod_name(mod),
rsnd_mod_id(mod));
diff --git a/sound/soc/sh/rcar/scu.c b/sound/soc/sh/rcar/scu.c
index f4453e33a847..fa8fa15860b9 100644
--- a/sound/soc/sh/rcar/scu.c
+++ b/sound/soc/sh/rcar/scu.c
@@ -68,7 +68,7 @@ static int rsnd_scu_set_route(struct rsnd_priv *priv,
return 0;
id = rsnd_mod_id(mod);
- if (id < 0 || id > ARRAY_SIZE(routes))
+ if (id < 0 || id >= ARRAY_SIZE(routes))
return -EIO;
/*
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 4e53d87e881d..a66783e13a9c 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3212,11 +3212,11 @@ int snd_soc_bytes_get(struct snd_kcontrol *kcontrol,
break;
case 2:
((u16 *)(&ucontrol->value.bytes.data))[0]
- &= ~params->mask;
+ &= cpu_to_be16(~params->mask);
break;
case 4:
((u32 *)(&ucontrol->value.bytes.data))[0]
- &= ~params->mask;
+ &= cpu_to_be32(~params->mask);
break;
default:
return -EINVAL;
diff --git a/sound/soc/soc-devres.c b/sound/soc/soc-devres.c
index b1d732255c02..3449c1e909ae 100644
--- a/sound/soc/soc-devres.c
+++ b/sound/soc/soc-devres.c
@@ -66,7 +66,7 @@ static void devm_card_release(struct device *dev, void *res)
*/
int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card)
{
- struct device **ptr;
+ struct snd_soc_card **ptr;
int ret;
ptr = devres_alloc(devm_card_release, sizeof(*ptr), GFP_KERNEL);
@@ -75,7 +75,7 @@ int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card)
ret = snd_soc_register_card(card);
if (ret == 0) {
- *ptr = dev;
+ *ptr = card;
devres_add(dev, ptr);
} else {
devres_free(ptr);
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index cbc9c96ce1f4..41949af3baae 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -305,6 +305,20 @@ static void dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm,
}
}
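+/* release the requested channels; a half-duplex PCM shares one channel
+ * for both directions, so stop after the first release
+ */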
+static void dmaengine_pcm_release_chan(struct dmaengine_pcm *pcm)
+{
+ unsigned int i;
+
+ for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE;
+ i++) {
+ if (!pcm->chan[i])
+ continue;
+ dma_release_channel(pcm->chan[i]);
+ if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
+ break;
+ }
+}
+
/**
* snd_dmaengine_pcm_register - Register a dmaengine based PCM device
* @dev: The parent device for the PCM device
@@ -315,6 +329,7 @@ int snd_dmaengine_pcm_register(struct device *dev,
const struct snd_dmaengine_pcm_config *config, unsigned int flags)
{
struct dmaengine_pcm *pcm;
+ int ret;
pcm = kzalloc(sizeof(*pcm), GFP_KERNEL);
if (!pcm)
@@ -326,11 +341,20 @@ int snd_dmaengine_pcm_register(struct device *dev,
dmaengine_pcm_request_chan_of(pcm, dev);
if (flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
- return snd_soc_add_platform(dev, &pcm->platform,
+ ret = snd_soc_add_platform(dev, &pcm->platform,
&dmaengine_no_residue_pcm_platform);
else
- return snd_soc_add_platform(dev, &pcm->platform,
+ ret = snd_soc_add_platform(dev, &pcm->platform,
&dmaengine_pcm_platform);
+ if (ret)
+ goto err_free_dma;
+
+ return 0;
+
+err_free_dma:
+ dmaengine_pcm_release_chan(pcm);
+ kfree(pcm);
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register);
@@ -345,7 +369,6 @@ void snd_dmaengine_pcm_unregister(struct device *dev)
{
struct snd_soc_platform *platform;
struct dmaengine_pcm *pcm;
- unsigned int i;
platform = snd_soc_lookup_platform(dev);
if (!platform)
@@ -353,15 +376,8 @@ void snd_dmaengine_pcm_unregister(struct device *dev)
pcm = soc_platform_to_pcm(platform);
- for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; i++) {
- if (pcm->chan[i]) {
- dma_release_channel(pcm->chan[i]);
- if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
- break;
- }
- }
-
snd_soc_remove_platform(platform);
+ dmaengine_pcm_release_chan(pcm);
kfree(pcm);
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_unregister);
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 42782c01e413..891b9a9bcbf8 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -148,12 +148,12 @@ static void soc_pcm_apply_msb(struct snd_pcm_substream *substream,
}
}
-static void soc_pcm_init_runtime_hw(struct snd_pcm_hardware *hw,
+static void soc_pcm_init_runtime_hw(struct snd_pcm_runtime *runtime,
struct snd_soc_pcm_stream *codec_stream,
struct snd_soc_pcm_stream *cpu_stream)
{
- hw->rate_min = max(codec_stream->rate_min, cpu_stream->rate_min);
- hw->rate_max = max(codec_stream->rate_max, cpu_stream->rate_max);
+ struct snd_pcm_hardware *hw = &runtime->hw;
+
hw->channels_min = max(codec_stream->channels_min,
cpu_stream->channels_min);
hw->channels_max = min(codec_stream->channels_max,
@@ -166,6 +166,13 @@ static void soc_pcm_init_runtime_hw(struct snd_pcm_hardware *hw,
if (cpu_stream->rates
& (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))
hw->rates |= codec_stream->rates;
+
+ snd_pcm_limit_hw_rates(runtime);
+
+ hw->rate_min = max(hw->rate_min, cpu_stream->rate_min);
+ hw->rate_min = max(hw->rate_min, codec_stream->rate_min);
+ hw->rate_max = min_not_zero(hw->rate_max, cpu_stream->rate_max);
+ hw->rate_max = min_not_zero(hw->rate_max, codec_stream->rate_max);
}
/*
@@ -235,15 +242,14 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
/* Check that the codec and cpu DAIs are compatible */
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- soc_pcm_init_runtime_hw(&runtime->hw, &codec_dai_drv->playback,
+ soc_pcm_init_runtime_hw(runtime, &codec_dai_drv->playback,
&cpu_dai_drv->playback);
} else {
- soc_pcm_init_runtime_hw(&runtime->hw, &codec_dai_drv->capture,
+ soc_pcm_init_runtime_hw(runtime, &codec_dai_drv->capture,
&cpu_dai_drv->capture);
}
ret = -EINVAL;
- snd_pcm_limit_hw_rates(runtime);
if (!runtime->hw.rates) {
printk(KERN_ERR "ASoC: %s <-> %s No matching rates\n",
codec_dai->name, cpu_dai->name);
@@ -594,12 +600,13 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
struct snd_soc_platform *platform = rtd->platform;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_codec *codec = rtd->codec;
+ bool playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
/* apply codec digital mute */
- if (!codec->active)
+ if ((playback && codec_dai->playback_active == 1) ||
+ (!playback && codec_dai->capture_active == 1))
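+ /* i.e. this substream is the last active user in this direction */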
snd_soc_dai_digital_mute(codec_dai, 1, substream->stream);
/* free any machine hw params */
diff --git a/sound/soc/tegra/tegra20_i2s.c b/sound/soc/tegra/tegra20_i2s.c
index 364bf6a907e1..8c819f811470 100644
--- a/sound/soc/tegra/tegra20_i2s.c
+++ b/sound/soc/tegra/tegra20_i2s.c
@@ -74,7 +74,7 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai,
unsigned int fmt)
{
struct tegra20_i2s *i2s = snd_soc_dai_get_drvdata(dai);
- unsigned int mask, val;
+ unsigned int mask = 0, val = 0;
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
@@ -83,10 +83,10 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai,
return -EINVAL;
}
- mask = TEGRA20_I2S_CTRL_MASTER_ENABLE;
+ mask |= TEGRA20_I2S_CTRL_MASTER_ENABLE;
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
- val = TEGRA20_I2S_CTRL_MASTER_ENABLE;
+ val |= TEGRA20_I2S_CTRL_MASTER_ENABLE;
break;
case SND_SOC_DAIFMT_CBM_CFM:
break;
diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c
index 08bc6931c7c7..8c7c1028e579 100644
--- a/sound/soc/tegra/tegra20_spdif.c
+++ b/sound/soc/tegra/tegra20_spdif.c
@@ -67,15 +67,15 @@ static int tegra20_spdif_hw_params(struct snd_pcm_substream *substream,
{
struct device *dev = dai->dev;
struct tegra20_spdif *spdif = snd_soc_dai_get_drvdata(dai);
- unsigned int mask, val;
+ unsigned int mask = 0, val = 0;
int ret, spdifclock;
- mask = TEGRA20_SPDIF_CTRL_PACK |
- TEGRA20_SPDIF_CTRL_BIT_MODE_MASK;
+ mask |= TEGRA20_SPDIF_CTRL_PACK |
+ TEGRA20_SPDIF_CTRL_BIT_MODE_MASK;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
- val = TEGRA20_SPDIF_CTRL_PACK |
- TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT;
+ val |= TEGRA20_SPDIF_CTRL_PACK |
+ TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT;
break;
default:
return -EINVAL;
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index 231a785b3921..02247fee1cf7 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -118,7 +118,7 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
unsigned int fmt)
{
struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
- unsigned int mask, val;
+ unsigned int mask = 0, val = 0;
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
@@ -127,10 +127,10 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
return -EINVAL;
}
- mask = TEGRA30_I2S_CTRL_MASTER_ENABLE;
+ mask |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
- val = TEGRA30_I2S_CTRL_MASTER_ENABLE;
+ val |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
break;
case SND_SOC_DAIFMT_CBM_CFM:
break;
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index b9ba0fcc45df..83aabea259d7 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -636,8 +636,22 @@ static int data_ep_set_params(struct snd_usb_endpoint *ep,
if (usb_pipein(ep->pipe) ||
snd_usb_endpoint_implicit_feedback_sink(ep)) {
+ urb_packs = packs_per_ms;
+ /*
+ * Wireless devices can poll at a max rate of once per 4ms.
+ * For dataintervals less than 5, increase the packet count to
+ * allow the host controller to use bursting to fill in the
+ * gaps.
+ */
+ if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_WIRELESS) {
+ int interval = ep->datainterval;
+ while (interval < 5) {
+ urb_packs <<= 1;
+ ++interval;
+ }
+ }
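+ /* e.g. datainterval 1 doubles urb_packs four times (16 packets per URB) */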
/* make capture URBs <= 1 ms and smaller than a period */
- urb_packs = min(max_packs_per_urb, packs_per_ms);
+ urb_packs = min(max_packs_per_urb, urb_packs);
while (urb_packs > 1 && urb_packs * maxsize >= period_bytes)
urb_packs >>= 1;
ep->nurbs = MAX_URBS;
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 3454262358b3..f4b12c216f1c 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -1603,7 +1603,7 @@ static int snd_microii_controls_create(struct usb_mixer_interface *mixer)
return err;
}
- return err;
+ return 0;
}
int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 0362d575de7d..217c82ee3665 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -1606,6 +1606,24 @@ process_arg(struct event_format *event, struct print_arg *arg, char **tok)
static enum event_type
process_op(struct event_format *event, struct print_arg *arg, char **tok);
+/*
+ * For __print_symbolic() and __print_flags(), we need to completely
+ * evaluate the first argument, which defines what to print next.
+ */
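+/*
+ * e.g. (hypothetical format) __print_symbolic(REC->flags & 0x3, ...):
+ * here the first argument is an operation chain that must be folded
+ * down before the symbol list can be parsed.
+ */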
+static enum event_type
+process_field_arg(struct event_format *event, struct print_arg *arg, char **tok)
+{
+ enum event_type type;
+
+ type = process_arg(event, arg, tok);
+
+ while (type == EVENT_OP) {
+ type = process_op(event, arg, tok);
+ }
+
+ return type;
+}
+
static enum event_type
process_cond(struct event_format *event, struct print_arg *top, char **tok)
{
@@ -2371,7 +2389,7 @@ process_flags(struct event_format *event, struct print_arg *arg, char **tok)
goto out_free;
}
- type = process_arg(event, field, &token);
+ type = process_field_arg(event, field, &token);
/* Handle operations in the first argument */
while (type == EVENT_OP)
@@ -2424,7 +2442,8 @@ process_symbols(struct event_format *event, struct print_arg *arg, char **tok)
goto out_free;
}
- type = process_arg(event, field, &token);
+ type = process_field_arg(event, field, &token);
+
if (test_type_token(type, token, EVENT_DELIM, ","))
goto out_free_field;
@@ -3446,7 +3465,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
* is in the bottom half of the 32 bit field.
*/
offset &= 0xffff;
- val = (unsigned long long)(data + offset);
+ val = (unsigned long long)((unsigned long)data + offset);
break;
default: /* not sure what to do there */
return 0;
diff --git a/tools/net/Makefile b/tools/net/Makefile
index b4444d53b73f..004cd74734b6 100644
--- a/tools/net/Makefile
+++ b/tools/net/Makefile
@@ -1,15 +1,34 @@
prefix = /usr
CC = gcc
+LEX = flex
+YACC = bison
-all : bpf_jit_disasm
+%.yacc.c: %.y
+ $(YACC) -o $@ -d $<
+
+%.lex.c: %.l
+ $(LEX) -o $@ $<
+
+all : bpf_jit_disasm bpf_dbg bpf_asm
bpf_jit_disasm : CFLAGS = -Wall -O2
bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl
bpf_jit_disasm : bpf_jit_disasm.o
+bpf_dbg : CFLAGS = -Wall -O2
+bpf_dbg : LDLIBS = -lreadline
+bpf_dbg : bpf_dbg.o
+
+bpf_asm : CFLAGS = -Wall -O2 -I.
+bpf_asm : LDLIBS =
+bpf_asm : bpf_asm.o bpf_exp.yacc.o bpf_exp.lex.o
+bpf_exp.lex.o : bpf_exp.yacc.c
+
clean :
- rm -rf *.o bpf_jit_disasm
+ rm -rf *.o bpf_jit_disasm bpf_dbg bpf_asm bpf_exp.yacc.* bpf_exp.lex.*
install :
install bpf_jit_disasm $(prefix)/bin/bpf_jit_disasm
+ install bpf_dbg $(prefix)/bin/bpf_dbg
+ install bpf_asm $(prefix)/bin/bpf_asm
diff --git a/tools/net/bpf_asm.c b/tools/net/bpf_asm.c
new file mode 100644
index 000000000000..c15aef097b04
--- /dev/null
+++ b/tools/net/bpf_asm.c
@@ -0,0 +1,52 @@
+/*
+ * Minimal BPF assembler
+ *
+ * Instead of libpcap high-level filter expressions, it can be quite
+ * useful to define filters in low-level BPF assembler (which is kept
+ * close to Steven McCanne and Van Jacobson's original BPF paper).
+ * This is particularly useful for BPF JIT implementors, JIT security
+ * auditors, or just for defining BPF expressions that contain
+ * extensions which are not supported by compilers.
+ *
+ * How to get into it:
+ *
+ * 1) Read Documentation/networking/filter.txt
+ * 2) Run `bpf_asm [-c] <filter-prog file>` to translate it into a
+ *    binary blob that is loadable with xt_bpf, cls_bpf et al. Note:
+ *    -c will pretty print a C-like construct.
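+ *
+ * Example (a minimal sketch; the small ARP filter below and the file
+ * name "foo" are illustrative only):
+ *
+ *    ldh [12]
+ *    jne #0x806, drop
+ *    ret #-1
+ *    drop: ret #0
+ *
+ * Running `bpf_asm foo` on it then emits the loadable opcode string
+ * "4,40 0 0 12,21 0 1 2054,6 0 0 4294967295,6 0 0 0,".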
+ *
+ * Copyright 2013 Daniel Borkmann <borkmann@redhat.com>
+ * Licensed under the GNU General Public License, version 2.0 (GPLv2)
+ */
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+
+extern void bpf_asm_compile(FILE *fp, bool cstyle);
+
+int main(int argc, char **argv)
+{
+ FILE *fp = stdin;
+ bool cstyle = false;
+ int i;
+
+ for (i = 1; i < argc; i++) {
+ if (!strncmp("-c", argv[i], 2)) {
+ cstyle = true;
+ continue;
+ }
+
+ fp = fopen(argv[i], "r");
+ if (!fp) {
+ fp = stdin;
+ continue;
+ }
+
+ break;
+ }
+
+ bpf_asm_compile(fp, cstyle);
+
+ return 0;
+}
diff --git a/tools/net/bpf_dbg.c b/tools/net/bpf_dbg.c
new file mode 100644
index 000000000000..65dc757f7f7b
--- /dev/null
+++ b/tools/net/bpf_dbg.c
@@ -0,0 +1,1404 @@
+/*
+ * Minimal BPF debugger
+ *
+ * Minimal BPF debugger that mimics the kernel's engine (w/o extensions)
+ * and allows for single stepping through selected packets from a pcap
+ * with a provided user filter in order to facilitate verification of a
+ * BPF program. Among other things, this is useful to verify BPF
+ * programs before attaching them to a live system, e.g. in socket
+ * filters, cls_bpf, xt_bpf, the team driver or PTP code; in particular
+ * when a single, more complex BPF program is being used. Such programs
+ * typically arise when multiple simple BPF programs are combined into
+ * one to optimize execution time for making a verdict, since this
+ * avoids parsing the same headers multiple times.
+ *
+ * For more on how to debug BPF opcodes, see
+ * Documentation/networking/filter.txt, which is the main document on
+ * BPF. Mini howto for getting started:
+ *
+ * 1) `./bpf_dbg` to enter the shell (shell cmds denoted with '>').
+ * 2) > load bpf 6,40 0 0 12,21 0 3 20... (output from `bpf_asm` or
+ * `tcpdump -iem1 -ddd port 22 | tr '\n' ','` to load as filter)
+ * 3) > load pcap foo.pcap
+ * 4) > run <n>/disassemble/dump/quit (self-explanatory)
+ * 5) > breakpoint 2 (sets a bp at loaded BPF insn 2; then do `run`;
+ *    multiple bps can be set, of course; a call to `breakpoint`
+ *    w/o args shows the currently loaded bps, and `breakpoint reset`
+ *    resets all breakpoints)
+ * 6) > select 3 (`run` etc will start from the 3rd packet in the pcap)
+ * 7) > step [-<n>, +<n>] (performs single stepping through the BPF;
+ *    see the example session sketched below)
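+ *
+ * Example session (a sketch; the filter, pcap name and pass/fail
+ * counters below are illustrative only):
+ *
+ *    > load bpf 4,40 0 0 12,21 0 1 2054,6 0 0 4294967295,6 0 0 0
+ *    > load pcap foo.pcap
+ *    > run 10
+ *    bpf passes:2 fails:8
+ *    > quit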
+ *
+ * Copyright 2013 Daniel Borkmann <borkmann@redhat.com>
+ * Licensed under the GNU General Public License, version 2.0 (GPLv2)
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <stdbool.h>
+#include <stdarg.h>
+#include <setjmp.h>
+#include <linux/filter.h>
+#include <linux/if_packet.h>
+#include <readline/readline.h>
+#include <readline/history.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <signal.h>
+#include <arpa/inet.h>
+#include <net/ethernet.h>
+
+#define TCPDUMP_MAGIC 0xa1b2c3d4
+
+#define BPF_LDX_B (BPF_LDX | BPF_B)
+#define BPF_LDX_W (BPF_LDX | BPF_W)
+#define BPF_JMP_JA (BPF_JMP | BPF_JA)
+#define BPF_JMP_JEQ (BPF_JMP | BPF_JEQ)
+#define BPF_JMP_JGT (BPF_JMP | BPF_JGT)
+#define BPF_JMP_JGE (BPF_JMP | BPF_JGE)
+#define BPF_JMP_JSET (BPF_JMP | BPF_JSET)
+#define BPF_ALU_ADD (BPF_ALU | BPF_ADD)
+#define BPF_ALU_SUB (BPF_ALU | BPF_SUB)
+#define BPF_ALU_MUL (BPF_ALU | BPF_MUL)
+#define BPF_ALU_DIV (BPF_ALU | BPF_DIV)
+#define BPF_ALU_MOD (BPF_ALU | BPF_MOD)
+#define BPF_ALU_NEG (BPF_ALU | BPF_NEG)
+#define BPF_ALU_AND (BPF_ALU | BPF_AND)
+#define BPF_ALU_OR (BPF_ALU | BPF_OR)
+#define BPF_ALU_XOR (BPF_ALU | BPF_XOR)
+#define BPF_ALU_LSH (BPF_ALU | BPF_LSH)
+#define BPF_ALU_RSH (BPF_ALU | BPF_RSH)
+#define BPF_MISC_TAX (BPF_MISC | BPF_TAX)
+#define BPF_MISC_TXA (BPF_MISC | BPF_TXA)
+#define BPF_LD_B (BPF_LD | BPF_B)
+#define BPF_LD_H (BPF_LD | BPF_H)
+#define BPF_LD_W (BPF_LD | BPF_W)
+
+#ifndef array_size
+# define array_size(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#ifndef __check_format_printf
+# define __check_format_printf(pos_fmtstr, pos_fmtargs) \
+ __attribute__ ((format (printf, (pos_fmtstr), (pos_fmtargs))))
+#endif
+
+#define CMD(_name, _func) { .name = _name, .func = _func, }
+#define OP(_op, _name) [_op] = _name
+
+enum {
+ CMD_OK,
+ CMD_ERR,
+ CMD_EX,
+};
+
+struct shell_cmd {
+ const char *name;
+ int (*func)(char *args);
+};
+
+struct pcap_filehdr {
+ uint32_t magic;
+ uint16_t version_major;
+ uint16_t version_minor;
+ int32_t thiszone;
+ uint32_t sigfigs;
+ uint32_t snaplen;
+ uint32_t linktype;
+};
+
+struct pcap_timeval {
+ int32_t tv_sec;
+ int32_t tv_usec;
+};
+
+struct pcap_pkthdr {
+ struct pcap_timeval ts;
+ uint32_t caplen;
+ uint32_t len;
+};
+
+struct bpf_regs {
+ uint32_t A;
+ uint32_t X;
+ uint32_t M[BPF_MEMWORDS];
+ uint32_t R;
+ bool Rs;
+ uint16_t Pc;
+};
+
+static struct sock_filter bpf_image[BPF_MAXINSNS + 1];
+static unsigned int bpf_prog_len = 0;
+
+static int bpf_breakpoints[64];
+static struct bpf_regs bpf_regs[BPF_MAXINSNS + 1];
+static struct bpf_regs bpf_curr;
+static unsigned int bpf_regs_len = 0;
+
+static int pcap_fd = -1;
+static unsigned int pcap_packet = 0;
+static size_t pcap_map_size = 0;
+static char *pcap_ptr_va_start, *pcap_ptr_va_curr;
+
+static const char * const op_table[] = {
+ OP(BPF_ST, "st"),
+ OP(BPF_STX, "stx"),
+ OP(BPF_LD_B, "ldb"),
+ OP(BPF_LD_H, "ldh"),
+ OP(BPF_LD_W, "ld"),
+ OP(BPF_LDX, "ldx"),
+ OP(BPF_LDX_B, "ldxb"),
+ OP(BPF_JMP_JA, "ja"),
+ OP(BPF_JMP_JEQ, "jeq"),
+ OP(BPF_JMP_JGT, "jgt"),
+ OP(BPF_JMP_JGE, "jge"),
+ OP(BPF_JMP_JSET, "jset"),
+ OP(BPF_ALU_ADD, "add"),
+ OP(BPF_ALU_SUB, "sub"),
+ OP(BPF_ALU_MUL, "mul"),
+ OP(BPF_ALU_DIV, "div"),
+ OP(BPF_ALU_MOD, "mod"),
+ OP(BPF_ALU_NEG, "neg"),
+ OP(BPF_ALU_AND, "and"),
+ OP(BPF_ALU_OR, "or"),
+ OP(BPF_ALU_XOR, "xor"),
+ OP(BPF_ALU_LSH, "lsh"),
+ OP(BPF_ALU_RSH, "rsh"),
+ OP(BPF_MISC_TAX, "tax"),
+ OP(BPF_MISC_TXA, "txa"),
+ OP(BPF_RET, "ret"),
+};
+
+static __check_format_printf(1, 2) int rl_printf(const char *fmt, ...)
+{
+ int ret;
+ va_list vl;
+
+ va_start(vl, fmt);
+ ret = vfprintf(rl_outstream, fmt, vl);
+ va_end(vl);
+
+ return ret;
+}
+
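+/* Returns 0 iff cmd is a (possibly abbreviated) prefix of pattern. */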
+static int matches(const char *cmd, const char *pattern)
+{
+ int len = strlen(cmd);
+
+ if (len > strlen(pattern))
+ return -1;
+
+ return memcmp(pattern, cmd, len);
+}
+
+static void hex_dump(const uint8_t *buf, size_t len)
+{
+ int i;
+
+ rl_printf("%3u: ", 0);
+ for (i = 0; i < len; i++) {
+ if (i && !(i % 16))
+ rl_printf("\n%3u: ", i);
+ rl_printf("%02x ", buf[i]);
+ }
+ rl_printf("\n");
+}
+
+static bool bpf_prog_loaded(void)
+{
+ if (bpf_prog_len == 0)
+ rl_printf("no bpf program loaded!\n");
+
+ return bpf_prog_len > 0;
+}
+
+static void bpf_disasm(const struct sock_filter f, unsigned int i)
+{
+ const char *op, *fmt;
+ int val = f.k;
+ char buf[256];
+
+ switch (f.code) {
+ case BPF_RET | BPF_K:
+ op = op_table[BPF_RET];
+ fmt = "#%#x";
+ break;
+ case BPF_RET | BPF_A:
+ op = op_table[BPF_RET];
+ fmt = "a";
+ break;
+ case BPF_RET | BPF_X:
+ op = op_table[BPF_RET];
+ fmt = "x";
+ break;
+ case BPF_MISC_TAX:
+ op = op_table[BPF_MISC_TAX];
+ fmt = "";
+ break;
+ case BPF_MISC_TXA:
+ op = op_table[BPF_MISC_TXA];
+ fmt = "";
+ break;
+ case BPF_ST:
+ op = op_table[BPF_ST];
+ fmt = "M[%d]";
+ break;
+ case BPF_STX:
+ op = op_table[BPF_STX];
+ fmt = "M[%d]";
+ break;
+ case BPF_LD_W | BPF_ABS:
+ op = op_table[BPF_LD_W];
+ fmt = "[%d]";
+ break;
+ case BPF_LD_H | BPF_ABS:
+ op = op_table[BPF_LD_H];
+ fmt = "[%d]";
+ break;
+ case BPF_LD_B | BPF_ABS:
+ op = op_table[BPF_LD_B];
+ fmt = "[%d]";
+ break;
+ case BPF_LD_W | BPF_LEN:
+ op = op_table[BPF_LD_W];
+ fmt = "#len";
+ break;
+ case BPF_LD_W | BPF_IND:
+ op = op_table[BPF_LD_W];
+ fmt = "[x+%d]";
+ break;
+ case BPF_LD_H | BPF_IND:
+ op = op_table[BPF_LD_H];
+ fmt = "[x+%d]";
+ break;
+ case BPF_LD_B | BPF_IND:
+ op = op_table[BPF_LD_B];
+ fmt = "[x+%d]";
+ break;
+ case BPF_LD | BPF_IMM:
+ op = op_table[BPF_LD_W];
+ fmt = "#%#x";
+ break;
+ case BPF_LDX | BPF_IMM:
+ op = op_table[BPF_LDX];
+ fmt = "#%#x";
+ break;
+ case BPF_LDX_B | BPF_MSH:
+ op = op_table[BPF_LDX_B];
+ fmt = "4*([%d]&0xf)";
+ break;
+ case BPF_LD | BPF_MEM:
+ op = op_table[BPF_LD_W];
+ fmt = "M[%d]";
+ break;
+ case BPF_LDX | BPF_MEM:
+ op = op_table[BPF_LDX];
+ fmt = "M[%d]";
+ break;
+ case BPF_JMP_JA:
+ op = op_table[BPF_JMP_JA];
+ fmt = "%d";
+ val = i + 1 + f.k;
+ break;
+ case BPF_JMP_JGT | BPF_X:
+ op = op_table[BPF_JMP_JGT];
+ fmt = "x";
+ break;
+ case BPF_JMP_JGT | BPF_K:
+ op = op_table[BPF_JMP_JGT];
+ fmt = "#%#x";
+ break;
+ case BPF_JMP_JGE | BPF_X:
+ op = op_table[BPF_JMP_JGE];
+ fmt = "x";
+ break;
+ case BPF_JMP_JGE | BPF_K:
+ op = op_table[BPF_JMP_JGE];
+ fmt = "#%#x";
+ break;
+ case BPF_JMP_JEQ | BPF_X:
+ op = op_table[BPF_JMP_JEQ];
+ fmt = "x";
+ break;
+ case BPF_JMP_JEQ | BPF_K:
+ op = op_table[BPF_JMP_JEQ];
+ fmt = "#%#x";
+ break;
+ case BPF_JMP_JSET | BPF_X:
+ op = op_table[BPF_JMP_JSET];
+ fmt = "x";
+ break;
+ case BPF_JMP_JSET | BPF_K:
+ op = op_table[BPF_JMP_JSET];
+ fmt = "#%#x";
+ break;
+ case BPF_ALU_NEG:
+ op = op_table[BPF_ALU_NEG];
+ fmt = "";
+ break;
+ case BPF_ALU_LSH | BPF_X:
+ op = op_table[BPF_ALU_LSH];
+ fmt = "x";
+ break;
+ case BPF_ALU_LSH | BPF_K:
+ op = op_table[BPF_ALU_LSH];
+ fmt = "#%d";
+ break;
+ case BPF_ALU_RSH | BPF_X:
+ op = op_table[BPF_ALU_RSH];
+ fmt = "x";
+ break;
+ case BPF_ALU_RSH | BPF_K:
+ op = op_table[BPF_ALU_RSH];
+ fmt = "#%d";
+ break;
+ case BPF_ALU_ADD | BPF_X:
+ op = op_table[BPF_ALU_ADD];
+ fmt = "x";
+ break;
+ case BPF_ALU_ADD | BPF_K:
+ op = op_table[BPF_ALU_ADD];
+ fmt = "#%d";
+ break;
+ case BPF_ALU_SUB | BPF_X:
+ op = op_table[BPF_ALU_SUB];
+ fmt = "x";
+ break;
+ case BPF_ALU_SUB | BPF_K:
+ op = op_table[BPF_ALU_SUB];
+ fmt = "#%d";
+ break;
+ case BPF_ALU_MUL | BPF_X:
+ op = op_table[BPF_ALU_MUL];
+ fmt = "x";
+ break;
+ case BPF_ALU_MUL | BPF_K:
+ op = op_table[BPF_ALU_MUL];
+ fmt = "#%d";
+ break;
+ case BPF_ALU_DIV | BPF_X:
+ op = op_table[BPF_ALU_DIV];
+ fmt = "x";
+ break;
+ case BPF_ALU_DIV | BPF_K:
+ op = op_table[BPF_ALU_DIV];
+ fmt = "#%d";
+ break;
+ case BPF_ALU_MOD | BPF_X:
+ op = op_table[BPF_ALU_MOD];
+ fmt = "x";
+ break;
+ case BPF_ALU_MOD | BPF_K:
+ op = op_table[BPF_ALU_MOD];
+ fmt = "#%d";
+ break;
+ case BPF_ALU_AND | BPF_X:
+ op = op_table[BPF_ALU_AND];
+ fmt = "x";
+ break;
+ case BPF_ALU_AND | BPF_K:
+ op = op_table[BPF_ALU_AND];
+ fmt = "#%#x";
+ break;
+ case BPF_ALU_OR | BPF_X:
+ op = op_table[BPF_ALU_OR];
+ fmt = "x";
+ break;
+ case BPF_ALU_OR | BPF_K:
+ op = op_table[BPF_ALU_OR];
+ fmt = "#%#x";
+ break;
+ case BPF_ALU_XOR | BPF_X:
+ op = op_table[BPF_ALU_XOR];
+ fmt = "x";
+ break;
+ case BPF_ALU_XOR | BPF_K:
+ op = op_table[BPF_ALU_XOR];
+ fmt = "#%#x";
+ break;
+ default:
+ op = "nosup";
+ fmt = "%#x";
+ val = f.code;
+ break;
+ }
+
+ memset(buf, 0, sizeof(buf));
+ snprintf(buf, sizeof(buf), fmt, val);
+ buf[sizeof(buf) - 1] = 0;
+
+ if (BPF_CLASS(f.code) == BPF_JMP && BPF_OP(f.code) != BPF_JA)
+ rl_printf("l%d:\t%s %s, l%d, l%d\n", i, op, buf,
+ i + 1 + f.jt, i + 1 + f.jf);
+ else
+ rl_printf("l%d:\t%s %s\n", i, op, buf);
+}
+
+static void bpf_dump_curr(struct bpf_regs *r, struct sock_filter *f)
+{
+ int i, m = 0;
+
+ rl_printf("pc: [%u]\n", r->Pc);
+ rl_printf("code: [%u] jt[%u] jf[%u] k[%u]\n",
+ f->code, f->jt, f->jf, f->k);
+ rl_printf("curr: ");
+ bpf_disasm(*f, r->Pc);
+
+ if (f->jt || f->jf) {
+ rl_printf("jt: ");
+ bpf_disasm(*(f + f->jt + 1), r->Pc + f->jt + 1);
+ rl_printf("jf: ");
+ bpf_disasm(*(f + f->jf + 1), r->Pc + f->jf + 1);
+ }
+
+ rl_printf("A: [%#08x][%u]\n", r->A, r->A);
+ rl_printf("X: [%#08x][%u]\n", r->X, r->X);
+ if (r->Rs)
+ rl_printf("ret: [%#08x][%u]!\n", r->R, r->R);
+
+ for (i = 0; i < BPF_MEMWORDS; i++) {
+ if (r->M[i]) {
+ m++;
+ rl_printf("M[%d]: [%#08x][%u]\n", i, r->M[i], r->M[i]);
+ }
+ }
+ if (m == 0)
+ rl_printf("M[0,%d]: [%#08x][%u]\n", BPF_MEMWORDS - 1, 0, 0);
+}
+
+static void bpf_dump_pkt(uint8_t *pkt, uint32_t pkt_caplen, uint32_t pkt_len)
+{
+ if (pkt_caplen != pkt_len)
+ rl_printf("cap: %u, len: %u\n", pkt_caplen, pkt_len);
+ else
+ rl_printf("len: %u\n", pkt_len);
+
+ hex_dump(pkt, pkt_caplen);
+}
+
+static void bpf_disasm_all(const struct sock_filter *f, unsigned int len)
+{
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ bpf_disasm(f[i], i);
+}
+
+static void bpf_dump_all(const struct sock_filter *f, unsigned int len)
+{
+ unsigned int i;
+
+ rl_printf("/* { op, jt, jf, k }, */\n");
+ for (i = 0; i < len; i++)
+ rl_printf("{ %#04x, %2u, %2u, %#010x },\n",
+ f[i].code, f[i].jt, f[i].jf, f[i].k);
+}
+
+static bool bpf_runnable(struct sock_filter *f, unsigned int len)
+{
+ int sock, ret, i;
+ struct sock_fprog bpf = {
+ .filter = f,
+ .len = len,
+ };
+
+ sock = socket(AF_INET, SOCK_DGRAM, 0);
+ if (sock < 0) {
+ rl_printf("cannot open socket!\n");
+ return false;
+ }
+ ret = setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf));
+ close(sock);
+ if (ret < 0) {
+ rl_printf("program not allowed to run by kernel!\n");
+ return false;
+ }
+ for (i = 0; i < len; i++) {
+ if (BPF_CLASS(f[i].code) == BPF_LD &&
+ f[i].k > SKF_AD_OFF) {
+ rl_printf("extensions currently not supported!\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void bpf_reset_breakpoints(void)
+{
+ int i;
+
+ for (i = 0; i < array_size(bpf_breakpoints); i++)
+ bpf_breakpoints[i] = -1;
+}
+
+static void bpf_set_breakpoints(unsigned int where)
+{
+ int i;
+ bool set = false;
+
+ for (i = 0; i < array_size(bpf_breakpoints); i++) {
+ if (bpf_breakpoints[i] == (int) where) {
+ rl_printf("breakpoint already set!\n");
+ set = true;
+ break;
+ }
+
+ if (bpf_breakpoints[i] == -1 && set == false) {
+ bpf_breakpoints[i] = where;
+ set = true;
+ }
+ }
+
+ if (!set)
+ rl_printf("too many breakpoints set, reset first!\n");
+}
+
+static void bpf_dump_breakpoints(void)
+{
+ int i;
+
+ rl_printf("breakpoints: ");
+
+ for (i = 0; i < array_size(bpf_breakpoints); i++) {
+ if (bpf_breakpoints[i] < 0)
+ continue;
+ rl_printf("%d ", bpf_breakpoints[i]);
+ }
+
+ rl_printf("\n");
+}
+
+static void bpf_reset(void)
+{
+ bpf_regs_len = 0;
+
+ memset(bpf_regs, 0, sizeof(bpf_regs));
+ memset(&bpf_curr, 0, sizeof(bpf_curr));
+}
+
+static void bpf_save_regs(void)
+{
+ memcpy(&bpf_regs[bpf_regs_len++], &bpf_curr, sizeof(bpf_curr));
+}
+
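+/*
+ * Rolls the register history back by -off steps (off <= 0 as passed in
+ * from a negative `step` count); index 0 means we are back at the
+ * initial state before the first instruction.
+ */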
+static bool bpf_restore_regs(int off)
+{
+ unsigned int index = bpf_regs_len - 1 + off;
+
+ if (index == 0) {
+ bpf_reset();
+ return true;
+ } else if (index < bpf_regs_len) {
+ memcpy(&bpf_curr, &bpf_regs[index], sizeof(bpf_curr));
+ bpf_regs_len = index;
+ return true;
+ } else {
+ rl_printf("reached bottom of register history stack!\n");
+ return false;
+ }
+}
+
+static uint32_t extract_u32(uint8_t *pkt, uint32_t off)
+{
+ uint32_t r;
+
+ memcpy(&r, &pkt[off], sizeof(r));
+
+ return ntohl(r);
+}
+
+static uint16_t extract_u16(uint8_t *pkt, uint32_t off)
+{
+ uint16_t r;
+
+ memcpy(&r, &pkt[off], sizeof(r));
+
+ return ntohs(r);
+}
+
+static uint8_t extract_u8(uint8_t *pkt, uint32_t off)
+{
+ return pkt[off];
+}
+
+static void set_return(struct bpf_regs *r)
+{
+ r->R = 0;
+ r->Rs = true;
+}
+
+static void bpf_single_step(struct bpf_regs *r, struct sock_filter *f,
+ uint8_t *pkt, uint32_t pkt_caplen,
+ uint32_t pkt_len)
+{
+ uint32_t K = f->k;
+ int d;
+
+ switch (f->code) {
+ case BPF_RET | BPF_K:
+ r->R = K;
+ r->Rs = true;
+ break;
+ case BPF_RET | BPF_A:
+ r->R = r->A;
+ r->Rs = true;
+ break;
+ case BPF_RET | BPF_X:
+ r->R = r->X;
+ r->Rs = true;
+ break;
+ case BPF_MISC_TAX:
+ r->X = r->A;
+ break;
+ case BPF_MISC_TXA:
+ r->A = r->X;
+ break;
+ case BPF_ST:
+ r->M[K] = r->A;
+ break;
+ case BPF_STX:
+ r->M[K] = r->X;
+ break;
+ case BPF_LD_W | BPF_ABS:
+ d = pkt_caplen - K;
+ if (d >= sizeof(uint32_t))
+ r->A = extract_u32(pkt, K);
+ else
+ set_return(r);
+ break;
+ case BPF_LD_H | BPF_ABS:
+ d = pkt_caplen - K;
+ if (d >= sizeof(uint16_t))
+ r->A = extract_u16(pkt, K);
+ else
+ set_return(r);
+ break;
+ case BPF_LD_B | BPF_ABS:
+ d = pkt_caplen - K;
+ if (d >= sizeof(uint8_t))
+ r->A = extract_u8(pkt, K);
+ else
+ set_return(r);
+ break;
+ case BPF_LD_W | BPF_IND:
+ d = pkt_caplen - (r->X + K);
+ if (d >= sizeof(uint32_t))
+ r->A = extract_u32(pkt, r->X + K);
+ else
+ set_return(r);
+ break;
+ case BPF_LD_H | BPF_IND:
+ d = pkt_caplen - (r->X + K);
+ if (d >= sizeof(uint16_t))
+ r->A = extract_u16(pkt, r->X + K);
+ else
+ set_return(r);
+ break;
+ case BPF_LD_B | BPF_IND:
+ d = pkt_caplen - (r->X + K);
+ if (d >= sizeof(uint8_t))
+ r->A = extract_u8(pkt, r->X + K);
+ else
+ set_return(r);
+ break;
+ case BPF_LDX_B | BPF_MSH:
+ d = pkt_caplen - K;
+ if (d >= sizeof(uint8_t)) {
+ r->X = extract_u8(pkt, K);
+ r->X = (r->X & 0xf) << 2;
+ } else
+ set_return(r);
+ break;
+ case BPF_LD_W | BPF_LEN:
+ r->A = pkt_len;
+ break;
+ case BPF_LDX_W | BPF_LEN:
+ r->X = pkt_len;
+ break;
+ case BPF_LD | BPF_IMM:
+ r->A = K;
+ break;
+ case BPF_LDX | BPF_IMM:
+ r->X = K;
+ break;
+ case BPF_LD | BPF_MEM:
+ r->A = r->M[K];
+ break;
+ case BPF_LDX | BPF_MEM:
+ r->X = r->M[K];
+ break;
+ case BPF_JMP_JA:
+ r->Pc += K;
+ break;
+ case BPF_JMP_JGT | BPF_X:
+ r->Pc += r->A > r->X ? f->jt : f->jf;
+ break;
+ case BPF_JMP_JGT | BPF_K:
+ r->Pc += r->A > K ? f->jt : f->jf;
+ break;
+ case BPF_JMP_JGE | BPF_X:
+ r->Pc += r->A >= r->X ? f->jt : f->jf;
+ break;
+ case BPF_JMP_JGE | BPF_K:
+ r->Pc += r->A >= K ? f->jt : f->jf;
+ break;
+ case BPF_JMP_JEQ | BPF_X:
+ r->Pc += r->A == r->X ? f->jt : f->jf;
+ break;
+ case BPF_JMP_JEQ | BPF_K:
+ r->Pc += r->A == K ? f->jt : f->jf;
+ break;
+ case BPF_JMP_JSET | BPF_X:
+ r->Pc += r->A & r->X ? f->jt : f->jf;
+ break;
+ case BPF_JMP_JSET | BPF_K:
+ r->Pc += r->A & K ? f->jt : f->jf;
+ break;
+ case BPF_ALU_NEG:
+ r->A = -r->A;
+ break;
+ case BPF_ALU_LSH | BPF_X:
+ r->A <<= r->X;
+ break;
+ case BPF_ALU_LSH | BPF_K:
+ r->A <<= K;
+ break;
+ case BPF_ALU_RSH | BPF_X:
+ r->A >>= r->X;
+ break;
+ case BPF_ALU_RSH | BPF_K:
+ r->A >>= K;
+ break;
+ case BPF_ALU_ADD | BPF_X:
+ r->A += r->X;
+ break;
+ case BPF_ALU_ADD | BPF_K:
+ r->A += K;
+ break;
+ case BPF_ALU_SUB | BPF_X:
+ r->A -= r->X;
+ break;
+ case BPF_ALU_SUB | BPF_K:
+ r->A -= K;
+ break;
+ case BPF_ALU_MUL | BPF_X:
+ r->A *= r->X;
+ break;
+ case BPF_ALU_MUL | BPF_K:
+ r->A *= K;
+ break;
+ case BPF_ALU_DIV | BPF_X:
+ case BPF_ALU_MOD | BPF_X:
+ if (r->X == 0) {
+ set_return(r);
+ break;
+ }
+ goto do_div;
+ case BPF_ALU_DIV | BPF_K:
+ case BPF_ALU_MOD | BPF_K:
+ if (K == 0) {
+ set_return(r);
+ break;
+ }
+do_div:
+ switch (f->code) {
+ case BPF_ALU_DIV | BPF_X:
+ r->A /= r->X;
+ break;
+ case BPF_ALU_DIV | BPF_K:
+ r->A /= K;
+ break;
+ case BPF_ALU_MOD | BPF_X:
+ r->A %= r->X;
+ break;
+ case BPF_ALU_MOD | BPF_K:
+ r->A %= K;
+ break;
+ }
+ break;
+ case BPF_ALU_AND | BPF_X:
+ r->A &= r->X;
+ break;
+ case BPF_ALU_AND | BPF_K:
+ r->A &= K;
+ break;
+ case BPF_ALU_OR | BPF_X:
+ r->A |= r->X;
+ break;
+ case BPF_ALU_OR | BPF_K:
+ r->A |= K;
+ break;
+ case BPF_ALU_XOR | BPF_X:
+ r->A ^= r->X;
+ break;
+ case BPF_ALU_XOR | BPF_K:
+ r->A ^= K;
+ break;
+ }
+}
+
+static bool bpf_pc_has_breakpoint(uint16_t pc)
+{
+ int i;
+
+ for (i = 0; i < array_size(bpf_breakpoints); i++) {
+ if (bpf_breakpoints[i] < 0)
+ continue;
+ if (bpf_breakpoints[i] == pc)
+ return true;
+ }
+
+ return false;
+}
+
+static bool bpf_handle_breakpoint(struct bpf_regs *r, struct sock_filter *f,
+ uint8_t *pkt, uint32_t pkt_caplen,
+ uint32_t pkt_len)
+{
+ rl_printf("-- register dump --\n");
+ bpf_dump_curr(r, &f[r->Pc]);
+ rl_printf("-- packet dump --\n");
+ bpf_dump_pkt(pkt, pkt_caplen, pkt_len);
+ rl_printf("(breakpoint)\n");
+ return true;
+}
+
+static int bpf_run_all(struct sock_filter *f, uint16_t bpf_len, uint8_t *pkt,
+ uint32_t pkt_caplen, uint32_t pkt_len)
+{
+ bool stop = false;
+
+ while (bpf_curr.Rs == false && stop == false) {
+ bpf_save_regs();
+
+ if (bpf_pc_has_breakpoint(bpf_curr.Pc))
+ stop = bpf_handle_breakpoint(&bpf_curr, f, pkt,
+ pkt_caplen, pkt_len);
+
+ bpf_single_step(&bpf_curr, &f[bpf_curr.Pc], pkt, pkt_caplen,
+ pkt_len);
+ bpf_curr.Pc++;
+ }
+
+ return stop ? -1 : bpf_curr.R;
+}
+
+static int bpf_run_stepping(struct sock_filter *f, uint16_t bpf_len,
+ uint8_t *pkt, uint32_t pkt_caplen,
+ uint32_t pkt_len, int next)
+{
+ bool stop = false;
+ int i = 1;
+
+ while (bpf_curr.Rs == false && stop == false) {
+ bpf_save_regs();
+
+ if (i++ == next)
+ stop = bpf_handle_breakpoint(&bpf_curr, f, pkt,
+ pkt_caplen, pkt_len);
+
+ bpf_single_step(&bpf_curr, &f[bpf_curr.Pc], pkt, pkt_caplen,
+ pkt_len);
+ bpf_curr.Pc++;
+ }
+
+ return stop ? -1 : bpf_curr.R;
+}
+
+static bool pcap_loaded(void)
+{
+ if (pcap_fd < 0)
+ rl_printf("no pcap file loaded!\n");
+
+ return pcap_fd >= 0;
+}
+
+static struct pcap_pkthdr *pcap_curr_pkt(void)
+{
+ return (void *) pcap_ptr_va_curr;
+}
+
+static bool pcap_next_pkt(void)
+{
+ struct pcap_pkthdr *hdr = pcap_curr_pkt();
+
+ if (pcap_ptr_va_curr + sizeof(*hdr) -
+ pcap_ptr_va_start >= pcap_map_size)
+ return false;
+ if (hdr->caplen == 0 || hdr->len == 0 || hdr->caplen > hdr->len)
+ return false;
+ if (pcap_ptr_va_curr + sizeof(*hdr) + hdr->caplen -
+ pcap_ptr_va_start >= pcap_map_size)
+ return false;
+
+ pcap_ptr_va_curr += (sizeof(*hdr) + hdr->caplen);
+ return true;
+}
+
+static void pcap_reset_pkt(void)
+{
+ pcap_ptr_va_curr = pcap_ptr_va_start + sizeof(struct pcap_filehdr);
+}
+
+static int try_load_pcap(const char *file)
+{
+ struct pcap_filehdr *hdr;
+ struct stat sb;
+ int ret;
+
+ pcap_fd = open(file, O_RDONLY);
+ if (pcap_fd < 0) {
+ rl_printf("cannot open pcap [%s]!\n", strerror(errno));
+ return CMD_ERR;
+ }
+
+ ret = fstat(pcap_fd, &sb);
+ if (ret < 0) {
+ rl_printf("cannot fstat pcap file!\n");
+ return CMD_ERR;
+ }
+
+ if (!S_ISREG(sb.st_mode)) {
+ rl_printf("not a regular pcap file, duh!\n");
+ return CMD_ERR;
+ }
+
+ pcap_map_size = sb.st_size;
+ if (pcap_map_size <= sizeof(struct pcap_filehdr)) {
+ rl_printf("pcap file too small!\n");
+ return CMD_ERR;
+ }
+
+ pcap_ptr_va_start = mmap(NULL, pcap_map_size, PROT_READ,
+ MAP_SHARED | MAP_LOCKED, pcap_fd, 0);
+ if (pcap_ptr_va_start == MAP_FAILED) {
+ rl_printf("mmap of file failed!");
+ return CMD_ERR;
+ }
+
+ hdr = (void *) pcap_ptr_va_start;
+ if (hdr->magic != TCPDUMP_MAGIC) {
+ rl_printf("wrong pcap magic!\n");
+ return CMD_ERR;
+ }
+
+ pcap_reset_pkt();
+
+ return CMD_OK;
+}
+
+static void try_close_pcap(void)
+{
+ if (pcap_fd >= 0) {
+ munmap(pcap_ptr_va_start, pcap_map_size);
+ close(pcap_fd);
+
+ pcap_ptr_va_start = pcap_ptr_va_curr = NULL;
+ pcap_map_size = 0;
+ pcap_packet = 0;
+ pcap_fd = -1;
+ }
+}
+
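+/*
+ * Parses a filter in the opcode format emitted by `bpf_asm` or
+ * `tcpdump -ddd | tr '\n' ','`, i.e. "<len>,<code> <jt> <jf> <k>,...".
+ */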
+static int cmd_load_bpf(char *bpf_string)
+{
+ char sp, *token, separator = ',';
+ unsigned short bpf_len, i = 0;
+ struct sock_filter tmp;
+
+ bpf_prog_len = 0;
+ memset(bpf_image, 0, sizeof(bpf_image));
+
+ if (sscanf(bpf_string, "%hu%c", &bpf_len, &sp) != 2 ||
+ sp != separator || bpf_len > BPF_MAXINSNS || bpf_len == 0) {
+ rl_printf("syntax error in head length encoding!\n");
+ return CMD_ERR;
+ }
+
+ token = bpf_string;
+ while ((token = strchr(token, separator)) && (++token)[0]) {
+ if (i >= bpf_len) {
+ rl_printf("program exceeds encoded length!\n");
+ return CMD_ERR;
+ }
+
+ if (sscanf(token, "%hu %hhu %hhu %u,",
+ &tmp.code, &tmp.jt, &tmp.jf, &tmp.k) != 4) {
+ rl_printf("syntax error at instruction %d!\n", i);
+ return CMD_ERR;
+ }
+
+ bpf_image[i].code = tmp.code;
+ bpf_image[i].jt = tmp.jt;
+ bpf_image[i].jf = tmp.jf;
+ bpf_image[i].k = tmp.k;
+
+ i++;
+ }
+
+ if (i != bpf_len) {
+ rl_printf("program does not match encoded length!\n");
+ return CMD_ERR;
+ }
+
+ bpf_prog_len = bpf_len;
+ if (!bpf_runnable(bpf_image, bpf_prog_len))
+ bpf_prog_len = 0;
+
+ return CMD_OK;
+}
+
+static int cmd_load_pcap(char *file)
+{
+ char *file_trim, *tmp;
+
+ file_trim = strtok_r(file, " ", &tmp);
+ if (file_trim == NULL)
+ return CMD_ERR;
+
+ try_close_pcap();
+
+ return try_load_pcap(file_trim);
+}
+
+static int cmd_load(char *arg)
+{
+ char *subcmd, *cont, *tmp = strdup(arg);
+ int ret = CMD_OK;
+
+ subcmd = strtok_r(tmp, " ", &cont);
+ if (subcmd == NULL)
+ goto out;
+ if (matches(subcmd, "bpf") == 0) {
+ bpf_reset();
+ bpf_reset_breakpoints();
+
+ ret = cmd_load_bpf(cont);
+ } else if (matches(subcmd, "pcap") == 0) {
+ ret = cmd_load_pcap(cont);
+ } else {
+out:
+ rl_printf("bpf <code>: load bpf code\n");
+ rl_printf("pcap <file>: load pcap file\n");
+ ret = CMD_ERR;
+ }
+
+ free(tmp);
+ return ret;
+}
+
+static int cmd_step(char *num)
+{
+ struct pcap_pkthdr *hdr;
+ int steps, ret;
+
+ if (!bpf_prog_loaded() || !pcap_loaded())
+ return CMD_ERR;
+
+ steps = strtol(num, NULL, 10);
+ if (steps == 0 || strlen(num) == 0)
+ steps = 1;
+ if (steps < 0) {
+ if (!bpf_restore_regs(steps))
+ return CMD_ERR;
+ steps = 1;
+ }
+
+ hdr = pcap_curr_pkt();
+ ret = bpf_run_stepping(bpf_image, bpf_prog_len,
+ (uint8_t *) hdr + sizeof(*hdr),
+ hdr->caplen, hdr->len, steps);
+ if (ret >= 0 || bpf_curr.Rs) {
+ bpf_reset();
+ if (!pcap_next_pkt()) {
+ rl_printf("(going back to first packet)\n");
+ pcap_reset_pkt();
+ } else {
+ rl_printf("(next packet)\n");
+ }
+ }
+
+ return CMD_OK;
+}
+
+static int cmd_select(char *num)
+{
+ unsigned int which, i;
+ struct pcap_pkthdr *hdr;
+ bool have_next = true;
+
+ if (!pcap_loaded() || strlen(num) == 0)
+ return CMD_ERR;
+
+ which = strtoul(num, NULL, 10);
+ if (which == 0) {
+ rl_printf("packet count starts with 1, clamping!\n");
+ which = 1;
+ }
+
+ pcap_reset_pkt();
+ bpf_reset();
+
+ for (i = 0; i < which - 1 && (have_next = pcap_next_pkt()); i++)
+ /* noop */;
+ if (!have_next || (hdr = pcap_curr_pkt()) == NULL) {
+ rl_printf("no packet #%u available!\n", which);
+ pcap_reset_pkt();
+ return CMD_ERR;
+ }
+
+ return CMD_OK;
+}
+
+static int cmd_breakpoint(char *subcmd)
+{
+ if (!bpf_prog_loaded())
+ return CMD_ERR;
+ if (strlen(subcmd) == 0)
+ bpf_dump_breakpoints();
+ else if (matches(subcmd, "reset") == 0)
+ bpf_reset_breakpoints();
+ else {
+ unsigned int where = strtoul(subcmd, NULL, 10);
+
+ if (where < bpf_prog_len) {
+ bpf_set_breakpoints(where);
+ rl_printf("breakpoint at: ");
+ bpf_disasm(bpf_image[where], where);
+ }
+ }
+
+ return CMD_OK;
+}
+
+static int cmd_run(char *num)
+{
+ static uint32_t pass = 0, fail = 0;
+ struct pcap_pkthdr *hdr;
+ bool has_limit = true;
+ int ret, pkts = 0, i = 0;
+
+ if (!bpf_prog_loaded() || !pcap_loaded())
+ return CMD_ERR;
+
+ pkts = strtol(num, NULL, 10);
+ if (pkts == 0 || strlen(num) == 0)
+ has_limit = false;
+
+ do {
+ hdr = pcap_curr_pkt();
+ ret = bpf_run_all(bpf_image, bpf_prog_len,
+ (uint8_t *) hdr + sizeof(*hdr),
+ hdr->caplen, hdr->len);
+ if (ret > 0)
+ pass++;
+ else if (ret == 0)
+ fail++;
+ else
+ return CMD_OK;
+ bpf_reset();
+ } while (pcap_next_pkt() && (!has_limit || ++i < pkts));
+
+ rl_printf("bpf passes:%u fails:%u\n", pass, fail);
+
+ pcap_reset_pkt();
+ bpf_reset();
+
+ pass = fail = 0;
+ return CMD_OK;
+}
+
+static int cmd_disassemble(char *line_string)
+{
+ bool single_line = false;
+ unsigned long line;
+
+ if (!bpf_prog_loaded())
+ return CMD_ERR;
+ if (strlen(line_string) > 0 &&
+ (line = strtoul(line_string, NULL, 10)) < bpf_prog_len)
+ single_line = true;
+ if (single_line)
+ bpf_disasm(bpf_image[line], line);
+ else
+ bpf_disasm_all(bpf_image, bpf_prog_len);
+
+ return CMD_OK;
+}
+
+static int cmd_dump(char *dontcare)
+{
+ if (!bpf_prog_loaded())
+ return CMD_ERR;
+
+ bpf_dump_all(bpf_image, bpf_prog_len);
+
+ return CMD_OK;
+}
+
+static int cmd_quit(char *dontcare)
+{
+ return CMD_EX;
+}
+
+static const struct shell_cmd cmds[] = {
+ CMD("load", cmd_load),
+ CMD("select", cmd_select),
+ CMD("step", cmd_step),
+ CMD("run", cmd_run),
+ CMD("breakpoint", cmd_breakpoint),
+ CMD("disassemble", cmd_disassemble),
+ CMD("dump", cmd_dump),
+ CMD("quit", cmd_quit),
+};
+
+static int execf(char *arg)
+{
+ char *cmd, *cont, *tmp = strdup(arg);
+ int i, ret = 0, len;
+
+ cmd = strtok_r(tmp, " ", &cont);
+ if (cmd == NULL)
+ goto out;
+ len = strlen(cmd);
+ for (i = 0; i < array_size(cmds); i++) {
+ if (len != strlen(cmds[i].name))
+ continue;
+ if (strncmp(cmds[i].name, cmd, len) == 0) {
+ ret = cmds[i].func(cont);
+ break;
+ }
+ }
+out:
+ free(tmp);
+ return ret;
+}
+
+static char *shell_comp_gen(const char *buf, int state)
+{
+ static int list_index, len;
+ const char *name;
+
+ if (!state) {
+ list_index = 0;
+ len = strlen(buf);
+ }
+
+ for (; list_index < array_size(cmds); ) {
+ name = cmds[list_index].name;
+ list_index++;
+
+ if (strncmp(name, buf, len) == 0)
+ return strdup(name);
+ }
+
+ return NULL;
+}
+
+static char **shell_completion(const char *buf, int start, int end)
+{
+ char **matches = NULL;
+
+ if (start == 0)
+ matches = rl_completion_matches(buf, shell_comp_gen);
+
+ return matches;
+}
+
+static void intr_shell(int sig)
+{
+ if (rl_end)
+ rl_kill_line(-1, 0);
+
+ rl_crlf();
+ rl_refresh_line(0, 0);
+ rl_free_line_state();
+}
+
+static void init_shell(FILE *fin, FILE *fout)
+{
+ char file[128];
+
+ memset(file, 0, sizeof(file));
+ snprintf(file, sizeof(file) - 1,
+ "%s/.bpf_dbg_history", getenv("HOME"));
+
+ read_history(file);
+
+ memset(file, 0, sizeof(file));
+ snprintf(file, sizeof(file) - 1,
+ "%s/.bpf_dbg_init", getenv("HOME"));
+
+ rl_instream = fin;
+ rl_outstream = fout;
+
+ rl_readline_name = "bpf_dbg";
+ rl_terminal_name = getenv("TERM");
+
+ rl_catch_signals = 0;
+ rl_catch_sigwinch = 1;
+
+ rl_attempted_completion_function = shell_completion;
+
+ rl_bind_key('\t', rl_complete);
+
+ rl_bind_key_in_map('\t', rl_complete, emacs_meta_keymap);
+ rl_bind_key_in_map('\033', rl_complete, emacs_meta_keymap);
+
+ rl_read_init_file(file);
+ rl_prep_terminal(0);
+ rl_set_signals();
+
+ signal(SIGINT, intr_shell);
+}
+
+static void exit_shell(void)
+{
+ char file[128];
+
+ memset(file, 0, sizeof(file));
+ snprintf(file, sizeof(file) - 1,
+ "%s/.bpf_dbg_history", getenv("HOME"));
+
+ write_history(file);
+ clear_history();
+ rl_deprep_terminal();
+
+ try_close_pcap();
+}
+
+static int run_shell_loop(FILE *fin, FILE *fout)
+{
+ char *buf;
+ int ret;
+
+ init_shell(fin, fout);
+
+ while ((buf = readline("> ")) != NULL) {
+ ret = execf(buf);
+ if (ret == CMD_EX)
+ break;
+ if (ret == CMD_OK && strlen(buf) > 0)
+ add_history(buf);
+
+ free(buf);
+ }
+
+ exit_shell();
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ FILE *fin = NULL, *fout = NULL;
+
+ if (argc >= 2)
+ fin = fopen(argv[1], "r");
+ if (argc >= 3)
+ fout = fopen(argv[2], "w");
+
+ return run_shell_loop(fin ? : stdin, fout ? : stdout);
+}
diff --git a/tools/net/bpf_exp.l b/tools/net/bpf_exp.l
new file mode 100644
index 000000000000..bf7be77ddd62
--- /dev/null
+++ b/tools/net/bpf_exp.l
@@ -0,0 +1,143 @@
+/*
+ * BPF asm code lexer
+ *
+ * This program is free software; you can distribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Syntax kept close to:
+ *
+ * Steven McCanne and Van Jacobson. 1993. The BSD packet filter: a new
+ * architecture for user-level packet capture. In Proceedings of the
+ * USENIX Winter 1993 Conference Proceedings on USENIX Winter 1993
+ * Conference Proceedings (USENIX'93). USENIX Association, Berkeley,
+ * CA, USA, 2-2.
+ *
+ * Copyright 2013 Daniel Borkmann <borkmann@redhat.com>
+ * Licensed under the GNU General Public License, version 2.0 (GPLv2)
+ */
+
+%{
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "bpf_exp.yacc.h"
+
+extern void yyerror(const char *str);
+
+%}
+
+%option align
+%option ecs
+
+%option nounput
+%option noreject
+%option noinput
+%option noyywrap
+
+%option 8bit
+%option caseless
+%option yylineno
+
+%%
+
+"ldb" { return OP_LDB; }
+"ldh" { return OP_LDH; }
+"ld" { return OP_LD; }
+"ldi" { return OP_LDI; }
+"ldx" { return OP_LDX; }
+"ldxi" { return OP_LDXI; }
+"ldxb" { return OP_LDXB; }
+"st" { return OP_ST; }
+"stx" { return OP_STX; }
+"jmp" { return OP_JMP; }
+"ja" { return OP_JMP; }
+"jeq" { return OP_JEQ; }
+"jneq" { return OP_JNEQ; }
+"jne" { return OP_JNEQ; }
+"jlt" { return OP_JLT; }
+"jle" { return OP_JLE; }
+"jgt" { return OP_JGT; }
+"jge" { return OP_JGE; }
+"jset" { return OP_JSET; }
+"add" { return OP_ADD; }
+"sub" { return OP_SUB; }
+"mul" { return OP_MUL; }
+"div" { return OP_DIV; }
+"mod" { return OP_MOD; }
+"neg" { return OP_NEG; }
+"and" { return OP_AND; }
+"xor" { return OP_XOR; }
+"or" { return OP_OR; }
+"lsh" { return OP_LSH; }
+"rsh" { return OP_RSH; }
+"ret" { return OP_RET; }
+"tax" { return OP_TAX; }
+"txa" { return OP_TXA; }
+
+"#"?("len") { return K_PKT_LEN; }
+"#"?("proto") { return K_PROTO; }
+"#"?("type") { return K_TYPE; }
+"#"?("poff") { return K_POFF; }
+"#"?("ifidx") { return K_IFIDX; }
+"#"?("nla") { return K_NLATTR; }
+"#"?("nlan") { return K_NLATTR_NEST; }
+"#"?("mark") { return K_MARK; }
+"#"?("queue") { return K_QUEUE; }
+"#"?("hatype") { return K_HATYPE; }
+"#"?("rxhash") { return K_RXHASH; }
+"#"?("cpu") { return K_CPU; }
+"#"?("vlan_tci") { return K_VLANT; }
+"#"?("vlan_pr") { return K_VLANP; }
+
+":" { return ':'; }
+"," { return ','; }
+"#" { return '#'; }
+"%" { return '%'; }
+"[" { return '['; }
+"]" { return ']'; }
+"(" { return '('; }
+")" { return ')'; }
+"x" { return 'x'; }
+"a" { return 'a'; }
+"+" { return '+'; }
+"M" { return 'M'; }
+"*" { return '*'; }
+"&" { return '&'; }
+
+([0][x][a-fA-F0-9]+) {
+ yylval.number = strtoul(yytext, NULL, 16);
+ return number;
+ }
+([0][b][0-1]+) {
+ yylval.number = strtol(yytext + 2, NULL, 2);
+ return number;
+ }
+(([0])|([-+]?[1-9][0-9]*)) {
+ yylval.number = strtol(yytext, NULL, 10);
+ return number;
+ }
+([0][0-9]+) {
+ yylval.number = strtol(yytext + 1, NULL, 8);
+ return number;
+ }
+[a-zA-Z_][a-zA-Z0-9_]+ {
+ yylval.label = strdup(yytext);
+ return label;
+ }
+
+"/*"([^\*]|\*[^/])*"*/" { /* NOP */ }
+";"[^\n]* { /* NOP */ }
+^#.* { /* NOP */ }
+[ \t]+ { /* NOP */ }
+[ \n]+ { /* NOP */ }
+
+. {
+ printf("unknown character \'%s\'", yytext);
+ yyerror("lex unknown character");
+ }
+
+%%
diff --git a/tools/net/bpf_exp.y b/tools/net/bpf_exp.y
new file mode 100644
index 000000000000..d15efc989ef5
--- /dev/null
+++ b/tools/net/bpf_exp.y
@@ -0,0 +1,762 @@
+/*
+ * BPF asm code parser
+ *
+ * This program is free software; you can distribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Syntax kept close to:
+ *
+ * Steven McCanne and Van Jacobson. 1993. The BSD packet filter: a new
+ * architecture for user-level packet capture. In Proceedings of the
+ * USENIX Winter 1993 Conference Proceedings on USENIX Winter 1993
+ * Conference Proceedings (USENIX'93). USENIX Association, Berkeley,
+ * CA, USA, 2-2.
+ *
+ * Copyright 2013 Daniel Borkmann <borkmann@redhat.com>
+ * Licensed under the GNU General Public License, version 2.0 (GPLv2)
+ */
+
+%{
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <unistd.h>
+#include <errno.h>
+#include <assert.h>
+#include <linux/filter.h>
+
+#include "bpf_exp.yacc.h"
+
+enum jmp_type { JTL, JFL, JKL };
+
+extern FILE *yyin;
+extern int yylex(void);
+extern void yyerror(const char *str);
+
+extern void bpf_asm_compile(FILE *fp, bool cstyle);
+static void bpf_set_curr_instr(uint16_t op, uint8_t jt, uint8_t jf, uint32_t k);
+static void bpf_set_curr_label(char *label);
+static void bpf_set_jmp_label(char *label, enum jmp_type type);
+
+%}
+
+%union {
+ char *label;
+ uint32_t number;
+}
+
+%token OP_LDB OP_LDH OP_LD OP_LDX OP_ST OP_STX OP_JMP OP_JEQ OP_JGT OP_JGE
+%token OP_JSET OP_ADD OP_SUB OP_MUL OP_DIV OP_AND OP_OR OP_XOR OP_LSH OP_RSH
+%token OP_RET OP_TAX OP_TXA OP_LDXB OP_MOD OP_NEG OP_JNEQ OP_JLT OP_JLE OP_LDI
+%token OP_LDXI
+
+%token K_PKT_LEN K_PROTO K_TYPE K_NLATTR K_NLATTR_NEST K_MARK K_QUEUE K_HATYPE
+%token K_RXHASH K_CPU K_IFIDX K_VLANT K_VLANP K_POFF
+
+%token ':' ',' '[' ']' '(' ')' 'x' 'a' '+' 'M' '*' '&' '#' '%'
+
+%token number label
+
+%type <label> label
+%type <number> number
+
+%%
+
+prog
+ : line
+ | prog line
+ ;
+
+line
+ : instr
+ | labelled_instr
+ ;
+
+labelled_instr
+ : labelled instr
+ ;
+
+instr
+ : ldb
+ | ldh
+ | ld
+ | ldi
+ | ldx
+ | ldxi
+ | st
+ | stx
+ | jmp
+ | jeq
+ | jneq
+ | jlt
+ | jle
+ | jgt
+ | jge
+ | jset
+ | add
+ | sub
+ | mul
+ | div
+ | mod
+ | neg
+ | and
+ | or
+ | xor
+ | lsh
+ | rsh
+ | ret
+ | tax
+ | txa
+ ;
+
+labelled
+ : label ':' { bpf_set_curr_label($1); }
+ ;
+
+ldb
+ : OP_LDB '[' 'x' '+' number ']' {
+ bpf_set_curr_instr(BPF_LD | BPF_B | BPF_IND, 0, 0, $5); }
+ | OP_LDB '[' '%' 'x' '+' number ']' {
+ bpf_set_curr_instr(BPF_LD | BPF_B | BPF_IND, 0, 0, $6); }
+ | OP_LDB '[' number ']' {
+ bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, $3); }
+ | OP_LDB K_PROTO {
+ bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_PROTOCOL); }
+ | OP_LDB K_TYPE {
+ bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_PKTTYPE); }
+ | OP_LDB K_IFIDX {
+ bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_IFINDEX); }
+ | OP_LDB K_NLATTR {
+ bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_NLATTR); }
+ | OP_LDB K_NLATTR_NEST {
+ bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST); }
+ | OP_LDB K_MARK {
+ bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_MARK); }
+ | OP_LDB K_QUEUE {
+ bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_QUEUE); }
+ | OP_LDB K_HATYPE {
+ bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_HATYPE); }
+ | OP_LDB K_RXHASH {
+ bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_RXHASH); }
+ | OP_LDB K_CPU {
+ bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_CPU); }
+ | OP_LDB K_VLANT {
+ bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_VLAN_TAG); }
+ | OP_LDB K_VLANP {
+ bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT); }
+ | OP_LDB K_POFF {
+ bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_PAY_OFFSET); }
+ ;
+
+ldh
+ : OP_LDH '[' 'x' '+' number ']' {
+ bpf_set_curr_instr(BPF_LD | BPF_H | BPF_IND, 0, 0, $5); }
+ | OP_LDH '[' '%' 'x' '+' number ']' {
+ bpf_set_curr_instr(BPF_LD | BPF_H | BPF_IND, 0, 0, $6); }
+ | OP_LDH '[' number ']' {
+ bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, $3); }
+ | OP_LDH K_PROTO {
+ bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_PROTOCOL); }
+ | OP_LDH K_TYPE {
+ bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_PKTTYPE); }
+ | OP_LDH K_IFIDX {
+ bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_IFINDEX); }
+ | OP_LDH K_NLATTR {
+ bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_NLATTR); }
+ | OP_LDH K_NLATTR_NEST {
+ bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST); }
+ | OP_LDH K_MARK {
+ bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_MARK); }
+ | OP_LDH K_QUEUE {
+ bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_QUEUE); }
+ | OP_LDH K_HATYPE {
+ bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_HATYPE); }
+ | OP_LDH K_RXHASH {
+ bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_RXHASH); }
+ | OP_LDH K_CPU {
+ bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_CPU); }
+ | OP_LDH K_VLANT {
+ bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_VLAN_TAG); }
+ | OP_LDH K_VLANP {
+ bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT); }
+ | OP_LDH K_POFF {
+ bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_PAY_OFFSET); }
+ ;
+
+ldi
+ : OP_LDI '#' number {
+ bpf_set_curr_instr(BPF_LD | BPF_IMM, 0, 0, $3); }
+ | OP_LDI number {
+ bpf_set_curr_instr(BPF_LD | BPF_IMM, 0, 0, $2); }
+ ;
+
+ld
+ : OP_LD '#' number {
+ bpf_set_curr_instr(BPF_LD | BPF_IMM, 0, 0, $3); }
+ | OP_LD K_PKT_LEN {
+ bpf_set_curr_instr(BPF_LD | BPF_W | BPF_LEN, 0, 0, 0); }
+ | OP_LD K_PROTO {
+ bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_PROTOCOL); }
+ | OP_LD K_TYPE {
+ bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_PKTTYPE); }
+ | OP_LD K_IFIDX {
+ bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_IFINDEX); }
+ | OP_LD K_NLATTR {
+ bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_NLATTR); }
+ | OP_LD K_NLATTR_NEST {
+ bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST); }
+ | OP_LD K_MARK {
+ bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_MARK); }
+ | OP_LD K_QUEUE {
+ bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_QUEUE); }
+ | OP_LD K_HATYPE {
+ bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_HATYPE); }
+ | OP_LD K_RXHASH {
+ bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_RXHASH); }
+ | OP_LD K_CPU {
+ bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_CPU); }
+ | OP_LD K_VLANT {
+ bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_VLAN_TAG); }
+ | OP_LD K_VLANP {
+ bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT); }
+ | OP_LD K_POFF {
+ bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
+ SKF_AD_OFF + SKF_AD_PAY_OFFSET); }
+ | OP_LD 'M' '[' number ']' {
+ bpf_set_curr_instr(BPF_LD | BPF_MEM, 0, 0, $4); }
+ | OP_LD '[' 'x' '+' number ']' {
+ bpf_set_curr_instr(BPF_LD | BPF_W | BPF_IND, 0, 0, $5); }
+ | OP_LD '[' '%' 'x' '+' number ']' {
+ bpf_set_curr_instr(BPF_LD | BPF_W | BPF_IND, 0, 0, $6); }
+ | OP_LD '[' number ']' {
+ bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, $3); }
+ ;
+
+ldxi
+ : OP_LDXI '#' number {
+ bpf_set_curr_instr(BPF_LDX | BPF_IMM, 0, 0, $3); }
+ | OP_LDXI number {
+ bpf_set_curr_instr(BPF_LDX | BPF_IMM, 0, 0, $2); }
+ ;
+
+ldx
+ : OP_LDX '#' number {
+ bpf_set_curr_instr(BPF_LDX | BPF_IMM, 0, 0, $3); }
+ | OP_LDX K_PKT_LEN {
+ bpf_set_curr_instr(BPF_LDX | BPF_W | BPF_LEN, 0, 0, 0); }
+ | OP_LDX 'M' '[' number ']' {
+ bpf_set_curr_instr(BPF_LDX | BPF_MEM, 0, 0, $4); }
+ | OP_LDXB number '*' '(' '[' number ']' '&' number ')' {
+ if ($2 != 4 || $9 != 0xf) {
+ fprintf(stderr, "ldxb offset not supported!\n");
+ exit(1);
+ } else {
+ bpf_set_curr_instr(BPF_LDX | BPF_MSH | BPF_B, 0, 0, $6); } }
+ | OP_LDX number '*' '(' '[' number ']' '&' number ')' {
+ if ($2 != 4 || $9 != 0xf) {
+ fprintf(stderr, "ldxb offset not supported!\n");
+ exit(1);
+ } else {
+ bpf_set_curr_instr(BPF_LDX | BPF_MSH | BPF_B, 0, 0, $6); } }
+ ;
+
+st
+ : OP_ST 'M' '[' number ']' {
+ bpf_set_curr_instr(BPF_ST, 0, 0, $4); }
+ ;
+
+stx
+ : OP_STX 'M' '[' number ']' {
+ bpf_set_curr_instr(BPF_STX, 0, 0, $4); }
+ ;
+
+jmp
+ : OP_JMP label {
+ bpf_set_jmp_label($2, JKL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JA, 0, 0, 0); }
+ ;
+
+jeq
+ : OP_JEQ '#' number ',' label ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_jmp_label($7, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_K, 0, 0, $3); }
+ | OP_JEQ 'x' ',' label ',' label {
+ bpf_set_jmp_label($4, JTL);
+ bpf_set_jmp_label($6, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 0); }
+ | OP_JEQ '%' 'x' ',' label ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_jmp_label($7, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 0); }
+ | OP_JEQ '#' number ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_K, 0, 0, $3); }
+ | OP_JEQ 'x' ',' label {
+ bpf_set_jmp_label($4, JTL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 0); }
+ | OP_JEQ '%' 'x' ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 0); }
+ ;
+
+jneq
+ : OP_JNEQ '#' number ',' label {
+ bpf_set_jmp_label($5, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_K, 0, 0, $3); }
+ | OP_JNEQ 'x' ',' label {
+ bpf_set_jmp_label($4, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 0); }
+ | OP_JNEQ '%' 'x' ',' label {
+ bpf_set_jmp_label($5, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 0); }
+ ;
+
+jlt
+ : OP_JLT '#' number ',' label {
+ bpf_set_jmp_label($5, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_K, 0, 0, $3); }
+ | OP_JLT 'x' ',' label {
+ bpf_set_jmp_label($4, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 0); }
+ | OP_JLT '%' 'x' ',' label {
+ bpf_set_jmp_label($5, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 0); }
+ ;
+
+jle
+ : OP_JLE '#' number ',' label {
+ bpf_set_jmp_label($5, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_K, 0, 0, $3); }
+ | OP_JLE 'x' ',' label {
+ bpf_set_jmp_label($4, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 0); }
+ | OP_JLE '%' 'x' ',' label {
+ bpf_set_jmp_label($5, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 0); }
+ ;
+
+jgt
+ : OP_JGT '#' number ',' label ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_jmp_label($7, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_K, 0, 0, $3); }
+ | OP_JGT 'x' ',' label ',' label {
+ bpf_set_jmp_label($4, JTL);
+ bpf_set_jmp_label($6, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 0); }
+ | OP_JGT '%' 'x' ',' label ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_jmp_label($7, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 0); }
+ | OP_JGT '#' number ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_K, 0, 0, $3); }
+ | OP_JGT 'x' ',' label {
+ bpf_set_jmp_label($4, JTL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 0); }
+ | OP_JGT '%' 'x' ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 0); }
+ ;
+
+jge
+ : OP_JGE '#' number ',' label ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_jmp_label($7, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_K, 0, 0, $3); }
+ | OP_JGE 'x' ',' label ',' label {
+ bpf_set_jmp_label($4, JTL);
+ bpf_set_jmp_label($6, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 0); }
+ | OP_JGE '%' 'x' ',' label ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_jmp_label($7, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 0); }
+ | OP_JGE '#' number ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_K, 0, 0, $3); }
+ | OP_JGE 'x' ',' label {
+ bpf_set_jmp_label($4, JTL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 0); }
+ | OP_JGE '%' 'x' ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 0); }
+ ;
+
+jset
+ : OP_JSET '#' number ',' label ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_jmp_label($7, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JSET | BPF_K, 0, 0, $3); }
+ | OP_JSET 'x' ',' label ',' label {
+ bpf_set_jmp_label($4, JTL);
+ bpf_set_jmp_label($6, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JSET | BPF_X, 0, 0, 0); }
+ | OP_JSET '%' 'x' ',' label ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_jmp_label($7, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JSET | BPF_X, 0, 0, 0); }
+ | OP_JSET '#' number ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JSET | BPF_K, 0, 0, $3); }
+ | OP_JSET 'x' ',' label {
+ bpf_set_jmp_label($4, JTL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JSET | BPF_X, 0, 0, 0); }
+ | OP_JSET '%' 'x' ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JSET | BPF_X, 0, 0, 0); }
+ ;
+
+add
+ : OP_ADD '#' number {
+ bpf_set_curr_instr(BPF_ALU | BPF_ADD | BPF_K, 0, 0, $3); }
+ | OP_ADD 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_ADD | BPF_X, 0, 0, 0); }
+ | OP_ADD '%' 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_ADD | BPF_X, 0, 0, 0); }
+ ;
+
+sub
+ : OP_SUB '#' number {
+ bpf_set_curr_instr(BPF_ALU | BPF_SUB | BPF_K, 0, 0, $3); }
+ | OP_SUB 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_SUB | BPF_X, 0, 0, 0); }
+ | OP_SUB '%' 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_SUB | BPF_X, 0, 0, 0); }
+ ;
+
+mul
+ : OP_MUL '#' number {
+ bpf_set_curr_instr(BPF_ALU | BPF_MUL | BPF_K, 0, 0, $3); }
+ | OP_MUL 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_MUL | BPF_X, 0, 0, 0); }
+ | OP_MUL '%' 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_MUL | BPF_X, 0, 0, 0); }
+ ;
+
+div
+ : OP_DIV '#' number {
+ bpf_set_curr_instr(BPF_ALU | BPF_DIV | BPF_K, 0, 0, $3); }
+ | OP_DIV 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_DIV | BPF_X, 0, 0, 0); }
+ | OP_DIV '%' 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_DIV | BPF_X, 0, 0, 0); }
+ ;
+
+mod
+ : OP_MOD '#' number {
+ bpf_set_curr_instr(BPF_ALU | BPF_MOD | BPF_K, 0, 0, $3); }
+ | OP_MOD 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_MOD | BPF_X, 0, 0, 0); }
+ | OP_MOD '%' 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_MOD | BPF_X, 0, 0, 0); }
+ ;
+
+neg
+ : OP_NEG {
+ bpf_set_curr_instr(BPF_ALU | BPF_NEG, 0, 0, 0); }
+ ;
+
+and
+ : OP_AND '#' number {
+ bpf_set_curr_instr(BPF_ALU | BPF_AND | BPF_K, 0, 0, $3); }
+ | OP_AND 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_AND | BPF_X, 0, 0, 0); }
+ | OP_AND '%' 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_AND | BPF_X, 0, 0, 0); }
+ ;
+
+or
+ : OP_OR '#' number {
+ bpf_set_curr_instr(BPF_ALU | BPF_OR | BPF_K, 0, 0, $3); }
+ | OP_OR 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_OR | BPF_X, 0, 0, 0); }
+ | OP_OR '%' 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_OR | BPF_X, 0, 0, 0); }
+ ;
+
+xor
+ : OP_XOR '#' number {
+ bpf_set_curr_instr(BPF_ALU | BPF_XOR | BPF_K, 0, 0, $3); }
+ | OP_XOR 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_XOR | BPF_X, 0, 0, 0); }
+ | OP_XOR '%' 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_XOR | BPF_X, 0, 0, 0); }
+ ;
+
+lsh
+ : OP_LSH '#' number {
+ bpf_set_curr_instr(BPF_ALU | BPF_LSH | BPF_K, 0, 0, $3); }
+ | OP_LSH 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_LSH | BPF_X, 0, 0, 0); }
+ | OP_LSH '%' 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_LSH | BPF_X, 0, 0, 0); }
+ ;
+
+rsh
+ : OP_RSH '#' number {
+ bpf_set_curr_instr(BPF_ALU | BPF_RSH | BPF_K, 0, 0, $3); }
+ | OP_RSH 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_RSH | BPF_X, 0, 0, 0); }
+ | OP_RSH '%' 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_RSH | BPF_X, 0, 0, 0); }
+ ;
+
+ret
+ : OP_RET 'a' {
+ bpf_set_curr_instr(BPF_RET | BPF_A, 0, 0, 0); }
+ | OP_RET '%' 'a' {
+ bpf_set_curr_instr(BPF_RET | BPF_A, 0, 0, 0); }
+ | OP_RET 'x' {
+ bpf_set_curr_instr(BPF_RET | BPF_X, 0, 0, 0); }
+ | OP_RET '%' 'x' {
+ bpf_set_curr_instr(BPF_RET | BPF_X, 0, 0, 0); }
+ | OP_RET '#' number {
+ bpf_set_curr_instr(BPF_RET | BPF_K, 0, 0, $3); }
+ ;
+
+tax
+ : OP_TAX {
+ bpf_set_curr_instr(BPF_MISC | BPF_TAX, 0, 0, 0); }
+ ;
+
+txa
+ : OP_TXA {
+ bpf_set_curr_instr(BPF_MISC | BPF_TXA, 0, 0, 0); }
+ ;
+
+%%
+
+static int curr_instr = 0;
+static struct sock_filter out[BPF_MAXINSNS];
+static char **labels, **labels_jt, **labels_jf, **labels_k;
+
+static void bpf_assert_max(void)
+{
+ if (curr_instr >= BPF_MAXINSNS) {
+ fprintf(stderr, "only max %u insns allowed!\n", BPF_MAXINSNS);
+ exit(1);
+ }
+}
+
+static void bpf_set_curr_instr(uint16_t code, uint8_t jt, uint8_t jf,
+ uint32_t k)
+{
+ bpf_assert_max();
+ out[curr_instr].code = code;
+ out[curr_instr].jt = jt;
+ out[curr_instr].jf = jf;
+ out[curr_instr].k = k;
+ curr_instr++;
+}
+
+static void bpf_set_curr_label(char *label)
+{
+ bpf_assert_max();
+ labels[curr_instr] = label;
+}
+
+static void bpf_set_jmp_label(char *label, enum jmp_type type)
+{
+ bpf_assert_max();
+ switch (type) {
+ case JTL:
+ labels_jt[curr_instr] = label;
+ break;
+ case JFL:
+ labels_jf[curr_instr] = label;
+ break;
+ case JKL:
+ labels_k[curr_instr] = label;
+ break;
+ }
+}
+
+static int bpf_find_insns_offset(const char *label)
+{
+ int i, max = curr_instr, ret = -ENOENT;
+
+ for (i = 0; i < max; i++) {
+ if (labels[i] && !strcmp(label, labels[i])) {
+ ret = i;
+ break;
+ }
+ }
+
+ if (ret == -ENOENT) {
+ fprintf(stderr, "no such label \'%s\'!\n", label);
+ exit(1);
+ }
+
+ return ret;
+}
+
+static void bpf_stage_1_insert_insns(void)
+{
+ yyparse();
+}
+
+static void bpf_reduce_k_jumps(void)
+{
+ int i;
+
+ for (i = 0; i < curr_instr; i++) {
+ if (labels_k[i]) {
+ int off = bpf_find_insns_offset(labels_k[i]);
+ out[i].k = (uint32_t) (off - i - 1);
+ }
+ }
+}
+
+static void bpf_reduce_jt_jumps(void)
+{
+ int i;
+
+ for (i = 0; i < curr_instr; i++) {
+ if (labels_jt[i]) {
+ int off = bpf_find_insns_offset(labels_jt[i]);
+ out[i].jt = (uint8_t) (off - i - 1);
+ }
+ }
+}
+
+static void bpf_reduce_jf_jumps(void)
+{
+ int i;
+
+ for (i = 0; i < curr_instr; i++) {
+ if (labels_jf[i]) {
+ int off = bpf_find_insns_offset(labels_jf[i]);
+ out[i].jf = (uint8_t) (off - i - 1);
+ }
+ }
+}
+
+static void bpf_stage_2_reduce_labels(void)
+{
+ bpf_reduce_k_jumps();
+ bpf_reduce_jt_jumps();
+ bpf_reduce_jf_jumps();
+}
+
+static void bpf_pretty_print_c(void)
+{
+ int i;
+
+ for (i = 0; i < curr_instr; i++)
+ printf("{ %#04x, %2u, %2u, %#010x },\n", out[i].code,
+ out[i].jt, out[i].jf, out[i].k);
+}
+
+static void bpf_pretty_print(void)
+{
+ int i;
+
+ printf("%u,", curr_instr);
+ for (i = 0; i < curr_instr; i++)
+ printf("%u %u %u %u,", out[i].code,
+ out[i].jt, out[i].jf, out[i].k);
+ printf("\n");
+}
+
+static void bpf_init(void)
+{
+ memset(out, 0, sizeof(out));
+
+ labels = calloc(BPF_MAXINSNS, sizeof(*labels));
+ assert(labels);
+ labels_jt = calloc(BPF_MAXINSNS, sizeof(*labels_jt));
+ assert(labels_jt);
+ labels_jf = calloc(BPF_MAXINSNS, sizeof(*labels_jf));
+ assert(labels_jf);
+ labels_k = calloc(BPF_MAXINSNS, sizeof(*labels_k));
+ assert(labels_k);
+}
+
+static void bpf_destroy_labels(void)
+{
+ int i;
+
+ for (i = 0; i < curr_instr; i++) {
+ free(labels_jf[i]);
+ free(labels_jt[i]);
+ free(labels_k[i]);
+ free(labels[i]);
+ }
+}
+
+static void bpf_destroy(void)
+{
+ bpf_destroy_labels();
+ free(labels_jt);
+ free(labels_jf);
+ free(labels_k);
+ free(labels);
+}
+
+void bpf_asm_compile(FILE *fp, bool cstyle)
+{
+ yyin = fp;
+
+ bpf_init();
+ bpf_stage_1_insert_insns();
+ bpf_stage_2_reduce_labels();
+ bpf_destroy();
+
+ if (cstyle)
+ bpf_pretty_print_c();
+ else
+ bpf_pretty_print();
+
+ if (fp != stdin)
+ fclose(yyin);
+}
+
+void yyerror(const char *str)
+{
+ fprintf(stderr, "error: %s\n", str);
+ exit(1);
+}
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 369c03648f88..1cd035708931 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2078,8 +2078,10 @@ static int process_group_desc(struct perf_file_section *section __maybe_unused,
if (evsel->idx == (int) desc[i].leader_idx) {
evsel->leader = evsel;
/* {anon_group} is a dummy name */
- if (strcmp(desc[i].name, "{anon_group}"))
+ if (strcmp(desc[i].name, "{anon_group}")) {
evsel->group_name = desc[i].name;
+ desc[i].name = NULL;
+ }
evsel->nr_members = desc[i].nr_members;
if (i >= nr_groups || nr > 0) {
@@ -2105,7 +2107,7 @@ static int process_group_desc(struct perf_file_section *section __maybe_unused,
ret = 0;
out_free:
- while ((int) --i >= 0)
+ for (i = 0; i < nr_groups; i++)
free(desc[i].name);
free(desc);
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index cd8e2f592719..49eaf1d7d89d 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -70,14 +70,13 @@ int thread__set_comm(struct thread *thread, const char *str, u64 timestamp)
/* Override latest entry if it had no specific time coverage */
if (!curr->start) {
comm__override(curr, str, timestamp);
- return 0;
+ } else {
+ new = comm__new(str, timestamp);
+ if (!new)
+ return -ENOMEM;
+ list_add(&new->list, &thread->comm_list);
}
- new = comm__new(str, timestamp);
- if (!new)
- return -ENOMEM;
-
- list_add(&new->list, &thread->comm_list);
thread->comm_set = true;
return 0;
diff --git a/tools/power/cpupower/man/cpupower-idle-info.1 b/tools/power/cpupower/man/cpupower-idle-info.1
index 4178effd9e99..7b3646adb92f 100644
--- a/tools/power/cpupower/man/cpupower-idle-info.1
+++ b/tools/power/cpupower/man/cpupower-idle-info.1
@@ -87,4 +87,5 @@ Thomas Renninger <trenn@suse.de>
.fi
.SH "SEE ALSO"
.LP
-cpupower(1), cpupower\-monitor(1), cpupower\-info(1), cpupower\-set(1)
+cpupower(1), cpupower\-monitor(1), cpupower\-info(1), cpupower\-set(1),
+cpupower\-idle\-set(1)
diff --git a/tools/power/cpupower/man/cpupower-idle-set.1 b/tools/power/cpupower/man/cpupower-idle-set.1
new file mode 100644
index 000000000000..6b1607272a5b
--- /dev/null
+++ b/tools/power/cpupower/man/cpupower-idle-set.1
@@ -0,0 +1,71 @@
+.TH "CPUPOWER-IDLE-SET" "1" "0.1" "" "cpupower Manual"
+.SH "NAME"
+.LP
+cpupower idle\-set \- Utility to set cpu idle state specific kernel options
+.SH "SYNTAX"
+.LP
+cpupower [ \-c cpulist ] idle\-set [\fIoptions\fP]
+.SH "DESCRIPTION"
+.LP
+The cpupower idle\-set subcommand sets cpu idle state (also called cpu
+sleep state) specific options offered by the kernel. One example is
+disabling sleep states, which can be handy for power vs. performance tuning.
+.SH "OPTIONS"
+.LP
+.TP
+\fB\-d\fR \fB\-\-disable\fR <STATE_NO>
+Disable the processor sleep state with the given number.
+.TP
+\fB\-e\fR \fB\-\-enable\fR <STATE_NO>
+Enable the processor sleep state with the given number.
+
+.SH "REMARKS"
+.LP
+Cpuidle Governors Policy on Disabling Sleep States
+
+.RS 4
+Depending on the cpuidle governor in use (which implements the kernel's
+policy for choosing sleep states), disabling one sleep state may cause
+deeper sleep states on that core to be disabled as well.
+
+There are two cpuidle governors, ladder and menu. While the ladder
+governor is always available if CONFIG_CPU_IDLE is selected, the
+menu governor additionally requires CONFIG_NO_HZ.
+
+The behavior and effect of the disable variable depend on the
+implementation of the particular governor. In the ladder governor, for
+example, the states are not independent: disabling a light state disables
+all deeper states as well. Likewise, enabling a deep state while a lighter
+state is still disabled has no effect.
+.RE
+.LP
+Disabling the Lightest Sleep State May Not Have Any Effect
+
+.RS 4
+If the criteria for entering deeper sleep states are not met, the kernel
+may still enter the lightest sleep state when idle, irrespective of
+whether it is disabled. This is also reflected in the usage count of the
+disabled sleep state shown by the cpupower idle\-info command.
+.RE
+.LP
+Selecting Specific CPU Cores
+
+.RS 4
+By default, the processor sleep states of all CPU cores are set. Please
+refer to the \-\-cpu option section of the cpupower(1) manpage for how to
+select the cores whose C-states are to be disabled.
+.RE
+.SH "FILES"
+.nf
+\fI/sys/devices/system/cpu/cpu*/cpuidle/state*\fP
+\fI/sys/devices/system/cpu/cpuidle/*\fP
+.fi
+.SH "AUTHORS"
+.nf
+Thomas Renninger <trenn@suse.de>
+.fi
+.SH "SEE ALSO"
+.LP
+cpupower(1), cpupower\-monitor(1), cpupower\-info(1), cpupower\-set(1),
+cpupower\-idle\-info(1)
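
Under the hood, idle\-set just writes the per-state "disable" file listed
in the FILES section. A minimal sketch of disabling one state, with cpu0
and state2 as placeholder choices and root privileges assumed:

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/cpu0/cpuidle/state2/disable";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("1", f);	/* "1" disables the state, "0" re-enables it */
	fclose(f);
	return 0;
}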
diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c
index dc4de3762111..bcf1d2f0b791 100644
--- a/tools/power/cpupower/utils/cpupower-set.c
+++ b/tools/power/cpupower/utils/cpupower-set.c
@@ -18,9 +18,9 @@
#include "helpers/bitmask.h"
static struct option set_opts[] = {
- { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'},
- { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'},
- { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'},
+ { .name = "perf-bias", .has_arg = required_argument, .flag = NULL, .val = 'b'},
+ { .name = "sched-mc", .has_arg = required_argument, .flag = NULL, .val = 'm'},
+ { .name = "sched-smt", .has_arg = required_argument, .flag = NULL, .val = 's'},
{ },
};
diff --git a/tools/power/cpupower/utils/helpers/sysfs.c b/tools/power/cpupower/utils/helpers/sysfs.c
index 5cdc600e8152..851c7a16ca49 100644
--- a/tools/power/cpupower/utils/helpers/sysfs.c
+++ b/tools/power/cpupower/utils/helpers/sysfs.c
@@ -278,7 +278,7 @@ static char *sysfs_idlestate_get_one_string(unsigned int cpu,
int sysfs_is_idlestate_disabled(unsigned int cpu,
unsigned int idlestate)
{
- if (sysfs_get_idlestate_count(cpu) < idlestate)
+ if (sysfs_get_idlestate_count(cpu) <= idlestate)
return -1;
if (!sysfs_idlestate_file_exists(cpu, idlestate,
@@ -303,7 +303,7 @@ int sysfs_idlestate_disable(unsigned int cpu,
char value[SYSFS_PATH_MAX];
int bytes_written;
- if (sysfs_get_idlestate_count(cpu) < idlestate)
+ if (sysfs_get_idlestate_count(cpu) <= idlestate)
return -1;
if (!sysfs_idlestate_file_exists(cpu, idlestate,
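
Both hunks above fix the same zero-based off-by-one: sysfs_get_idlestate_count()
returns the number of states, so valid indices run from 0 to count - 1, and an
index equal to the count must be rejected. A tiny standalone check of the
corrected condition:

#include <assert.h>

static int idlestate_in_range(unsigned int count, unsigned int idlestate)
{
	return idlestate < count;	/* same as rejecting count <= idlestate */
}

int main(void)
{
	assert(idlestate_in_range(3, 2));	/* last valid state */
	assert(!idlestate_in_range(3, 3));	/* the old "<" check let this through */
	return 0;
}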
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index fe702076ca46..9d77f13c2d25 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -2,7 +2,7 @@
* turbostat -- show CPU frequency and C-state residency
* on modern Intel turbo-capable processors.
*
- * Copyright (c) 2012 Intel Corporation.
+ * Copyright (c) 2013 Intel Corporation.
* Len Brown <len.brown@intel.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -47,6 +47,8 @@ unsigned int skip_c1;
unsigned int do_nhm_cstates;
unsigned int do_snb_cstates;
unsigned int do_c8_c9_c10;
+unsigned int do_slm_cstates;
+unsigned int use_c1_residency_msr;
unsigned int has_aperf;
unsigned int has_epb;
unsigned int units = 1000000000; /* Ghz etc */
@@ -81,6 +83,8 @@ double rapl_joule_counter_range;
#define RAPL_DRAM (1 << 3)
#define RAPL_PKG_PERF_STATUS (1 << 4)
#define RAPL_DRAM_PERF_STATUS (1 << 5)
+#define RAPL_PKG_POWER_INFO (1 << 6)
+#define RAPL_CORE_POLICY (1 << 7)
#define TJMAX_DEFAULT 100
#define MAX(a, b) ((a) > (b) ? (a) : (b))
@@ -96,7 +100,7 @@ struct thread_data {
unsigned long long tsc;
unsigned long long aperf;
unsigned long long mperf;
- unsigned long long c1; /* derived */
+ unsigned long long c1;
unsigned long long extra_msr64;
unsigned long long extra_delta64;
unsigned long long extra_msr32;
@@ -266,7 +270,7 @@ void print_header(void)
outp += sprintf(outp, " MSR 0x%03X", extra_msr_offset64);
if (do_nhm_cstates)
outp += sprintf(outp, " %%c1");
- if (do_nhm_cstates)
+ if (do_nhm_cstates && !do_slm_cstates)
outp += sprintf(outp, " %%c3");
if (do_nhm_cstates)
outp += sprintf(outp, " %%c6");
@@ -280,9 +284,9 @@ void print_header(void)
if (do_snb_cstates)
outp += sprintf(outp, " %%pc2");
- if (do_nhm_cstates)
+ if (do_nhm_cstates && !do_slm_cstates)
outp += sprintf(outp, " %%pc3");
- if (do_nhm_cstates)
+ if (do_nhm_cstates && !do_slm_cstates)
outp += sprintf(outp, " %%pc6");
if (do_snb_cstates)
outp += sprintf(outp, " %%pc7");
@@ -480,7 +484,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
goto done;
- if (do_nhm_cstates)
+ if (do_nhm_cstates && !do_slm_cstates)
outp += sprintf(outp, " %6.2f", 100.0 * c->c3/t->tsc);
if (do_nhm_cstates)
outp += sprintf(outp, " %6.2f", 100.0 * c->c6/t->tsc);
@@ -499,9 +503,9 @@ int format_counters(struct thread_data *t, struct core_data *c,
if (do_snb_cstates)
outp += sprintf(outp, " %6.2f", 100.0 * p->pc2/t->tsc);
- if (do_nhm_cstates)
+ if (do_nhm_cstates && !do_slm_cstates)
outp += sprintf(outp, " %6.2f", 100.0 * p->pc3/t->tsc);
- if (do_nhm_cstates)
+ if (do_nhm_cstates && !do_slm_cstates)
outp += sprintf(outp, " %6.2f", 100.0 * p->pc6/t->tsc);
if (do_snb_cstates)
outp += sprintf(outp, " %6.2f", 100.0 * p->pc7/t->tsc);
@@ -648,17 +652,24 @@ delta_thread(struct thread_data *new, struct thread_data *old,
}
- /*
- * As counter collection is not atomic,
- * it is possible for mperf's non-halted cycles + idle states
- * to exceed TSC's all cycles: show c1 = 0% in that case.
- */
- if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
- old->c1 = 0;
- else {
- /* normal case, derive c1 */
- old->c1 = old->tsc - old->mperf - core_delta->c3
+ if (use_c1_residency_msr) {
+ /*
+ * Some models have a dedicated C1 residency MSR,
+ * which should be more accurate than the derivation below.
+ */
+ } else {
+ /*
+ * As counter collection is not atomic,
+ * it is possible for mperf's non-halted cycles + idle states
+ * to exceed TSC's all cycles: show c1 = 0% in that case.
+ */
+ if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
+ old->c1 = 0;
+ else {
+ /* normal case, derive c1 */
+ old->c1 = old->tsc - old->mperf - core_delta->c3
- core_delta->c6 - core_delta->c7;
+ }
}
if (old->mperf == 0) {
@@ -872,13 +883,21 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
if (get_msr(cpu, extra_msr_offset64, &t->extra_msr64))
return -5;
+ if (use_c1_residency_msr) {
+ if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1))
+ return -6;
+ }
+
/* collect core counters only for 1st thread in core */
if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
return 0;
- if (do_nhm_cstates) {
+ if (do_nhm_cstates && !do_slm_cstates) {
if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
return -6;
+ }
+
+ if (do_nhm_cstates) {
if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
return -7;
}
@@ -898,7 +917,7 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
return 0;
- if (do_nhm_cstates) {
+ if (do_nhm_cstates && !do_slm_cstates) {
if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
return -9;
if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
@@ -977,7 +996,7 @@ void print_verbose_header(void)
ratio, bclk, ratio * bclk);
get_msr(0, MSR_IA32_POWER_CTL, &msr);
- fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E: %sabled)\n",
+ fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n",
msr, msr & 0x2 ? "EN" : "DIS");
if (!do_ivt_turbo_ratio_limit)
@@ -1046,25 +1065,28 @@ print_nhm_turbo_ratio_limits:
switch(msr & 0x7) {
case 0:
- fprintf(stderr, "pc0");
+ fprintf(stderr, do_slm_cstates ? "no pkg states" : "pc0");
break;
case 1:
- fprintf(stderr, do_snb_cstates ? "pc2" : "pc0");
+ fprintf(stderr, do_slm_cstates ? "no pkg states" : do_snb_cstates ? "pc2" : "pc0");
break;
case 2:
- fprintf(stderr, do_snb_cstates ? "pc6-noret" : "pc3");
+ fprintf(stderr, do_slm_cstates ? "invalid" : do_snb_cstates ? "pc6-noret" : "pc3");
break;
case 3:
- fprintf(stderr, "pc6");
+ fprintf(stderr, do_slm_cstates ? "invalid" : "pc6");
break;
case 4:
- fprintf(stderr, "pc7");
+ fprintf(stderr, do_slm_cstates ? "pc4" : "pc7");
break;
case 5:
- fprintf(stderr, do_snb_cstates ? "pc7s" : "invalid");
+ fprintf(stderr, do_slm_cstates ? "invalid" : do_snb_cstates ? "pc7s" : "invalid");
+ break;
+ case 6:
+ fprintf(stderr, do_slm_cstates ? "pc6" : "invalid");
break;
case 7:
- fprintf(stderr, "unlimited");
+ fprintf(stderr, do_slm_cstates ? "pc7" : "unlimited");
break;
default:
fprintf(stderr, "invalid");
@@ -1460,6 +1482,8 @@ int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
case 0x3F: /* HSW */
case 0x45: /* HSW */
case 0x46: /* HSW */
+ case 0x37: /* BYT */
+ case 0x4D: /* AVN */
return 1;
case 0x2E: /* Nehalem-EX Xeon - Beckton */
case 0x2F: /* Westmere-EX Xeon - Eagleton */
@@ -1532,14 +1556,33 @@ int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
#define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */
#define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */
+double get_tdp(unsigned int model)
+{
+ unsigned long long msr;
+
+ if (do_rapl & RAPL_PKG_POWER_INFO)
+ if (!get_msr(0, MSR_PKG_POWER_INFO, &msr))
+ return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
+
+ switch (model) {
+ case 0x37:
+ case 0x4D:
+ return 30.0;
+ default:
+ return 135.0;
+ }
+}
+
/*
* rapl_probe()
*
- * sets do_rapl
+ * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
*/
void rapl_probe(unsigned int family, unsigned int model)
{
unsigned long long msr;
+ unsigned int time_unit;
double tdp;
if (!genuine_intel)
@@ -1555,11 +1598,15 @@ void rapl_probe(unsigned int family, unsigned int model)
case 0x3F: /* HSW */
case 0x45: /* HSW */
case 0x46: /* HSW */
- do_rapl = RAPL_PKG | RAPL_CORES | RAPL_GFX;
+ do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
break;
case 0x2D:
case 0x3E:
- do_rapl = RAPL_PKG | RAPL_CORES | RAPL_DRAM | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS;
+ do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO;
+ break;
+ case 0x37: /* BYT */
+ case 0x4D: /* AVN */
+ do_rapl = RAPL_PKG | RAPL_CORES;
break;
default:
return;
@@ -1570,19 +1617,22 @@ void rapl_probe(unsigned int family, unsigned int model)
return;
rapl_power_units = 1.0 / (1 << (msr & 0xF));
- rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
- rapl_time_units = 1.0 / (1 << (msr >> 16 & 0xF));
+ if (model == 0x37)
+ rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
+ else
+ rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
- /* get TDP to determine energy counter range */
- if (get_msr(0, MSR_PKG_POWER_INFO, &msr))
- return;
+ time_unit = msr >> 16 & 0xF;
+ if (time_unit == 0)
+ time_unit = 0xA;
- tdp = ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
+ rapl_time_units = 1.0 / (1 << (time_unit));
- rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
+ tdp = get_tdp(model);
+ rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
if (verbose)
- fprintf(stderr, "RAPL: %.0f sec. Joule Counter Range\n", rapl_joule_counter_range);
+ fprintf(stderr, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
return;
}
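
To make the unit arithmetic concrete: with an assumed MSR_RAPL_POWER_UNIT
reading of 0x000A1003 (a common Sandy Bridge value), the decode above yields
a 1/8 W power unit, a 1/65536 J energy unit, and a ~0.98 ms time unit, so
the 32-bit energy counter wraps after roughly 485 seconds at the 135 W
default TDP. A standalone recomputation:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t msr = 0x000A1003ULL;	/* assumed sample value */
	double power_units  = 1.0 / (1 << (msr & 0xF));
	double energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
	double time_units   = 1.0 / (1 << (msr >> 16 & 0xF));
	double tdp = 135.0;		/* get_tdp()'s default fallback */

	printf("%f W, %f J, %f s units\n",
	       power_units, energy_units, time_units);
	printf("joule counter range: %.0f sec at %.0f Watts\n",
	       0xFFFFFFFF * energy_units / tdp, tdp);
	return 0;
}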
@@ -1668,7 +1718,6 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
unsigned long long msr;
int cpu;
- double local_rapl_power_units, local_rapl_energy_units, local_rapl_time_units;
if (!do_rapl)
return 0;
@@ -1686,23 +1735,13 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
return -1;
- local_rapl_power_units = 1.0 / (1 << (msr & 0xF));
- local_rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
- local_rapl_time_units = 1.0 / (1 << (msr >> 16 & 0xF));
-
- if (local_rapl_power_units != rapl_power_units)
- fprintf(stderr, "cpu%d, ERROR: Power units mis-match\n", cpu);
- if (local_rapl_energy_units != rapl_energy_units)
- fprintf(stderr, "cpu%d, ERROR: Energy units mis-match\n", cpu);
- if (local_rapl_time_units != rapl_time_units)
- fprintf(stderr, "cpu%d, ERROR: Time units mis-match\n", cpu);
-
if (verbose) {
fprintf(stderr, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx "
"(%f Watts, %f Joules, %f sec.)\n", cpu, msr,
- local_rapl_power_units, local_rapl_energy_units, local_rapl_time_units);
+ rapl_power_units, rapl_energy_units, rapl_time_units);
}
- if (do_rapl & RAPL_PKG) {
+ if (do_rapl & RAPL_PKG_POWER_INFO) {
+
if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr))
return -5;
@@ -1714,6 +1753,9 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
+ }
+ if (do_rapl & RAPL_PKG) {
+
if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr))
return -9;
@@ -1749,12 +1791,16 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
print_power_limit_msr(cpu, msr, "DRAM Limit");
}
- if (do_rapl & RAPL_CORES) {
+ if (do_rapl & RAPL_CORE_POLICY) {
if (verbose) {
if (get_msr(cpu, MSR_PP0_POLICY, &msr))
return -7;
fprintf(stderr, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF);
+ }
+ }
+ if (do_rapl & RAPL_CORES) {
+ if (verbose) {
if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr))
return -9;
@@ -1813,10 +1859,48 @@ int has_c8_c9_c10(unsigned int family, unsigned int model)
}
+int is_slm(unsigned int family, unsigned int model)
+{
+ if (!genuine_intel)
+ return 0;
+ switch (model) {
+ case 0x37: /* BYT */
+ case 0x4D: /* AVN */
+ return 1;
+ }
+ return 0;
+}
+
+#define SLM_BCLK_FREQS 5
+double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0 };
+
+double slm_bclk(void)
+{
+ unsigned long long msr = 3;
+ unsigned int i;
+ double freq;
+
+ if (get_msr(0, MSR_FSB_FREQ, &msr))
+ fprintf(stderr, "SLM BCLK: unknown\n");
+
+ i = msr & 0xf;
+ if (i >= SLM_BCLK_FREQS) {
+ fprintf(stderr, "SLM BCLK[%d] invalid\n", i);
+ i = 3;
+ }
+ freq = slm_freq_table[i];
+
+ fprintf(stderr, "SLM BCLK: %.1f Mhz\n", freq);
+
+ return freq;
+}
+
double discover_bclk(unsigned int family, unsigned int model)
{
if (is_snb(family, model))
return 100.00;
+ else if (is_slm(family, model))
+ return slm_bclk();
else
return 133.33;
}
@@ -1873,7 +1957,7 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk
fprintf(stderr, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
cpu, msr, target_c_local);
- if (target_c_local < 85 || target_c_local > 120)
+ if (target_c_local < 85 || target_c_local > 127)
goto guess;
tcc_activation_temp = target_c_local;
@@ -1970,6 +2054,7 @@ void check_cpuid()
do_smi = do_nhm_cstates;
do_snb_cstates = is_snb(family, model);
do_c8_c9_c10 = has_c8_c9_c10(family, model);
+ do_slm_cstates = is_slm(family, model);
bclk = discover_bclk(family, model);
do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model);
@@ -2331,7 +2416,7 @@ int main(int argc, char **argv)
cmdline(argc, argv);
if (verbose)
- fprintf(stderr, "turbostat v3.4 April 17, 2013"
+ fprintf(stderr, "turbostat v3.5 April 26, 2013"
" - Len Brown <lenb@kernel.org>\n");
turbostat_init();
diff --git a/tools/usb/Makefile b/tools/usb/Makefile
index 396d6c44e9d7..acf2165c04e6 100644
--- a/tools/usb/Makefile
+++ b/tools/usb/Makefile
@@ -3,11 +3,12 @@
CC = $(CROSS_COMPILE)gcc
PTHREAD_LIBS = -lpthread
WARNINGS = -Wall -Wextra
-CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS) -I../include
+CFLAGS = $(WARNINGS) -g -I../include
+LDFLAGS = $(PTHREAD_LIBS)
all: testusb ffs-test
%: %.c
- $(CC) $(CFLAGS) -o $@ $^
+ $(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS)
clean:
$(RM) testusb ffs-test
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 662f34c3287e..4f588bc94186 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1615,8 +1615,9 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
- return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page,
- offset, len);
+ const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
+
+ return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
@@ -1897,6 +1898,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
int r;
struct kvm_vcpu *vcpu, *v;
+ if (id >= KVM_MAX_VCPUS)
+ return -EINVAL;
+
vcpu = kvm_arch_vcpu_create(kvm, id);
if (IS_ERR(vcpu))
return PTR_ERR(vcpu);
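
The second kvm_main.c hunk rejects an out-of-range, user-supplied vcpu id
before kvm_arch_vcpu_create() allocates anything. A hypothetical userspace
analogue of that guard (the KVM_MAX_VCPUS value here is a placeholder; the
real constant is per-architecture):

#include <assert.h>
#include <errno.h>

#define KVM_MAX_VCPUS 255	/* placeholder, not the kernel's value */

static int check_vcpu_id(unsigned int id)
{
	if (id >= KVM_MAX_VCPUS)
		return -EINVAL;	/* fail fast, before any allocation */
	return 0;
}

int main(void)
{
	assert(check_vcpu_id(0) == 0);
	assert(check_vcpu_id(KVM_MAX_VCPUS) == -EINVAL);
	return 0;
}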